\documentclass[12pt]{article}
\usepackage{chao}

\title{Connectivity Interdiction}
\author{}
\date{}

\DeclareMathOperator*{\opt}{OPT}
\DeclareMathOperator*{\len}{len}

\begin{document}

\section{``Cut-free'' Proof}
\begin{problem}[$b$-free knapsack]\label{bfreeknap}
Consider a set of elements $E$, two weights $w:E\to \Z_+$ and
$c:E\to \Z_+$, and a budget $b\in \Z_+$. Given a family of feasible sets
$\mathcal F\subset 2^E$, find $\min_{X\in \mathcal F,\, F\subset E} w(X\setminus F)$
such that $c(F)\leq b$.
\end{problem}
Always keep in mind that $\mathcal F$ is usually not given explicitly.

\begin{problem}[Normalized knapsack]\label{nknap}
Given the same input as \autoref{bfreeknap}, find $\min\limits_{X\in \mathcal F,\, F\subset E} \frac{w(X\setminus F)}{B-c(F)}$ such that $c(F)\leq b$.
\end{problem}

In \cite{vygen_fptas_2024} the normalized min-cut problem uses $B=b+1$. Here we allow any integer $B>b$ and see how their method carries over.

Denote by $\tau$ the optimum of \autoref{nknap}. Define a new weight
$w_\tau:E\to \R$,
\[
w_\tau(e)=\begin{cases}
w(e) & \text{if $w(e)< \tau\cdot c(e)$ (light element)}\\
\tau\cdot c(e) & \text{otherwise (heavy element)}
\end{cases}
\]
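As a quick illustration with made-up numbers: if $\tau=2$ and an element $e$ has $w(e)=3$ and $c(e)=2$, then $w(e)=3<\tau\cdot c(e)=4$, so $e$ is light and $w_\tau(e)=3$; if instead $w(e)=5$, then $e$ is heavy and $w_\tau(e)=\tau\cdot c(e)=4$. In general $w_\tau(e)=\min\{w(e),\tau\cdot c(e)\}\le w(e)$.
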
\begin{lemma}
Let $(X^N,F^N)$ be an optimal solution to \autoref{nknap}.
Every element of $F^N$ is heavy.
\end{lemma}
The proof is exactly the same as that of \cite[Lemma 1]{vygen_fptas_2024}.

The following two lemmas show (in a general form) that the optimal cut $C^N$ for normalized min-cut is exactly a minimum cut under the weights $w_\tau$.

\begin{lemma}\label{lem:lb}
For any $X\in \mathcal F$, $w_\tau(X)\ge \tau B$.
\end{lemma}

\begin{lemma}
$X^N\in \arg\min\limits_{X\in\mathcal F} w_\tau(X)$.
\end{lemma}
\begin{proof}
Since $w_\tau\le w$ pointwise, every element of $F^N$ is heavy, and $(X^N,F^N)$ attains the optimum $\tau$, we get
\begin{align*}
w_\tau (X^N) & \le w(X^N\setminus F^N) + w_\tau(F^N)\\
& = \tau \cdot(B-c(F^N)) + \tau\cdot c(F^N)\\
& = \tau B.
\end{align*}
Thus by \autoref{lem:lb}, $X^N$ attains the minimum.
\end{proof}

Now we show the counterpart of \cite[Theorem 5]{vygen_fptas_2024}, which states that the optimal solution to \autoref{bfreeknap} is an $\alpha$-approximate solution to $\min_{X\in \mathcal{F}} w_\tau(X)$.

\begin{lemma}[Lemma 4 in \cite{vygen_fptas_2024}]\label{lem:conditionalLB}
Let $(X^*,F^*)$ be an optimal solution to \autoref{bfreeknap}.
For any $\alpha>1$, either $X^*$ is an $\alpha$-approximate solution to
$\min_{X\in\mathcal F} w_\tau(X)$, or $w(X^*\setminus F^*)\geq \tau(\alpha B-b)$.
\end{lemma}

% In fact, corollary 1 and theorem 5 are also the same as those in
% \cite{vygen_fptas_2024}.
Following the argument of Corollary 1 in \cite{vygen_fptas_2024}, fix some $\alpha>1$ and assume that $X^*$ is not an $\alpha$-approximate solution to $\min_{X\in\mathcal F} w_\tau(X)$. We have
\[
\frac{w(X^N\setminus F^N)}{w(X^*\setminus F^*)}\leq \frac{\tau(B-c(F^N))}{\tau(\alpha B-b)}\leq \frac{B}{\alpha B-b},
\]
where the first inequality bounds the numerator using the optimality of $(X^N,F^N)$ and the denominator using \autoref{lem:conditionalLB}, and the second inequality uses $c(F^N)\geq 0$.
If $\alpha>2$, then $\alpha B-b>2B-b>B$ because $B>b$, so $\frac{w(X^N\setminus F^N)}{w(X^*\setminus F^*)}\leq \frac{B}{\alpha B-b} <1$; since $(X^N,F^N)$ is feasible for \autoref{bfreeknap}, this contradicts the optimality of $(X^*,F^*)$. Thus $X^*$ is an $\alpha$-approximate solution for every $\alpha>2$, and hence a $2$-approximate solution to $\min_{X\in\mathcal F} w_\tau(X)$.

Finally we get a knapsack version of Theorem 4:
\begin{theorem}[Theorem 4 in \cite{vygen_fptas_2024}]
Let $X^{\min}$ be an optimal solution to $\min_{X\in\mathcal F} w_\tau(X)$.
The optimal set $X^*$ in \autoref{bfreeknap} is a $2$-approximation to $X^{\min}$,
i.e., $w_\tau(X^*)\le 2\, w_\tau(X^{\min})$.
\end{theorem}

Thus, to obtain an FPTAS for \autoref{bfreeknap}, one needs to design an FPTAS for
\autoref{nknap} and a polynomial-time algorithm for finding all $2$-approximate solutions to
$\min_{X\in\mathcal F} w_\tau(X)$.

\paragraph{FPTAS for \autoref{nknap} in \cite{vygen_fptas_2024}} (The name
``FPTAS'' is not precise here, since we do not have an approximation scheme but
an enumeration algorithm; I will use the term anyway.) In their setting,
$\mathcal F$ is the collection of all cuts in some graph.
Let $\opt^N$ be the optimum of \autoref{nknap}. We can assume that there is no
$X\in \mathcal F$ with $c(X)\le b$: this case is detectable in polynomial time
(via a min-cut computation under $c(\cdot)$), and then the optimum is $0$. Thus we have
$\frac{1}{b+1} \le \opt^N \le |E|\cdot \max_e w(e)$. Then we enumerate the guesses
$\frac{(1+\varepsilon)^i}{b+1}$ for $i\in \set{0,1,\ldots,\floor{\log_{1+\varepsilon}(|E|w_{\max}(b+1))}}$.
There is a feasible $i$ such that $(1-\varepsilon)\opt^N\le \frac{(1+\varepsilon)^i}{b+1}\leq \opt^N$, since
$\frac{(1+\varepsilon)^i}{b+1}\le \opt^N\le \frac{(1+\varepsilon)^{i+1}}{b+1}$
holds for some $i$ in this range.
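To spell out why such an $i$ exists (a routine check using only the bounds above): take $i=\floor{\log_{1+\varepsilon}((b+1)\opt^N)}$, which is non-negative because $\opt^N\ge \frac{1}{b+1}$. Then
\[
\frac{(1+\varepsilon)^i}{b+1}\le \opt^N\le \frac{(1+\varepsilon)^{i+1}}{b+1},
\qquad\text{and hence}\qquad
\frac{(1+\varepsilon)^i}{b+1}\ge \frac{\opt^N}{1+\varepsilon}\ge (1-\varepsilon)\opt^N.
\]
Moreover $i\le \log_{1+\varepsilon}((b+1)|E|w_{\max})$, so this $i$ lies in the enumerated range.
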
Note that this enumeration scheme also works for an arbitrary $\mathcal F$, provided we have a non-zero lower bound on $\opt^N$.

\begin{conjecture}
Let $(C,F)$ be an optimal solution to connectivity interdiction. The optimal
cut $C$ can be computed in polynomial time. In other words, connectivity
interdiction is almost as easy as knapsack.
\end{conjecture}

\section{Connections}
For unit costs, connectivity interdiction with budget $b=k-1$ is the same
problem as finding a minimum-weight edge set whose removal breaks $k$-edge-connectivity:
removing an edge set $R$ breaks $k$-edge-connectivity exactly when some cut $C$ satisfies
$|C\setminus R|\leq k-1$, so we may take $R=C\setminus F$ for a cut $C$ and some $F\subset C$
with $|F|\leq k-1$, i.e., with unit costs $c(F)\leq b=k-1$.

It turns out that \autoref{nknap} is just a necessary ingredient for MWU.
The authors of \cite{vygen_fptas_2024} form a subset of the authors of
\cite{chalermsook_approximating_2022}.

How can we derive the normalized min-cut problem from connectivity interdiction?

\begin{equation*}
\begin{aligned}
\min& & z& & & \\
s.t.& & \sum_{e} y_e c(e) &\leq B & &\text{(budget for $F$)}\\
& & \sum_{e\in T} x_e&\geq 1 & &\forall T\quad \text{($x$ forms a cut)}\\
& & \sum_{e} \max(0,x_e-y_e) w(e)&\leq z & &\\
& & y_e,x_e&\in\{0,1\} & &\forall e
\end{aligned}
\end{equation*}

We can assume that $y_e\leq x_e$: if $y_e=1$ and $x_e=0$ for some $e$, then setting $y_e=0$ does not change the objective and only loosens the budget constraint.

\begin{equation*}
\begin{aligned}
\min& & \sum_{e} (x_e&-y_e) w(e) & & \\
s.t.& & \sum_{e\in T} x_e&\geq 1 & &\forall T\quad \text{($x$ forms a cut)}\\
& & \sum_{e} y_e c(e) &\leq B & &\text{(budget for $F$)}\\
& & x_e&\geq y_e & &\forall e\quad(F\subset C)\\
& & y_e,x_e&\in\{0,1\} & &\forall e
\end{aligned}
\end{equation*}

Now this program already looks similar to the normalized min-cut problem.

A further reformulation (the new $x$ stands for the old $x-y$, i.e., it indicates $C\setminus F$) gives us the following:

\begin{equation*}
\begin{aligned}
\min& & \sum_{e} x_e w(e) & & \\
s.t.& & \sum_{e\in T} x_e+y_e&\geq 1 & &\forall T\quad \text{($x+y$ forms a cut)}\\
& & \sum_{e} y_e c(e) &\leq B & &\text{(budget for $F$)}\\
& & y_e,x_e&\in\{0,1\} & &\forall e
\end{aligned}
\end{equation*}
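To make the connection explicit (this is just the standard Lagrangian relaxation of the budget constraint in the program above, written out for bookkeeping): dualizing $\sum_e y_e c(e)\le B$ with a multiplier $\lambda\ge 0$ gives
\[
\min\Bigl\{\sum_e x_e w(e)+\lambda\Bigl(\sum_e y_e c(e)-B\Bigr)\;:\;x+y\text{ covers every }T,\ x,y\in\{0,1\}^E\Bigr\}
=\min_{\text{cut }C,\,F\subset C} w(C\setminus F)-\lambda\bigl(B-c(F)\bigr),
\]
which is exactly the function $L(\lambda)$ considered next.
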
Note that now this is almost a positive covering LP. Let $L(\lambda)= \min \{ w(C\setminus F)-\lambda(B-c(F)) \;:\; \text{$C$ a cut},\ F\subset C\}$. Consider the Lagrangian dual,
\begin{equation*}
\max_{\lambda\geq 0} L(\lambda)= \max_{\lambda\geq 0} \min \left\{ w(C\setminus F)-\lambda(B-c(F)) \;:\; \text{$C$ a cut},\ F\subset C \right\}.
\end{equation*}

At this point it becomes clear how the normalized min-cut problem arises in \cite{vygen_fptas_2024}: the optimum of normalized min-cut is exactly the value of $\lambda$ at which $L(\lambda)$ reaches $0$.
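To spell out this last claim (under the convention that $F$ ranges over sets with $c(F)<B$, so that the denominators are positive; this is the relevant regime since the budget forces $c(F)\le b<B$):
\[
L(\lambda)\ge 0
\iff
w(C\setminus F)\ge \lambda\bigl(B-c(F)\bigr)\ \ \forall\,\text{cut }C,\ \forall F\subset C
\iff
\lambda\le \min_{\text{cut }C,\,F\subset C}\frac{w(C\setminus F)}{B-c(F)},
\]
and since $L$ is non-increasing in $\lambda$, the value of $\lambda$ at which $L(\lambda)$ reaches $0$ is precisely this normalized optimum.
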
\section{Random Stuff}

\subsection{Removing box constraints}
Given a positive covering LP,
\begin{equation*}
\begin{aligned}
LP1=\min& & \sum_e w(e) x_e& & & \\
s.t.& & \sum_{e\in T} c(e)x_e&\geq k & &\forall T\\
& & c(e)\geq x_e&\geq 0 & &\forall e,
\end{aligned}
\end{equation*}
we want to remove the constraints $c(e)\geq x_e$. Consider the following LP,
\begin{equation*}
\begin{aligned}
LP2=\min& & \sum_e w(e) x_e& & & \\
s.t.& & \sum_{e\in T} c(e)x_e&\geq k & &\forall T\\
& & \sum_{e\in T\setminus f} c(e)x_e&\geq k - c(f) & &\forall T \;\forall f\in T\\
& & x_e&\geq 0 & &\forall e,
\end{aligned}
\end{equation*}

These two LPs have the same optimum. One can see that any feasible solution to
LP1 is feasible for LP2; thus $\opt(LP1) \geq \opt(LP2)$. Next we show that every
coordinate $x_e$ of an optimal solution to LP2 lies in $[0,c(e)]$.
Let $x^*$ be an optimal solution and suppose that $x^*_f>c(f)$ for some $f$.
Consider all constraints $\sum_{e\in T\setminus f} c(e)x_e\geq k-c(f)$ with $T\ni f$.
For any such constraint we have $\sum_{e\in T} c(e)x^*_e>k$ since $x^*_f>c(f)$
(see the calculation below), which means we can decrease $x^*_f$ without violating
any constraint. This contradicts the optimality of $x^*$ (recall $w(f)\geq 1$).
We can therefore add the redundant constraints $x_e\leq c(e)$ for all $e$ to LP2
and conclude that LP1 and LP2 have the same optimum.
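Spelling out the one-line calculation used above (here $c(f)\ge 1$ because costs are positive integers):
\[
\sum_{e\in T} c(e)x^*_e=\sum_{e\in T\setminus f} c(e)x^*_e+c(f)x^*_f\ge \bigl(k-c(f)\bigr)+c(f)x^*_f>k,
\]
where the last inequality holds because $x^*_f>c(f)\ge 1$ implies $c(f)x^*_f>c(f)$.
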
This applies to \cite{chalermsook_approximating_2022}, but it does not yield an improvement over their algorithm (MWU does not care about the number of constraints). So does this trick apply to connectivity interdiction?

\[
\min_{\text{cut } C,\, f\in C}\frac{\sum_{e\in C\setminus\set{f}}w(e)x_e}{k-c(f)}
\]
\bibliographystyle{plain}
\bibliography{ref}
\end{document}