This commit is contained in:
@@ -104,7 +104,7 @@
|
||||
\leavevmode
|
||||
\begin{columns}
|
||||
|
||||
\begin{column}{.8\linewidth}
|
||||
\begin{column}{\linewidth}
|
||||
\vskip2cm
|
||||
\centering
|
||||
%\usebeamercolor{title in headline}
|
||||
@@ -116,12 +116,12 @@
|
||||
{\color{fg} \large{\insertinstitute}\\[1ex]}
|
||||
\vskip2cm
|
||||
\end{column}
|
||||
\begin{column}{.2\linewidth}
|
||||
\begin{center}
|
||||
\includegraphics[width=0.55\linewidth]{image/uestc.png}
|
||||
\end{center}
|
||||
\end{column}
|
||||
\vspace{1cm}
|
||||
% \begin{column}{.2\linewidth}
|
||||
% \begin{center}
|
||||
% \includegraphics[width=0.55\linewidth]{image/uestc.png}
|
||||
% \end{center}
|
||||
% \end{column}
|
||||
% \vspace{1cm}
|
||||
\end{columns}
|
||||
|
||||
%%% additional bar under titles
|
||||
|
551
ijcai25.bib
Normal file
551
ijcai25.bib
Normal file
@@ -0,0 +1,551 @@
|
||||
@article{Dyer84,
  author  = {Dyer, M. E.},
  title   = {An {{O}}(n) Algorithm for the Multiple-Choice Knapsack Linear Program},
  journal = {Mathematical Programming},
  year    = {1984},
  month   = may,
  volume  = {29},
  number  = {1},
  pages   = {57--63},
  issn    = {0025-5610, 1436-4646},
  doi     = {10.1007/BF02591729},
  langid  = {english},
  file    = {/Users/chaoxu/Zotero/storage/RLSLWMGP/Dyer - 1984 - An O(n) algorithm for the multiple-choice knapsack.pdf}
}
|
||||
|
||||
@article{DavidPisinger,
  author   = {David Pisinger},
  title    = {Budgeting with bounded multiple-choice constraints},
  journal  = {European Journal of Operational Research},
  volume   = {129},
  number   = {3},
  pages    = {471--480},
  year     = {2001},
  issn     = {0377-2217},
  doi      = {10.1016/S0377-2217(99)00451-8},
  url      = {https://www.sciencedirect.com/science/article/pii/S0377221799004518},
  keywords = {Integer programming, Dantzig–Wolfe decomposition, Dynamic programming, Bounded multiple-choice knapsack problem},
  abstract = {We consider a budgeting problem where a specified number of projects from some disjoint classes has to be selected such that the overall gain is largest possible, and such that the costs of the chosen projects do not exceed a fixed upper limit. The problem has several application in government budgeting, planning, and as relaxation from other combinatorial problems. It is demonstrated that the problem can be transformed to an equivalent multiple-choice knapsack problem through dynamic programming. A naive transformation however leads to a drastic increase in the number of variables, thus we propose an algorithm for the continuous problem based on Dantzig–Wolfe decomposition. A master problem solves a continuous multiple-choice knapsack problem knowing only some extreme points in each of the transformed classes. The individual subproblems find extreme points for each given direction, using a median search algorithm. An integer optimal solution is then derived by using the dynamic programming transformation to a multiple-choice knapsack problem for an expanding core. The individual classes are considered in an order given by their gradients, and the transformation to a multiple-choice knapsack problem is performed when needed. In this way, only a dozen of classes need to be transformed for standard instances from the literature. Computational experiments are presented, showing that the developed algorithm is orders of magnitude faster than a general LP/MIP algorithm.}
}
|
||||
@article{CAMERINI1984157,
  author   = {Paolo M. Camerini and Carlo Vercellis},
  title    = {The matroidal knapsack: A class of (often) well-solvable problems},
  journal  = {Operations Research Letters},
  volume   = {3},
  number   = {3},
  pages    = {157--162},
  year     = {1984},
  issn     = {0167-6377},
  doi      = {10.1016/0167-6377(84)90009-9},
  url      = {https://www.sciencedirect.com/science/article/pii/0167637784900099},
  keywords = {knapsack problems, Lagrangean relaxation, matroids, probabilistic evaluation},
  abstract = {A general class of problems, defined in terms of matroids, is recognized to include as special cases a variety of knapsack problems, subject to combinatorial constraints. A polynomial algorithm, based on Lagrangean relaxation, is proposed: A worst case and a probabilistic analysis demonstrate its ability to compute tight upper and lower bounds for the optimum, together with good approximate solutions.}
}
|
||||
@article{Zoltners,
  author    = {Prabhakant Sinha and Andris A. Zoltners},
  title     = {The Multiple-Choice Knapsack Problem},
  journal   = {Operations Research},
  volume    = {27},
  number    = {3},
  pages     = {503--515},
  year      = {1979},
  publisher = {INFORMS},
  issn      = {0030-364X, 1526-5463},
  url       = {http://www.jstor.org/stable/170213},
  urldate   = {2022-11-06},
  abstract  = {The multiple-choice knapsack problem is defined as a binary knapsack problem with the addition of disjoint multiple-choice constraints. The strength of the branch-and-bound algorithm we present for this problem resides with the quick solution of the linear programming relaxation and its efficient, subsequent reoptimization as a result of branching. An implemented version of this algorithm has performed well on a large set of test problems. We cite computational results as well as a comparison with a previously reported algorithm. Several useful applications of the multiple-choice knapsack problem are also suggested.}
}
|
||||
@unpublished{Chan1999RemarksOK,
  author = {Timothy M. Chan},
  title  = {Remarks on {k-Level} Algorithms in the Plane},
  year   = {1999},
  note   = {Manuscript},
  url    = {https://tmc.web.engr.illinois.edu/pub_kset.html}
}
|
||||
@inproceedings{10.1109/ITSC55140.2022.9922143,
  author    = {Javaudin, Lucas and Araldo, Andrea and de Palma, Andr{\'e}},
  title     = {Large-Scale Allocation of Personalized Incentives},
  booktitle = {2022 IEEE 25th International Conference on Intelligent Transportation Systems (ITSC)},
  year      = {2022},
  publisher = {IEEE Press},
  pages     = {4151--4156},
  numpages  = {6},
  location  = {Macau, China},
  doi       = {10.1109/ITSC55140.2022.9922143},
  url       = {https://doi.org/10.1109/ITSC55140.2022.9922143},
  abstract  = {We consider a regulator willing to drive individual choices towards increasing social welfare by providing incentives to a large population of individuals. For that purpose, we formalize and solve the problem of finding an optimal personalized-incentive policy: optimal in the sense that it maximizes social welfare under an incentive budget constraint, personalized in the sense that the incentives proposed depend on the alternatives available to each individual, as well as her preferences. We propose a polynomial time approximation algorithm that computes a policy within few seconds and we analytically prove that it is boundedly close to the optimum. We then extend the problem to efficiently calculate the Maximum Social Welfare Curve, which gives the maximum social welfare achievable for a range of incentive budgets (not just one value). This curve is a valuable practical tool for the regulator to determine the right incentive budget to invest. Finally, we simulate a large-scale application to mode choice in a French department (about 200 thousands individuals) and illustrate the effectiveness of the proposed personalized-incentive policy in reducing CO2 emissions.}
}
|
||||
@misc{wang_shmoys_2019,
  author    = {Wang, Shujing and Shmoys, David},
  title     = {How to solve a linear optimization problem on incentive allocation?},
  journal   = {Medium},
  publisher = {Lyft Engineering},
  year      = {2019},
  month     = sep,
  url       = {https://eng.lyft.com/how-to-solve-a-linear-optimization-problem-on-incentive-allocation-5a8fb5d04db1}
}
|
||||
@inbook{Kellerer2004,
  author    = {Kellerer, Hans and Pferschy, Ulrich and Pisinger, David},
  title     = {The Multiple-Choice Knapsack Problem},
  booktitle = {Knapsack Problems},
  year      = {2004},
  publisher = {Springer Berlin Heidelberg},
  address   = {Berlin, Heidelberg},
  pages     = {317--347},
  isbn      = {978-3-540-24777-7},
  doi       = {10.1007/978-3-540-24777-7_11},
  url       = {https://doi.org/10.1007/978-3-540-24777-7_11},
  abstract  = {The multiple-choice knapsack problem (MCKP) is a generalization of the ordinary knapsack problem, where the set of items is partitioned into classes. The binary choice of taking an item is replaced by the selection of exactly one item out of each class of items. In Section 7.1 we already noticed that a (BKP) can be formulated as a (MCKP), and indeed the (MCKP) model is one of the most flexible knapsack models. (MCKP) is also denoted as knapsack problem with generalized upper bound constraints or for short knapsack problem with GUB.}
}
|
||||
@article{Eppstein98,
  author  = {Eppstein, D.},
  title   = {Geometric {{Lower Bounds}} for {{Parametric Matroid Optimization}}},
  journal = {Discrete \& Computational Geometry},
  year    = {1998},
  month   = dec,
  volume  = {20},
  number  = {4},
  pages   = {463--476},
  issn    = {0179-5376},
  doi     = {10.1007/PL00009396},
  langid  = {english},
  file    = {/Users/chaoxu/Zotero/storage/QR8HQ64I/Eppstein - 1998 - Geometric Lower Bounds for Parametric Matroid Opti.pdf}
}
|
||||
|
||||
@article{ZEMEL1984123,
  author   = {Eitan Zemel},
  title    = {An O(n) algorithm for the linear multiple choice knapsack problem and related problems},
  journal  = {Information Processing Letters},
  volume   = {18},
  number   = {3},
  pages    = {123--128},
  year     = {1984},
  issn     = {0020-0190},
  doi      = {10.1016/0020-0190(84)90014-0},
  url      = {https://www.sciencedirect.com/science/article/pii/0020019084900140},
  keywords = {Linear programming, multiple choice knapsack, linear algorithms, least distance hyperplane},
  abstract = {We present an O(n) algorithm for the Linear Multiple Choice Knapsack Problem and its d-dimensional generalization which is based on Megiddo's (1982) algorithm for linear programming. We also consider a certain type of convex programming problems which are common in geometric location models. An application of the linear case is an O(n) algorithm for finding a least distance hyperplane in Rd according to the rectilinear norm. The best previously available algorithm for this problem was an O(n log2n) algorithm for the two-dimensional case. A simple application of the nonlinear case is an O(n) algorithm for finding the point at which a ‘pursuer’ minimizes its distance from the furthest among n ‘targets’, when the trajectories involved are straight lines in Rd.}
}
|
||||
@article{DyerOnalg,
  author        = {Dyer, M. E.},
  title         = {An O(n) algorithm for the multiple-choice knapsack linear program},
  journal       = {Mathematical Programming},
  volume        = {29},
  pages         = {57--63},
  year          = {1984},
  doi           = {10.1007/BF02591729},
  internal-note = {NOTE(review): duplicate of entry Dyer84 -- consolidate citations to one key. The previous url field pointed to the ScienceDirect page of Zemel's IPL paper (pii/0020019084900140), not this article, and was removed.}
}
|
||||
@article{Carstensen83,
  author   = {Carstensen, Patricia J.},
  title    = {Complexity of Some Parametric Integer and Network Programming Problems},
  journal  = {Mathematical Programming},
  year     = {1983},
  month    = may,
  volume   = {26},
  number   = {1},
  pages    = {64--75},
  issn     = {0025-5610, 1436-4646},
  doi      = {10.1007/BF02591893},
  langid   = {english},
  abstract = {Two examples of parametric cost programming problems--one in network programming and one in NP-hard 0-1 programming--are given; in each case, the number of breakpoints in the optimal cost curve is exponential in the square root of the number of variables in the problem.},
  file     = {/Users/chaoxu/Zotero/storage/WUDD9HPG/Carstensen - 1983 - Complexity of some parametric integer and network .pdf}
}
|
||||
@article{Zadeh73b,
  author  = {Zadeh, Norman},
  title   = {A Bad Network Problem for the Simplex Method and Other Minimum Cost Flow Algorithms},
  journal = {Mathematical Programming},
  year    = {1973},
  month   = dec,
  volume  = {5},
  number  = {1},
  pages   = {255--266},
  issn    = {0025-5610, 1436-4646},
  doi     = {10.1007/BF01580132},
  langid  = {english}
}
|
||||
@article{Murty80,
  author  = {Murty, Katta G.},
  title   = {Computational Complexity of Parametric Linear Programming},
  journal = {Mathematical Programming},
  year    = {1980},
  month   = dec,
  volume  = {19},
  number  = {1},
  pages   = {213--219},
  issn    = {0025-5610, 1436-4646},
  doi     = {10.1007/BF01581642},
  langid  = {english}
}
|
||||
@article{Megiddo,
  author     = {Megiddo, Nimrod},
  title      = {Applying Parallel Computation Algorithms in the Design of Serial Algorithms},
  journal    = {J. ACM},
  year       = {1983},
  month      = oct,
  issue_date = {Oct. 1983},
  volume     = {30},
  number     = {4},
  pages      = {852--865},
  numpages   = {14},
  issn       = {0004-5411},
  doi        = {10.1145/2157.322410},
  url        = {https://doi.org/10.1145/2157.322410},
  publisher  = {Association for Computing Machinery},
  address    = {New York, NY, USA}
}
|
||||
@article{Cole87,
  author   = {Cole, Richard},
  title    = {Slowing down Sorting Networks to Obtain Faster Sorting Algorithms},
  journal  = {Journal of the ACM},
  year     = {1987},
  month    = jan,
  volume   = {34},
  number   = {1},
  pages    = {200--208},
  issn     = {0004-5411, 1557-735X},
  doi      = {10.1145/7531.7537},
  langid   = {english},
  abstract = {Megiddo introduced a technique for using a parallel algorithm for one problem to construct an efficient serial algorithm for a second problem. This paper provides a general method that trims a factor of O (log n ) time (or more) for many applications of this technique.},
  file     = {/Users/chaoxu/Zotero/storage/B98DLXRF/Cole - 1987 - Slowing down sorting networks to obtain faster sor.pdf}
}
|
||||
|
||||
@inproceedings{minimaxoptimization,
  author    = {Tokuyama, Takeshi},
  title     = {Minimax Parametric Optimization Problems and Multi-Dimensional Parametric Searching},
  booktitle = {Proceedings of the Thirty-Third Annual ACM Symposium on Theory of Computing},
  series    = {STOC '01},
  year      = {2001},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Hersonissos, Greece},
  pages     = {75--83},
  numpages  = {9},
  isbn      = {1581133499},
  doi       = {10.1145/380752.380777},
  url       = {https://doi.org/10.1145/380752.380777},
  abstract  = {The parametric minimax problem, which finds the parameter value minimizing the weight of a solution of a combinatorial maximization problem, is a fundamental problem in sensitivity analysis. Moreover, several problems in computational geometry can be formulated as parametric minimax problems. The parametric search paradigm gives an efficient sequential algorithm for a convex parametric minimax problem with one parameter if the original non-parametric problem has an efficient parallel algorithm. We consider the parametric minimax problem with d parameters for a constant d, and solve it by using multidimensional version of the parametric search paradigm. As a new feature, we give a feasible region in the parameter space in which the parameter vector must be located.Typical results obtained as applications are: (1) Efficient solutions for some geometric problems, including theoretically efficient solutions for the minimum diameter bridging problem in d-dimensional space between convex polytopes. (2) Parametric polymatroid optimization, for example, O(n log n) time algorithm to compute the parameter vector minimizing k-largest linear parametric elements with d dimensions.}
}
|
||||
@article{Dey1998,
  author        = {Dey, T. K.},
  title         = {Improved Bounds for Planar {k-Sets} and Related Problems},
  journal       = {Discrete {\&} Computational Geometry},
  year          = {1998},
  month         = mar,
  day           = {01},
  volume        = {19},
  number        = {3},
  pages         = {373--382},
  issn          = {1432-0444},
  doi           = {10.1007/PL00009354},
  url           = {https://doi.org/10.1007/PL00009354},
  abstract      = {We prove an O(n(k+1)1/3) upper bound for planar k-sets. This is the first considerable improvement on this bound after its early solution approximately 27 years ago. Our proof technique also applies to improve the current bounds on the combinatorial complexities of k-levels in the arrangement of line segments, k convex polygons in the union of n lines, parametric minimum spanning trees, and parametric matroids in general.},
  internal-note = {Publisher <lsiheader> markup residue stripped from the abstract during review.}
}
|
||||
@article{Toth01,
  author  = {T{\'o}th, G.},
  title   = {Point {{Sets}} with {{Many}} K-{{Sets}}},
  journal = {Discrete \& Computational Geometry},
  year    = {2001},
  month   = jan,
  volume  = {26},
  number  = {2},
  pages   = {187--194},
  issn    = {0179-5376, 1432-0444},
  doi     = {10.1007/s004540010022},
  langid  = {english}
}
|
||||
@book{Schrijver2002,
  author        = {Schrijver, Alexander},
  title         = {Combinatorial Optimization: Polyhedra and Efficiency},
  publisher     = {Springer},
  address       = {Berlin, Germany},
  edition       = {2003},
  year          = {2002},
  isbn          = {9783540443896},
  language      = {en},
  internal-note = {NOTE(review): year={2002} disagrees with edition={2003}; this work is standardly cited as Schrijver 2003 -- verify against the title page before relying on the year.}
}
|
||||
|
||||
|
||||
@incollection{ERDOS1973139,
  author    = {P. Erd{\H{o}}s and L. Lov{\'a}sz and A. Simmons and E. G. Straus},
  title     = {Chapter 13 - Dissection Graphs of Planar Point Sets},
  editor    = {Jagdish N. Srivastava},
  booktitle = {A Survey of Combinatorial Theory},
  publisher = {North-Holland},
  pages     = {139--149},
  year      = {1973},
  isbn      = {978-0-7204-2262-7},
  doi       = {10.1016/B978-0-7204-2262-7.50018-1},
  url       = {https://www.sciencedirect.com/science/article/pii/B9780720422627500181},
  abstract  = {This chapter discusses the dissection graphs of planar point sets. It discusses the general properties of the graphs Gk. The graph Gk can be constructed as follows. Let l be any oriented line containing no points of S and having k + 1 points of S on its positive side. l is to be translated to its left until it meets a point p1 of S. This line will be called l(0). Then, l(0) is to be rotated counterclockwise by θ about p1 into line l(θ) until it meets a second point p2 of S at l(θ1) = 11. l(0) is then rotated counterclockwise about p2 until l(θ) meets a point p3 of S at l(θ2) = l2, etc. This gives a sequence of points p1, p2, …, pn of S with pN+1 = p1, pN+2 = p2 and a sequence of directed lines l1, l2, l N, l N+1 with l N+2 = l1.}
}
|
||||
|
||||
@article{lovasz,
  author  = {Lov{\'a}sz, L.},
  title   = {On the number of halving lines},
  journal = {Annales Universitatis Scientiarum Budapestinensis de Rolando E{\"o}tv{\"o}s Nominatae Sectio Mathematica},
  year    = {1971},
  volume  = {14},
  pages   = {107--108}
}
|
||||
|
||||
@inbook{blelloch1990,
  author        = {Guy E. Blelloch},
  title         = {Prefix sums and their applications},
  chapter       = 1,
  booktitle     = {Synthesis of parallel algorithms},
  publisher     = {Morgan Kaufmann},
  address       = {Oxford, England},
  year          = 1991,
  pages         = {35--60},
  internal-note = {NOTE(review): the pages field was previously misspelled `page' and silently ignored by BibTeX; also the key says 1990 while year says 1991 -- verify the publication year.}
}
|
||||
@article{randompoint,
  author    = {Bradley Efron},
  title     = {The Convex Hull of a Random Set of Points},
  journal   = {Biometrika},
  year      = {1965},
  volume    = {52},
  number    = {3/4},
  pages     = {331--343},
  publisher = {[Oxford University Press, Biometrika Trust]},
  issn      = {00063444},
  url       = {http://www.jstor.org/stable/2333687},
  urldate   = {2023-01-19},
  abstract  = {Various expectations concerning the convex hull of $N$ independently and identically distributed random points in the plane or in space are evaluated. Integral expressions are given for the expected area, expected perimeter, expected probability content and expected number of sides. These integrals are shown to be particularly simple when the underlying distribution is normal or uniform over a disk or sphere.}
}
|
||||
|
||||
|
||||
@article{vldb,
  author     = {Tangwongsan, Kanat and Hirzel, Martin and Schneider, Scott and Wu, Kun-Lung},
  title      = {General Incremental Sliding-Window Aggregation},
  journal    = {Proc. VLDB Endow.},
  year       = {2015},
  month      = feb,
  issue_date = {February 2015},
  volume     = {8},
  number     = {7},
  pages      = {702--713},
  numpages   = {12},
  issn       = {2150-8097},
  doi        = {10.14778/2752939.2752940},
  url        = {https://doi.org/10.14778/2752939.2752940},
  publisher  = {VLDB Endowment},
  abstract   = {Stream processing is gaining importance as more data becomes available in the form of continuous streams and companies compete to promptly extract insights from them. In such applications, sliding-window aggregation is a central operator, and incremental aggregation helps avoid the performance penalty of re-aggregating from scratch for each window change.This paper presents Reactive Aggregator (RA), a new framework for incremental sliding-window aggregation. RA is general in that it does not require aggregation functions to be invertible or commutative, and it does not require windows to be FIFO. We implemented RA as a drop-in replacement for the Aggregate operator of a commercial streaming engine. Given m updates on a window of size n, RA has an algorithmic complexity of O(m + m log (n/m)), rivaling the best prior algorithms for any m. Furthermore, RA's implementation minimizes overheads from allocation and pointer traversals by using a single flat array.}
}
|
||||
|
||||
|
||||
@article{overmars_maintenance_1981,
  author  = {Overmars, Mark H. and Van Leeuwen, Jan},
  title   = {Maintenance of configurations in the plane},
  journal = {Journal of Computer and System Sciences},
  year    = {1981},
  month   = oct,
  volume  = {23},
  number  = {2},
  pages   = {166--204},
  issn    = {0022-0000},
  doi     = {10.1016/0022-0000(81)90012-X},
  url     = {https://linkinghub.elsevier.com/retrieve/pii/002200008190012X},
  urldate = {2023-05-24},
  langid  = {english}
}
|
||||
|
||||
|
||||
@inproceedings{agarwal_parametric_1998,
  author     = {Agarwal, P. K. and Eppstein, D. and Guibas, L. J. and Henzinger, M. R.},
  title      = {Parametric and kinetic minimum spanning trees},
  booktitle  = {Proceedings 39th Annual Symposium on Foundations of Computer Science (Cat. No.98CB36280)},
  eventtitle = {39th Annual Symposium on Foundations of Computer Science},
  year       = {1998},
  publisher  = {{IEEE} Comput. Soc},
  location   = {Palo Alto, {CA}, {USA}},
  pages      = {596--605},
  isbn       = {978-0-8186-9172-0},
  doi        = {10.1109/SFCS.1998.743510},
  url        = {http://ieeexplore.ieee.org/document/743510/},
  urldate    = {2023-05-02},
  langid     = {english},
  abstract   = {We consider the parametric minimum spanning tree problem, in which we are given a graph with edge weights that are linear functions of a parameter λ and wish to compute the sequence of minimum spanning trees generated as λ varies. We also consider the kinetic minimum spanning tree problem, in which λ represents time and the graph is subject in addition to changes such as edge insertions, deletions, and modifications of the weight functions as time progresses. We solve both problems in time O(n2/3 log4/3 n) per combinatorial change in the tree (or randomized O(n2/3 log n) per change). Our time bounds reduce to O(n1/2 log3/2 n) per change (O(n1/2 log n) randomized) for planar graphs or other minor-closed families of graphs, and O(n1/4 log3/2 n) per change (O(n1/4 log n) randomized) for planar graphs with weight changes but no insertions or deletions.}
}
|
||||
|
||||
@article{fife_laminar_2017,
  author  = {Fife, Tara and Oxley, James},
  title   = {Laminar matroids},
  journal = {European Journal of Combinatorics},
  year    = {2017},
  month   = may,
  volume  = {62},
  pages   = {206--216},
  issn    = {0195-6698},
  doi     = {10.1016/j.ejc.2017.01.002},
  url     = {https://linkinghub.elsevier.com/retrieve/pii/S0195669817300021},
  urldate = {2023-05-04},
  langid  = {english},
  abstract = {A laminar family is a collection A of subsets of a set E such that, for any two intersecting sets, one is contained in the other. For a capacity function c on A, let I be \{I : {\textbar}I ∩ A{\textbar} ≤ c(A) for all A ∈ A\}. Then I is the collection of independent sets of a (laminar) matroid on E. We present a method of compacting laminar presentations, characterize the class of laminar matroids by their excluded minors, present a way to construct all laminar matroids using basic operations, and compare the class of laminar matroids to other well-known classes of matroids.}
}
|
||||
|
||||
|
||||
@inproceedings{heavypathdecomposition,
  author    = {Sleator, Daniel D. and Tarjan, Robert Endre},
  title     = {A Data Structure for Dynamic Trees},
  booktitle = {Proceedings of the Thirteenth Annual ACM Symposium on Theory of Computing},
  series    = {STOC '81},
  year      = {1981},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Milwaukee, Wisconsin, USA},
  pages     = {114--122},
  numpages  = {9},
  isbn      = {9781450373920},
  doi       = {10.1145/800076.802464},
  url       = {https://doi.org/10.1145/800076.802464},
  abstract  = {We propose a data structure to maintain a collection of vertex-disjoint trees under a sequence of two kinds of operations: a link operation that combines two trees into one by adding an edge, and a cut operation that divides one tree into two by deleting an edge. Our data structure requires O(log n) time per operation when the time is amortized over a sequence of operations. Using our data structure, we obtain new fast algorithms for the following problems:(1) Computing deepest common ancestors.(2) Solving various network flow problems including finding maximum flows, blocking flows, and acyclic flows.(3) Computing certain kinds of constrained minimum spanning trees.(4) Implementing the network simplex algorithm for the transshipment problem.Our most significant application is (2); we obtain an O(mn log n)-time algorithm to find a maximum flow in a network of n vertices and m edges, beating by a factor of log n the fastest algorithm previously known for sparse graphs.}
}
|
||||
|
||||
@article{Edmonds1971,
  author   = {Edmonds, Jack},
  title    = {Matroids and the greedy algorithm},
  journal  = {Mathematical Programming},
  year     = {1971},
  month    = dec,
  day      = {01},
  volume   = {1},
  number   = {1},
  pages    = {127--136},
  issn     = {1436-4646},
  doi      = {10.1007/BF01584082},
  url      = {https://doi.org/10.1007/BF01584082},
  abstract = {Linear-algebra rank is the solution to an especially tractable optimization problem. This tractability is viewed abstractly, and extended to certain more general optimization problems which are linear programs relative to certain derived polyhedra.}
}
|
||||
|
||||
|
||||
@misc{enwiki_envelope,
  author       = {{Wikipedia contributors}},
  title        = {Lower envelope --- {Wikipedia}{,} The Free Encyclopedia},
  year         = {2021},
  howpublished = {\url{https://en.wikipedia.org/w/index.php?title=Lower_envelope&oldid=1024815458}},
  note         = {[Online; accessed 20-May-2024]}
}
|
||||
|
||||
@article{daskalakis_how_nodate,
  author        = {Daskalakis, Constantinos and Diakonikolas, Ilias and Yannakakis, Mihalis},
  title         = {How good is the {Chord} algorithm?},
  year          = {2013},
  eprint        = {1309.7084},
  eprinttype    = {arXiv},
  language      = {en},
  abstract      = {The Chord algorithm is a popular, simple method for the succinct approximation of curves, which is widely used, under different names, in a variety of areas, such as, multiobjective and parametric optimization, computational geometry, and graphics. We analyze the performance of the chord algorithm, as compared to the optimal approximation that achieves a desired accuracy with the minimum number of points. We prove sharp upper and lower bounds, both in the worst case and average case setting.},
  file          = {1309.7084v1:/Users/congyu/Zotero/storage/9M85GJGA/1309.7084v1.pdf:application/pdf;PDF:/Users/congyu/Zotero/storage/2EUWCRCA/Daskalakis et al. - How good is the Chord algorithm.pdf:application/pdf},
  internal-note = {NOTE(review): year and eprint inferred from the arXiv id 1309.7084 visible in the file field (1309 = Sept 2013) -- verify; a SICOMP journal version may be the preferred citation.}
}
|
||||
|
||||
|
||||
@article{eisner_mathematical_1976,
  author   = {Eisner, Mark J. and Severance, Dennis G.},
  title    = {Mathematical {Techniques} for {Efficient} {Record} {Segmentation} in {Large} {Shared} {Databases}},
  journal  = {Journal of the ACM},
  year     = {1976},
  month    = oct,
  volume   = {23},
  number   = {4},
  pages    = {619--635},
  issn     = {0004-5411, 1557-735X},
  doi      = {10.1145/321978.321982},
  url      = {https://dl.acm.org/doi/10.1145/321978.321982},
  urldate  = {2024-09-26},
  language = {en},
  abstract = {It is possible to significantly reduce the average cost of information retrieval from a large shared database by partitioning data items stored within each record into a primary and a secondary record segment. An analytic model, based upon knowledge of data item lengths, transportation costs, and retrieval patterns, is developed to assist an analyst with this assignment problem. The model is generally applicable to environments in which a database resides in secondary storage, and is useful for both uniprogramming and multiprogramming systems. A computationally tractable record design algorithm has been implemented as a Fortran program and applied to numerous problems. Realistic examples are presented which demonstrate a potential for reducing total system cost by more than 65 percent.},
  file     = {PDF:/Users/congyu/Zotero/storage/Y7ILXXL9/Eisner and Severance - 1976 - Mathematical Techniques for Efficient Record Segmentation in Large Shared Databases.pdf:application/pdf}
}
|
||||
|
||||
@article{lust_multiobjective_2012,
  title      = {The multiobjective multidimensional knapsack problem: a survey and a new approach},
  volume     = {19},
  issn       = {0969-6016, 1475-3995},
  shorttitle = {The multiobjective multidimensional knapsack problem},
  url        = {https://onlinelibrary.wiley.com/doi/10.1111/j.1475-3995.2011.00840.x},
  doi        = {10.1111/j.1475-3995.2011.00840.x},
  abstract   = {The knapsack problem (KP) and its multidimensional version (MKP) are basic problems in combinatorial optimization. In this paper we consider their multiobjective extension (MOKP and MOMKP), for which the aim is to obtain or to approximate the set of efficient solutions. In a first step, we classify and describe briefly the existing works, that are essentially based on the use of metaheuristics. In a second step, we propose the adaptation of the twophase Pareto local search (2PPLS) to the resolution of the MOMKP. With this aim, we use a very-large scale neighborhood (VLSN) in the second phase of the method, that is the Pareto local search. We compare our results to state-of-the-art results and we show that we obtain results never reached before by heuristics, for the biobjective instances. Finally we consider the extension to three-objective instances.},
  language   = {en},
  number     = {4},
  urldate    = {2024-09-29},
  journal    = {International Transactions in Operational Research},
  author     = {Lust, Thibaut and Teghem, Jacques},
  month      = jul,
  year       = {2012},
  pages      = {495--520},
  file       = {PDF:/Users/congyu/Zotero/storage/X5N6KIUT/Lust and Teghem - 2012 - The multiobjective multidimensional knapsack problem a survey and a new approach.pdf:application/pdf},
}
|
||||
|
||||
|
||||
@article{calinescu_maximizing_2011,
  title    = {Maximizing a {Monotone} {Submodular} {Function} {Subject} to a {Matroid} {Constraint}},
  volume   = {40},
  issn     = {0097-5397, 1095-7111},
  url      = {http://epubs.siam.org/doi/10.1137/080733991},
  doi      = {10.1137/080733991},
  abstract = {Let f : 2X → R+ be a monotone submodular set function, and let (X, I) be a matroid. We consider the problem maxS∈I f (S). It is known that the greedy algorithm yields a 1/2-approximation [17] for this problem. For certain special cases, e.g. max{\textbar}S{\textbar}≤k f (S), the greedy algorithm yields a (1 − 1/e)-approximation. It is known that this is optimal both in the value oracle model (where the only access to f is through a black box returning f (S) for a given set S) [37], and also for explicitly posed instances assuming P $\neq$ NP [13].},
  language = {en},
  number   = {6},
  urldate  = {2024-08-01},
  journal  = {SIAM Journal on Computing},
  author   = {Calinescu, Gruia and Chekuri, Chandra and Pál, Martin and Vondrák, Jan},
  month    = jan,
  year     = {2011},
  pages    = {1740--1766},
  file     = {Calinescu et al. - 2011 - Maximizing a Monotone Submodular Function Subject .pdf:/Users/congyu/Zotero/storage/YX9EQ3UB/Calinescu et al. - 2011 - Maximizing a Monotone Submodular Function Subject .pdf:application/pdf},
}
|
||||
|
||||
|
||||
@article{lee_maximizing_2010,
  title    = {Maximizing {Nonmonotone} {Submodular} {Functions} under {Matroid} or {Knapsack} {Constraints}},
  volume   = {23},
  issn     = {0895-4801},
  url      = {https://epubs.siam.org/doi/10.1137/090750020},
  doi      = {10.1137/090750020},
  abstract = {Let \$f:2{\textasciicircum}X {\textbackslash}rightarrow {\textbackslash}cal R\_+\$ be a monotone submodular set function, and let \$(X,{\textbackslash}cal I)\$ be a matroid. We consider the problem \$\{{\textbackslash}rm max\}\_\{S {\textbackslash}in {\textbackslash}cal I\} f(S)\$. It is known that the greedy algorithm yields a \$1/2\$-approximation [M. L. Fisher, G. L. Nemhauser, and L. A. Wolsey, Math. Programming Stud., no. 8 (1978), pp. 73–87] for this problem. For certain special cases, e.g., \$\{{\textbackslash}rm max\}\_\{{\textbar}S{\textbar} {\textbackslash}leq k\} f(S)\$, the greedy algorithm yields a \$(1-1/e)\$-approximation. It is known that this is optimal both in the value oracle model (where the only access to f is through a black box returning \$f(S)\$ for a given set S) [G. L. Nemhauser and L. A. Wolsey, Math. Oper. Res., 3 (1978), pp. 177–188] and for explicitly posed instances assuming \$P {\textbackslash}neq NP\$ [U. Feige, J. ACM, 45 (1998), pp. 634–652]. In this paper, we provide a randomized \$(1-1/e)\$-approximation for any monotone submodular function and an arbitrary matroid. The algorithm works in the value oracle model. Our main tools are a variant of the pipage rounding technique of Ageev and Sviridenko [J. Combin. Optim., 8 (2004), pp. 307–328], and a continuous greedy process that may be of independent interest. As a special case, our algorithm implies an optimal approximation for the submodular welfare problem in the value oracle model [J. Vondrák, Proceedings of the \$38\$th ACM Symposium on Theory of Computing, 2008, pp. 67–74]. As a second application, we show that the generalized assignment problem (GAP) is also a special case; although the reduction requires \${\textbar}X{\textbar}\$ to be exponential in the original problem size, we are able to achieve a \$(1-1/e-o(1))\$-approximation for GAP, simplifying previously known algorithms. Additionally, the reduction enables us to obtain approximation algorithms for variants of GAP with more general constraints.},
  internal-note = {NOTE(review): the abstract above appears to belong to a different paper (Calinescu, Chekuri, Pál, Vondrák, monotone submodular maximization) — verify against the actual Lee et al. article},
  number   = {4},
  urldate  = {2024-09-29},
  journal  = {SIAM Journal on Discrete Mathematics},
  author   = {Lee, Jon and Mirrokni, Vahab S. and Nagarajan, Viswanath and Sviridenko, Maxim},
  month    = jan,
  year     = {2010},
  note     = {Publisher: Society for Industrial and Applied Mathematics},
  pages    = {2053--2078},
  file     = {PDF:/Users/congyu/Zotero/storage/LZ2EKEKW/Lee et al. - 2010 - Maximizing Nonmonotone Submodular Functions under Matroid or Knapsack Constraints.pdf:application/pdf},
}
|
||||
|
||||
@misc{sviridenko_optimal_2014,
  title       = {Optimal approximation for submodular and supermodular optimization with bounded curvature},
  url         = {http://arxiv.org/abs/1311.4728},
  abstract    = {We design new approximation algorithms for the problems of optimizing submodular and supermodular functions subject to a single matroid constraint. Specifically, we consider the case in which we wish to maximize a nondecreasing submodular function or minimize a nonincreasing supermodular function in the setting of bounded total curvature c. In the case of submodular maximization with curvature c, we obtain a (1 − c/e)-approximation — the first improvement over the greedy (1 − e−c)/c-approximation of Conforti and Cornuejols from 1984, which holds for a cardinality constraint, as well as recent approaches that hold for an arbitrary matroid constraint.},
  language    = {en},
  urldate     = {2024-10-25},
  publisher   = {arXiv},
  author      = {Sviridenko, Maxim and Vondrák, Jan and Ward, Justin},
  month       = dec,
  year        = {2014},
  eprint      = {1311.4728},
  eprinttype  = {arXiv},
  eprintclass = {cs.DS},
  keywords    = {Computer Science - Data Structures and Algorithms},
  file        = {PDF:/Users/congyu/Zotero/storage/LS7Y3DYF/Sviridenko et al. - 2014 - Optimal approximation for submodular and supermodular optimization with bounded curvature.pdf:application/pdf},
}
|
||||
@inproceedings{doronarad_et_al:LIPIcs.ICALP.2024.56,
  author    = {Doron-Arad, Ilan and Kulik, Ariel and Shachnai, Hadas},
  title     = {{Lower Bounds for Matroid Optimization Problems with a Linear Constraint}},
  booktitle = {51st International Colloquium on Automata, Languages, and Programming (ICALP 2024)},
  pages     = {56:1--56:20},
  series    = {Leibniz International Proceedings in Informatics (LIPIcs)},
  isbn      = {978-3-95977-322-5},
  issn      = {1868-8969},
  year      = {2024},
  volume    = {297},
  editor    = {Bringmann, Karl and Grohe, Martin and Puppis, Gabriele and Svensson, Ola},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  address   = {Dagstuhl, Germany},
  url       = {https://drops.dagstuhl.de/entities/document/10.4230/LIPIcs.ICALP.2024.56},
  urn       = {urn:nbn:de:0030-drops-201990},
  doi       = {10.4230/LIPIcs.ICALP.2024.56},
  annote    = {Keywords: matroid optimization, budgeted problems, knapsack, approximation schemes},
}
|
336
ijcai25.sty
Normal file
336
ijcai25.sty
Normal file
@@ -0,0 +1,336 @@
|
||||
\typeout{Conference Style, version of November 2018}
|
||||
|
||||
% All bug reports should be directed to proceedings@ijcai.org
|
||||
% The following comments are from the original ijcai97.sty
|
||||
|
||||
% The current two-column conference style.
|
||||
% Heavily adapted from the IJCAI-89 original style.
|
||||
% Fixes from various people incorporated up to the IJCAI-95 style.
|
||||
% Some major changes for the IJCAI-2018 edition
|
||||
|
||||
% To use, place in a file called conference.sty, or whatever your conference
|
||||
% is called, in the TeX search path. (Placing it in the same directory as
|
||||
% the paper should also work.)
|
||||
|
||||
% Prepared by Peter F. Patel-Schneider,
|
||||
% liberally using the ideas of
|
||||
% other style hackers, including Barbara Beeton.
|
||||
% This style is NOT guaranteed to work. It is provided in the hope
|
||||
% that it will make the preparation of papers easier.
|
||||
%
|
||||
% The preparation of this file was supported by Schlumberger Palo Alto
|
||||
% Research, AT\&T Bell Laboratories, AAAI, and Morgan Kaufmann Publishers.
|
||||
%
|
||||
% \pubnote added by J. Scott Penberthy
|
||||
|
||||
% These instructions can be modified and used in other conferences as long
|
||||
% as credit to the authors and supporting agencies is retained, this notice
|
||||
% is not changed, and further modification or reuse is not restricted.
|
||||
%
|
||||
% If you are organizing a conference, and want to use this file, you should
|
||||
% appoint a contact person to handle any problems!
|
||||
%
|
||||
% If you are using this file for the preparation of papers for a
|
||||
% conference that supplied you with this file, you should contact the
|
||||
% organizers of the conference if you have any problems. They should have
|
||||
% much more information than I have.
|
||||
|
||||
% There are undoubtably bugs in this style. If you make bug fixes,
|
||||
% improvements, etc. please let us know at proceedings@ijcai.org.
|
||||
|
||||
% NOTE: Some laser printers have a serious problem printing TeX output.
|
||||
% These printing devices, commonly known as ``write-white'' laser
|
||||
% printers, tend to make characters too light. To get around this
|
||||
% problem, a darker set of fonts must be created for these devices.
|
||||
|
||||
% Physical page layout
|
||||
\twocolumn \flushbottom \sloppy
|
||||
% Note that TeX has built-in 1-inch top and left margins.
|
||||
\setlength\topmargin{-0.25in}
|
||||
\setlength\oddsidemargin{-0.25in}
|
||||
\setlength\evensidemargin{-0.25in}
|
||||
\setlength\textheight{9.0in}
|
||||
\setlength\textwidth{7.0in}
|
||||
\setlength\columnsep{0.25in}
|
||||
|
||||
% No pages numbers or other headers or footers
|
||||
\setlength\headheight{0pt} \setlength\headsep{0pt}
|
||||
%\setlength\footheight{0pt} \setlength\footskip{0pt}
|
||||
\thispagestyle{empty} \pagestyle{empty}
|
||||
|
||||
% jsp added:
|
||||
% \pubnote{#1}: print #1 as the running head of the current page
% (via the myheadings page style) and suppress the page number.
\def\pubnote#1{\thispagestyle{myheadings}
  \markboth{#1}{#1}
  \def\thepage{}
}
|
||||
|
||||
% Less leading in most fonts (due to the narrow columns)
|
||||
% The choices were between 1-pt and 1.5-pt leading
|
||||
% \def\@normalsize{\@setsize\normalsize{11pt}\xpt\@xpt} % 10 point on 11
|
||||
% \def\small{\@setsize\small{10pt}\ixpt\@ixpt} % 9 point on 10
|
||||
% \def\footnotesize{\@setsize\footnotesize{10pt}\ixpt\@ixpt} % 9 point on 10
|
||||
% \def\scriptsize{\@setsize\scriptsize{8pt}\viipt\@viipt} % 7 point on 8
|
||||
% \def\tiny{\@setsize\tiny{7pt}\vipt\@vipt} % 6 point on 7
|
||||
% \def\large{\@setsize\large{12pt}\xipt\@xipt} % 11 point on 12
|
||||
% \def\Large{\@setsize\Large{14pt}\xiipt\@xiipt} % 12 point on 14
|
||||
% \def\LARGE{\@setsize\LARGE{16pt}\xivpt\@xivpt} % 14 point on 16
|
||||
% \def\huge{\@setsize\huge{20pt}\xviipt\@xviipt} % 17 point on 20
|
||||
% \def\Huge{\@setsize\Huge{23pt}\xxpt\@xxpt} % 20 point on 23
|
||||
|
||||
% latex2e compatibility mode hack - kek@cs.brown.edu 11/10/98
|
||||
\def\@normalsize{\@setsize\normalsize{11pt}\xpt\@xpt} % 10 point on 11
|
||||
\def\normalsize{\@setsize\normalsize{11pt}\xpt\@xpt} % 10 point on 11
|
||||
\def\small{\@setsize\small{10pt}\ixpt\@ixpt} % 9 point on 10
|
||||
\def\footnotesize{\@setsize\footnotesize{10pt}\ixpt\@ixpt} % 9 point on 10
|
||||
\def\scriptsize{\@setsize\scriptsize{8pt}\viipt\@viipt} % 7 point on 8
|
||||
\def\tiny{\@setsize\tiny{7pt}\vipt\@vipt} % 6 point on 7
|
||||
\def\large{\@setsize\large{12pt}\xipt\@xipt} % 11 point on 12
|
||||
\def\Large{\@setsize\Large{14pt}\xiipt\@xiipt} % 12 point on 14
|
||||
\def\LARGE{\@setsize\LARGE{16pt}\xivpt\@xivpt} % 14 point on 16
|
||||
\def\huge{\@setsize\huge{20pt}\xviipt\@xviipt} % 17 point on 20
|
||||
\def\Huge{\@setsize\Huge{23pt}\xxpt\@xxpt} % 20 point on 23
|
||||
|
||||
|
||||
% Paragraphs
|
||||
\parindent 1em
|
||||
\parskip 0pt plus 1pt
|
||||
|
||||
% Title stuff, taken from deproc.
|
||||
\newlength\titlepad \setlength\titlepad{0in}
|
||||
\newlength\titlebox \setlength\titlebox{2.25in}
|
||||
\def\maketitle{\par
|
||||
\begingroup % to make the footnote style local to the title
|
||||
\def\thefootnote{\fnsymbol{footnote}}
|
||||
\def\@makefnmark{$^{\@thefnmark}$}
|
||||
\twocolumn[\@maketitle] \@thanks
|
||||
\endgroup
|
||||
\setcounter{footnote}{0}
|
||||
\let\maketitle\relax \let\@maketitle\relax
|
||||
\gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax}%
|
||||
%
|
||||
\def\@maketitle{%
|
||||
\newsavebox{\titlearea}
|
||||
\sbox{\titlearea}{
|
||||
\let\footnote\thanks\relax
|
||||
\vbox{
|
||||
\hsize\textwidth \linewidth\hsize%
|
||||
\vskip 0.5in%
|
||||
\centering%
|
||||
{\LARGE\bf \@title \par}%
|
||||
\vskip 0.1in%
|
||||
{%
|
||||
\def\and{\unskip\thinspace{\rm ,}\enspace}%
|
||||
\def\And{\unskip\enspace{\rm and}\enspace}%
|
||||
\def\affiliations{%
|
||||
\egroup\par\Large\bgroup\rm%
|
||||
}%
|
||||
\def\emails{%
|
||||
\egroup\par\Large\bgroup\rm%
|
||||
}%
|
||||
\bgroup\Large\bf\@author\egroup%%
|
||||
}%
|
||||
\vskip 0.2in%
|
||||
}
|
||||
}
|
||||
|
||||
\newlength\actualheight
|
||||
\settoheight{\actualheight}{\usebox{\titlearea}}
|
||||
|
||||
\ifdim\actualheight>\titlebox
|
||||
\setlength{\titlebox}{\actualheight}
|
||||
\fi
|
||||
%\setlength{\titlepad}{\dimexpr\titlepad+\titlepad\relax}
|
||||
|
||||
\setcounter{footnote}{0}
|
||||
\vbox to \titlebox {
|
||||
\def\thanks##1{\footnotemark}\relax
|
||||
\hsize\textwidth \linewidth\hsize%
|
||||
\vskip 0.5in%
|
||||
\centering%
|
||||
{\LARGE\bf \@title \par}%
|
||||
\vskip 0.2in plus 4fil minus 0.1in%
|
||||
{%
|
||||
\def\and{\unskip\thinspace{\rm ,}\enspace}%
|
||||
\def\And{\unskip\enspace{\rm and}\enspace}%
|
||||
\def\affiliations{
|
||||
\egroup%
|
||||
\vskip 0.05in minus 0.05in%
|
||||
\par\bgroup\Large\rm%
|
||||
}
|
||||
\def\emails{
|
||||
\egroup%
|
||||
\vskip 0.05in minus 0.05in%
|
||||
\par\bgroup\Large\rm%
|
||||
}
|
||||
\bgroup\Large\bf\@author\egroup%
|
||||
}%
|
||||
\vskip 0.3in plus 8fil minus 0.1in
|
||||
}
|
||||
}
|
||||
\renewenvironment{abstract}{\centerline{\Large\bf
|
||||
Abstract}\vspace{0.5ex}\begin{quote}}{\par\end{quote}\vskip 1ex}
|
||||
|
||||
% Sections with less space
|
||||
\def\section{\@startsection{section}{1}{\z@}{-10pt plus
|
||||
-3pt minus -2pt}{4pt plus 2pt minus 1pt}{\Large\bf\raggedright}}
|
||||
\def\subsection{\@startsection{subsection}{2}{\z@}{-8pt plus
|
||||
-2pt minus -2pt}{3pt plus 2pt minus 1pt}{\large\bf\raggedright}}
|
||||
\def\subsubsection{\@startsection{subsubsection}{3}{\z@}{-6pt plus
|
||||
-2pt minus -1pt}{1pt plus 1pt minus 1pt}{\normalsize\bf\raggedright}}
|
||||
\renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}{-4pt plus
|
||||
-2pt minus -1pt}{-1em}{\normalsize\bf}}
|
||||
\setcounter{secnumdepth}{2} % Don't number subsubsections
|
||||
|
||||
% Footnotes
|
||||
\footnotesep 6.65pt \skip\footins 9pt plus 4pt minus 2pt
|
||||
\def\footnoterule{\kern-3pt \hrule width 5pc \kern 2.6pt }
|
||||
\setcounter{footnote}{0}
|
||||
|
||||
% Illustrations (floats)
|
||||
\floatsep 12pt plus 2pt minus 2pt
|
||||
\textfloatsep 16pt plus 2pt minus 4pt
|
||||
\intextsep 12pt plus 2pt minus 2pt
|
||||
\dblfloatsep 12pt plus 2pt minus 2pt
|
||||
\dbltextfloatsep 18pt plus 2pt minus 4pt
|
||||
|
||||
% Displays
|
||||
\abovedisplayskip 7pt plus2pt minus5pt%
|
||||
\belowdisplayskip \abovedisplayskip
|
||||
\abovedisplayshortskip 0pt plus3pt%
|
||||
\belowdisplayshortskip 4pt plus3pt minus3pt%
|
||||
|
||||
% Lists
|
||||
\leftmargini 2em
|
||||
\leftmarginii 2em
|
||||
\leftmarginiii 1em
|
||||
\leftmarginiv 0.5em
|
||||
\leftmarginv 0.5em
|
||||
\leftmarginvi 0.5em
|
||||
|
||||
\leftmargin\leftmargini
|
||||
\labelsep 5pt
|
||||
\labelwidth\leftmargini\advance\labelwidth-\labelsep
|
||||
|
||||
\def\@listI{\leftmargin\leftmargini
|
||||
\parsep 2pt plus 1pt minus 0.5pt%
|
||||
\topsep 4pt plus 1pt minus 2pt%
|
||||
\itemsep 2pt plus 1pt minus 0.5pt%
|
||||
\partopsep 1pt plus 0.5pt minus 0.5pt}
|
||||
|
||||
\let\@listi\@listI
|
||||
\@listi
|
||||
|
||||
\def\@listii{\leftmargin\leftmarginii
|
||||
\labelwidth\leftmarginii\advance\labelwidth-\labelsep
|
||||
\parsep 1pt plus 0.5pt minus 0.5pt
|
||||
\topsep 2pt plus 1pt minus 0.5pt
|
||||
\itemsep \parsep}
|
||||
\def\@listiii{\leftmargin\leftmarginiii
|
||||
\labelwidth\leftmarginiii\advance\labelwidth-\labelsep
|
||||
\parsep 0pt plus 1pt
|
||||
\partopsep 0.5pt plus 0pt minus 0.5pt
|
||||
\topsep 1pt plus 0.5pt minus 0.5pt
|
||||
\itemsep \topsep}
|
||||
\def\@listiv{\leftmargin\leftmarginiv
|
||||
\labelwidth\leftmarginiv\advance\labelwidth-\labelsep}
|
||||
\def\@listv{\leftmargin\leftmarginv
|
||||
\labelwidth\leftmarginv\advance\labelwidth-\labelsep}
|
||||
\def\@listvi{\leftmargin\leftmarginvi
|
||||
\labelwidth\leftmarginvi\advance\labelwidth-\labelsep}
|
||||
|
||||
% We're never going to need a table of contents, so just flush it to
|
||||
% save space --- suggested by drstrip@sandia-2
|
||||
%\def\addcontentsline#1#2#3{}
|
||||
|
||||
|
||||
%%%% named.sty
|
||||
|
||||
\typeout{Named Citation Style, version of 30 November 1994}
|
||||
|
||||
% This file implements citations for the ``named'' bibliography style.
|
||||
% Place it in a file called named.sty in the TeX search path. (Placing it
|
||||
% in the same directory as the LaTeX document should also work.)
|
||||
|
||||
% Prepared by Peter F. Patel-Schneider, with the assistance of several,
|
||||
% since forgotten, LaTeX hackers.
|
||||
% This style is NOT guaranteed to work. It is provided in the hope
|
||||
% that it will make the preparation of papers easier.
|
||||
%
|
||||
% There are undoubtably bugs in this style. If you make bug fixes,
|
||||
% improvements, etc. please let me know. My e-mail address is:
|
||||
% pfps@research.att.com
|
||||
|
||||
% The preparation of this file was supported by Schlumberger Palo Alto
|
||||
% Research and AT\&T Bell Laboratories.
|
||||
|
||||
% This file can be modified and used in other conferences as long
|
||||
% as credit to the authors and supporting agencies is retained, this notice
|
||||
% is not changed, and further modification or reuse is not restricted.
|
||||
|
||||
% The ``named'' bibliography style creates citations with labels like
|
||||
% \citeauthoryear{author-info}{year}
|
||||
% these labels are processed by the following commands:
|
||||
% \cite{keylist}
|
||||
% which produces citations with both author and year,
|
||||
% enclosed in square brackets
|
||||
% \shortcite{keylist}
|
||||
% which produces citations with year only,
|
||||
% enclosed in square brackets
|
||||
% \citeauthor{key}
|
||||
% which produces the author information only
|
||||
% \citeyear{key}
|
||||
% which produces the year information only
|
||||
|
||||
\def\leftcite{\@up[}\def\rightcite{\@up]}
|
||||
|
||||
\def\cite{\def\citeauthoryear##1##2{\def\@thisauthor{##1}%
|
||||
\ifx \@lastauthor \@thisauthor \relax \else##1, \fi ##2}\@icite}
|
||||
\def\shortcite{\def\citeauthoryear##1##2{##2}\@icite}
|
||||
|
||||
\def\citeauthor{\def\citeauthoryear##1##2{##1}\@nbcite}
|
||||
\def\citeyear{\def\citeauthoryear##1##2{##2}\@nbcite}
|
||||
|
||||
% internal macro for citations with [] and with breaks between citations
|
||||
% used in \cite and \shortcite
|
||||
\def\@icite{\leavevmode\def\@citeseppen{-1000}%
|
||||
\def\@cite##1##2{\leftcite\nobreak\hskip 0in{##1\if@tempswa , ##2\fi}\rightcite}%
|
||||
\@ifnextchar [{\@tempswatrue\@citex}{\@tempswafalse\@citex[]}}
|
||||
% internal macro for citations without [] and with no breaks
|
||||
% used in \citeauthor and \citeyear
|
||||
\def\@nbcite{\leavevmode\def\@citeseppen{1000}%
|
||||
\def\@cite##1##2{{##1\if@tempswa , ##2\fi}}%
|
||||
\@ifnextchar [{\@tempswatrue\@citex}{\@tempswafalse\@citex[]}}
|
||||
|
||||
% don't box citations, separate with ; and a space
|
||||
% also, make the penalty between citations a parameter,
|
||||
% it may be a good place to break
|
||||
\def\@citex[#1]#2{%
|
||||
\def\@lastauthor{}\def\@citea{}%
|
||||
\@cite{\@for\@citeb:=#2\do
|
||||
{\@citea\def\@citea{;\penalty\@citeseppen\ }%
|
||||
\if@filesw\immediate\write\@auxout{\string\citation{\@citeb}}\fi
|
||||
\@ifundefined{b@\@citeb}{\def\@thisauthor{}{\bf ?}\@warning
|
||||
{Citation `\@citeb' on page \thepage \space undefined}}%
|
||||
{\csname b@\@citeb\endcsname}\let\@lastauthor\@thisauthor}}{#1}}
|
||||
|
||||
% raise the brackets in bibliography labels
|
||||
\def\@biblabel#1{\def\citeauthoryear##1##2{##1, ##2}\@up{[}#1\@up{]}\hfill}
|
||||
|
||||
% \@up{#1}: raise #1 slightly (0.2ex) -- used for the citation brackets above.
\def\@up#1{\leavevmode\raise.2ex\hbox{#1}}
|
||||
|
||||
% Optional changes
|
||||
|
||||
%%%% use parentheses in the reference list and citations
|
||||
%\def\leftcite{(}\def\rightcite{)}
|
||||
%\def\@biblabel#1{\def\citeauthoryear##1##2{##1, ##2}(#1)\hfill}
|
||||
|
||||
%%%% no key in the reference list
|
||||
%\def\@lbibitem[#1]#2{\item\if@filesw
|
||||
% { \def\protect##1{\string ##1\space}\immediate
|
||||
% \write\@auxout{\string\bibcite{#2}{#1}}}\fi\ignorespaces}
|
||||
%\def\thebibliography#1{\section*{References\@mkboth
|
||||
% {REFERENCES}{REFERENCES}}\list
|
||||
% {}{\labelwidth 0pt\leftmargin\labelwidth \itemsep 0.5ex}
|
||||
% \def\newblock{\hskip .11em plus .33em minus .07em}
|
||||
% \sloppy\clubpenalty4000\widowpenalty4000
|
||||
% \sfcode`\.=1000\relax}
|
636
ijcai25.tex
Normal file
636
ijcai25.tex
Normal file
@@ -0,0 +1,636 @@
|
||||
%%%% ijcai25.tex
|
||||
|
||||
\typeout{IJCAI--25 Instructions for Authors}
|
||||
|
||||
% These are the instructions for authors for IJCAI-25.
|
||||
|
||||
\documentclass{article}
|
||||
\pdfpagewidth=8.5in
|
||||
\pdfpageheight=11in
|
||||
|
||||
% The file ijcai25.sty is a copy from ijcai22.sty
|
||||
% The file ijcai22.sty is NOT the same as previous years'
|
||||
\usepackage{ijcai25}
|
||||
|
||||
% Use the postscript times font!
|
||||
\usepackage{times}
|
||||
\usepackage{soul}
|
||||
\usepackage{url}
|
||||
\usepackage[hidelinks]{hyperref}
|
||||
\usepackage[utf8]{inputenc}
|
||||
\usepackage[small]{caption}
|
||||
\usepackage{graphicx}
|
||||
\usepackage{amsmath}
|
||||
\usepackage{amsthm}
|
||||
\usepackage{booktabs}
|
||||
\usepackage{algorithm}
|
||||
\usepackage{algorithmic}
|
||||
\usepackage[switch]{lineno}
|
||||
|
||||
\usepackage{amssymb}
|
||||
\usepackage{subcaption}
|
||||
\usepackage{multirow}
|
||||
\usepackage{comment}
|
||||
\usepackage{cleveref}
|
||||
% \usepackage{cite}
|
||||
\usepackage{float}
|
||||
\usepackage{thmtools}
|
||||
\usepackage{thm-restate}
|
||||
\usepackage{newfloat}
|
||||
\usepackage{listings}
|
||||
\lstset{%
|
||||
basicstyle={\footnotesize\ttfamily},% footnotesize acceptable for monospace
|
||||
numbers=left,numberstyle=\footnotesize,xleftmargin=2em,% show line numbers, remove this entire line if you don't want the numbers.
|
||||
aboveskip=0pt,belowskip=0pt,%
|
||||
showstringspaces=false,tabsize=2,breaklines=true}
|
||||
\floatstyle{ruled}
|
||||
\newfloat{listing}{tb}{lst}{}
|
||||
\floatname{listing}{Listing}
|
||||
|
||||
\newcommand{\ips}{\bar{\tau}}
|
||||
\newcommand{\lps}{\tau}
|
||||
\newcommand{\lpr}{\pi}
|
||||
\newcommand{\N}{\mathbb{N}}
|
||||
\newcommand{\R}{\mathbb{R}}
|
||||
|
||||
|
||||
|
||||
|
||||
% Comment out this line in the camera-ready submission
|
||||
% \linenumbers
|
||||
|
||||
\urlstyle{same}
|
||||
|
||||
% the following package is optional:
|
||||
%\usepackage{latexsym}
|
||||
|
||||
% See https://www.overleaf.com/learn/latex/theorems_and_proofs
|
||||
% for a nice explanation of how to define new theorems, but keep
|
||||
% in mind that the amsthm package is already included in this
|
||||
% template and that you must *not* alter the styling.
|
||||
\newtheorem{example}{Example}
|
||||
\newtheorem{theorem}{Theorem}
|
||||
|
||||
% Following comment is from ijcai97-submit.tex:
|
||||
% The preparation of these files was supported by Schlumberger Palo Alto
|
||||
% Research, AT\&T Bell Laboratories, and Morgan Kaufmann Publishers.
|
||||
% Shirley Jowell, of Morgan Kaufmann Publishers, and Peter F.
|
||||
% Patel-Schneider, of AT\&T Bell Laboratories collaborated on their
|
||||
% preparation.
|
||||
|
||||
% These instructions can be modified and used in other conferences as long
|
||||
% as credit to the authors and supporting agencies is retained, this notice
|
||||
% is not changed, and further modification or reuse is not restricted.
|
||||
% Neither Shirley Jowell nor Peter F. Patel-Schneider can be listed as
|
||||
% contacts for providing assistance without their prior permission.
|
||||
|
||||
% To use for other conferences, change references to files and the
|
||||
% conference appropriate and use other authors, contacts, publishers, and
|
||||
% organizations.
|
||||
% Also change the deadline and address for returning papers and the length and
|
||||
% page charge instructions.
|
||||
% Put where the files are available in the appropriate places.
|
||||
|
||||
|
||||
% PDF Info Is REQUIRED.
|
||||
|
||||
% Please leave this \pdfinfo block untouched both for the submission and
|
||||
% Camera Ready Copy. Do not include Title and Author information in the pdfinfo section
|
||||
% \pdfinfo{
|
||||
% /TemplateVersion (IJCAI.2025.0)
|
||||
% }
|
||||
|
||||
\title{Large-Scale Trade-Off Curve Computation for Incentive Allocation with Cardinality and Matroid Constraints}
|
||||
|
||||
|
||||
% % Single author syntax
|
||||
% \author{
|
||||
% Author Name
|
||||
% \affiliations
|
||||
% Affiliation
|
||||
% \emails
|
||||
% email@example.com
|
||||
% }
|
||||
|
||||
% Multiple author syntax (remove the single-author syntax above and the \iffalse ... \fi here)
|
||||
% \iffalse
|
||||
\author{
|
||||
Yu Cong$^1$
|
||||
\and
|
||||
Chao Xu$^1$\And
|
||||
Yi Zhou$^1$\\
|
||||
\affiliations
|
||||
$^1$University of Electronic Science and Technology of China\\
|
||||
\emails
|
||||
\{yucong143, the.chao.xu\}@gmail.com,
|
||||
zhou.yi@uestc.edu.cn
|
||||
% third@other.example.com,
|
||||
% fourth@example.com
|
||||
}
|
||||
% \fi
|
||||
|
||||
\begin{document}
|
||||
|
||||
\maketitle
|
||||
|
||||
\begin{abstract}
|
||||
We consider a large-scale incentive allocation problem where the entire trade-off curve between budget and profit has to be maintained approximately at all time.
|
||||
The application originally comes from assigning coupons to users of ride-sharing apps, where each user can have a limit on the number of coupons being assigned. We consider a more general form, where the coupons for each user form a matroid, and the set of coupons assigned to each user must be an independent set. We show that the entire trade-off curve can be maintained approximately in near real time.
|
||||
\end{abstract}
|
||||
|
||||
\section{Introduction}
|
||||
|
||||
In the current age, we are dealing with increasingly large incentive allocation problems. One is given a fixed amount of budget to allocate to different incentives to maximize some objective. A prototypical example is assigning a single coupon to each rider in ridesharing apps, where each assignment uses up some marketing budget, and increase some metric such as rides or driving hours \cite{wang_shmoys_2019}. For example, an incentive allocation problem under cardinality constraints can be formalized as the following integer program.\let\thefootnote\relax\footnote{Supplementary materials are available at \url{https://github.com/congyu711/incentive-allocation-supplementaries}.}
|
||||
|
||||
\begin{align*}
|
||||
\max_x \quad \sum_{i}\sum_{j} v_{ij}x_{ij}\\
|
||||
\text{s.t.} \quad \sum_{i}\sum_{j} c_{ij}x_{ij} &\leq B\\
|
||||
\sum_j x_{ij}&\leq k\phantom{\in \{0,1\}} \quad \forall i\\
|
||||
x_{ij}&\in \{0,1\}\phantom{\leq k} \quad \forall i, \forall j\\
|
||||
\end{align*}
|
||||
|
||||
For each agent $i$, there is a candidate incentive set consisting of coupons, where the value and cost of the $j$th coupon is $v_{ij}$ and $c_{ij}$, respectively. The goal is to select at most $k$ coupons for each agent and to make sure that the total cost of the selected coupons does not exceed the budget $B$ while maximizing the total value of those coupons.
|
||||
|
||||
The problem is a variant of the knapsack problem, and computing the exact optimum is NP-hard. However, the fractional optimum is very close to the integer optimum, even if there are strong constraints in the allocation \cite{CAMERINI1984157}. Hence, in this article, we only consider finding the fractional optimum.
|
||||
|
||||
The allocation under a fixed budget is often insufficient for decision and analytic purposes. For example, the company might want to decide on the total budget for a campaign. A data scientist might need to know how much marketing spend is required to obtain an expected profit. These questions can all be answered if the entire trade-off curve of the budget vs profit can be computed.
|
||||
There can be more complicated cases where the decider might not be a person but an algorithm. Consider the following case: The ridesharing company wants to allocate a budget to two campaigns, one of which is incentive allocation. We are optimizing $\max_x f(x)+g(B-x)$, where $f$ would map to the trade-off curve in the incentive allocation problem and $g$ is the value of the other campaign. $g$ can be complicated and in that case an algorithm optimizing the sum would evaluate $g$ at many different values of $x$. Implementing such an algorithm will be a lot faster and easier if we can compute the trade-off curve $f$ quickly.
|
||||
Moreover, the curve is not static: In practice, the cost and value of a coupon are usually predicted by algorithms or models. An agent might take a certain action, say take a ride, and the model would change its predictions of the expected profit of each incentive associated with the agent.
|
||||
|
||||
Hence, we investigate the \emph{dynamic} incentive allocation trade-off curve problem, where the \emph{entire} trade-off curve has to be maintained, while supporting updates (insertions and deletions) of agents' incentives.
|
||||
In practice, the number of agents is large (in the millions), the number of incentive choices for each agent is relatively small (a few hundred), and no agent is critical to the objective. That is, removing any agent would not significantly impact the objective. Also, we assume each agent is independent: the incentives to one do not affect others.
|
||||
|
||||
Generally, for each agent, there can be constraints on the allocation of incentives. We consider some examples from ridesharing apps. A user can be assigned at most one of the incentives (\emph{multiple choice constraint}). A user can be assigned no more than $p$ incentives (\emph{cardinality constraint}). A user can have $2$ incentives for weekends, and $2$ incentives for weekdays, but only $3$ incentives in total (a special case of the \emph{matroid constraint}). The most general constraints are given as an arbitrary family of feasible subsets of incentives. Our work also considers how the problem changes under different constraints, but we mainly focus on cardinality and matroid constraints.
|
||||
|
||||
Finally, we want the implementation to be easily transferable to queries in a modern OLAP database.
|
||||
|
||||
\begin{table*}[!htb]
|
||||
\centering
|
||||
\begin{tabular}{ccccc}
|
||||
Constraint Type & Result & Fixed budget & Trade-off curve & Dynamic\\
|
||||
\toprule
|
||||
\hline
|
||||
\multirow{3}{*}{Multiple Choice}& \cite{Dyer84,ZEMEL1984123}& $O(m)$ & - & -\\
|
||||
&\cite{10.1109/ITSC55140.2022.9922143} & - & $O(m\log m)$ & No\\
|
||||
& \Cref{thm:cardinality} & - & $O(m\log m)$ & Yes\\
|
||||
\hline
|
||||
\multirow{4}{*}{Cardinality}& \cite{DavidPisinger} & $O(m\log VC)$ & - & -\\
|
||||
& \cite{DavidPisinger} & $O(mp+nB)$ & - & -\\
|
||||
& \cite{minimaxoptimization} & $O(m\log m)$ & - & -\\
|
||||
& \Cref{thm:cardinality} & - & $O((k+m)\log m)$ & Yes\\
|
||||
\hline
|
||||
\multirow{3}{*}{Matroid}& \cite{CAMERINI1984157} & $O(m^2 + T \log m)$ & - & -\\
|
||||
& \cite{minimaxoptimization} & $O(T \log m)$ & - & -\\
|
||||
& \Cref{thm:matroid} & - & $O(Tk+k\log m)$ & Yes\\
|
||||
\bottomrule
|
||||
\end{tabular}
|
||||
\caption{Comparison of algorithms for incentive allocation: $m$ is the total number of incentives, $M$ is the maximum number of incentives over each agent, $p$ is the max rank of the matroid constraint over each agent, or the limit in the cardinality constraint. $V$ and $C$ are the maximum value and cost of the incentives, respectively. $B$ is the budget. $k=O(mp^{1/3})$ is the number of breakpoints of the trade-off curve. $T$ is the time complexity of the matroid optimum base algorithm.}
|
||||
\label{runtimetable}
|
||||
\end{table*}
|
||||
|
||||
\paragraph{Previous Works.}
|
||||
The (integral) incentive allocation problem for a fixed budget $B$ is a knapsack problem with side constraints. Our work is concerned with the linear programming relaxation, the fractional version, of the knapsack problem.
|
||||
|
||||
When each agent is allocated exactly $1$ incentive, it is also called the \emph{continuous multiple choice knapsack problem} (CMCKP), and was widely studied. Sinha and Zoltners \cite{Zoltners} showed the optimum gap from the integral case is the value of a single incentive. Later, optimum linear time algorithm was discovered \cite{Dyer84,ZEMEL1984123}.
|
||||
When each agent is required to be allocated exactly (or at most) $p$ incentives, namely having cardinality constraint on the incentives, it is equivalent to the \emph{continuous bounded multiple choice knapsack problem} (CBMCKP). CMCKP is the special case of CBMCKP when $p=1$. Pisinger showed a reduction from CBMCKP to CMCKP, but running time depends on $B$ \shortcite{DavidPisinger}. In the same paper, Pisinger used the Dantzig-Wolfe decomposition to devise a faster polynomial time algorithm. However, the algorithm's running time depends on the size of the value and the cost, therefore it is not a strongly polynomial time algorithm.
|
||||
|
||||
When the incentive for each agent must form an independent set (or a base) in a matroid, it is the (continuous) matroidal knapsack problem \cite{CAMERINI1984157}. The running time for finding an optimum is $O(m^2+ T\log m)$ time, where $m$ is the number of incentives and $T$ is the complexity of finding the optimum base for a given weighting of the elements in the matroid. After the technique of parametric search was introduced and improved \cite{Megiddo,Cole87}, the running time was improved to $O(T\log m)$ \cite{minimaxoptimization}. CBMCKP is a special case of the matroidal knapsack problem when the matroid is a $p$-uniform matroid. Although not explicitly stated, the matroid algorithm can be used for CBMCKP, and obtain an $O(m\log m)$ time algorithm because it takes $O(m)$ time to find the optimum base for a uniform matroid \cite{minimaxoptimization}. See \Cref{runtimetable} for a comparison of results.
|
||||
|
||||
From another point of view, the incentive allocation problem can be considered a matroid optimization problem with an additional linear constraint. For general matroid this problem admits no fully polynomial-time approximation scheme \cite{doronarad_et_al:LIPIcs.ICALP.2024.56}.
|
||||
|
||||
For readers familiar with parametric or multi-objective optimization, it may also be helpful to view the trade-off curve as the Pareto curve between objectives. Under the multi-objective optimization framework, we are solving the matroidal knapsack problem with an additional objective that minimizes the total budget. Computing the trade-off curve can also be considered a sensitivity analysis problem, where the budget is the parameter whose sensitivity we are interested in. While these interpretations provide additional insight, our analysis is mainly conducted within the linear programs for the incentive allocation problem, as LPs better capture the properties of the problem and are easier to understand.
|
||||
|
||||
We are not aware of explicit computation of the entire trade-off curve except in the CMCKP case. A recent study in the transportation economics area \cite{10.1109/ITSC55140.2022.9922143} considered a setting where each agent must pick one of a few incentives, each having a different impact on social welfare. The regulator consults the entire trade-off curve for informed policymaking. The algorithm has a running time of $O(m\log m)$. The result is static, as it does not address updating the curve when individual incentives change.
|
||||
|
||||
\paragraph{Our contribution.}
|
||||
We show that the entire incentive allocation trade-off curve is piecewise linear and concave. We construct a conceptually simple method to maintain the curve under different constraints, while allowing updates in logarithmic time with respect to number of fundamental changes of the trade-off curve.
|
||||
In particular,
|
||||
\begin{enumerate}
|
||||
\item In the multiple choice constraint case, the result matches the current fastest algorithm for static trade-off curve, but our implementation allows dynamic updates.
|
||||
\item In the cardinality constraint case, we show the \emph{entire trade-off curve} can be computed with $O(\log m)$ amortized time per breakpoint.
|
||||
\item We also observe that our problem is related to the $k$-level problem in computational geometry and parametric matroid optimization. The connection shows a subquadratic bound to the number of breakpoints in the trade-off curve, when previously the best known bound was quadratic.
|
||||
\end{enumerate}
|
||||
|
||||
Finally, we show part of the algorithm can be handled by a modern OLAP database to avoid implementation complexity.
|
||||
|
||||
As a preview, we will prove the following theorems.
|
||||
|
||||
\begin{restatable}{theorem}{cardinality}
|
||||
\label{thm:cardinality}
|
||||
Consider an incentive allocation problem with a total of $m$ incentives.
|
||||
If there is a cardinality $p$ constraint on each agent, and $k$ is the number of breakpoints on the trade-off curve, then $k=O(mp^{1/3})$, and the trade-off curve can be computed in $O((k+m)\log m)$ time.
|
||||
\end{restatable}
|
||||
|
||||
\begin{restatable}{theorem}{matroid}
|
||||
\label{thm:matroid}
|
||||
Consider an incentive allocation problem with a total of $m$ incentives.
|
||||
If there is a matroid constraint on each agent, each matroid has rank at most $p$, and $k$ is the number of breakpoints on the trade-off curve, then $k=O(mp^{1/3})$, and the trade-off curve can be computed in $O(Tk+k\log m)$ time, where $T$ is the time to compute the optimum weight base.
|
||||
\end{restatable}
|
||||
|
||||
|
||||
% \note{the complexity $O(m^2(\log m+T'))$. We don't need an extra $\log$ factor. Computing sig functions is already slow enough to cover the merge process.}
|
||||
|
||||
\begin{restatable}{theorem}{updatethm}
|
||||
\label{thm:update}
|
||||
If the slope-difference form of trade-off curve after an update differs from previous trade-off curve at $t$ positions, then the update takes $O(t\log k)$ time, where $k$ is the total number of breakpoints in the curve.
|
||||
\end{restatable}
|
||||
|
||||
Assuming each agent has only a few hundred available incentives, then for each update of an agent, $t$ would be around the same number, which makes the running time near-real-time.
|
||||
|
||||
\section{Preliminaries}\label{sec:prelim}
|
||||
|
||||
% \note{should we define $n,m,M,p,k...$ here again?}
|
||||
|
||||
We define $[n]=\{1,\ldots,n\}$. Let $x\in \R^m$, if $I\subseteq [m]$, then $x_{I}$ is the vector of length $|I|$ obtained by deleting elements outside the index set. $x(I) = \sum_{i\in I} x_i$. $\operatorname{Conv}(X)$ is the convex hull of $X$.
|
||||
|
||||
\subsection{Prefix Sum and Piecewise Linear Convex Function Representations}
|
||||
Given a sequence of elements $a_1,\ldots,a_n$ and some associative operation $\oplus$, the prefix sum is the sequence $b_1,\ldots,b_n$, such that $b_1=a_1$, and $b_i = b_{i-1} \oplus a_i$. The prefix sum data structure maintains the corresponding prefix sums under updates of the original sequence, allowing query of each prefix sum value and binary search (if monotonic) in $O(\log n)$ time \cite{blelloch1990}.
|
||||
|
||||
Let $f:[0,\infty) \to \R$ be a piecewise linear convex function with $n$ breakpoints ($0$ is always a breakpoint). There are 3 different forms that capture almost all information of $f$, and one can transform between them using prefix sum or even easier operations.
|
||||
|
||||
\begin{enumerate}
|
||||
\item The slope-difference form $SD(f) = \{(x_1,\Delta_1),\ldots,\allowbreak (x_n,\Delta_n)\}$, where $x_1=0$, $\Delta_1$ is the left most slope of $f$, and $x_i$ is the $i$th breakpoint, and $\Delta_i$ for $i>1$ is the difference between the right slope and the left slope.
|
||||
\item The slope form $S(f) = \{(x_1,s_1),\ldots,(x_n,s_n)\}$. Again, $x_i$ are the breakpoints, and $s_i=\sum_{j=1}^i \Delta_j$ is the right slope at point $x_i$.
|
||||
\item The value form $V(f) = \{(x_1,f(x_1)-f(0)),\ldots, \allowbreak (x_n,f(x_n)-f(0))\}$.
|
||||
\end{enumerate}
|
||||
|
||||
Note that the previous forms also require the value of $f(0)$ in order to uniquely recover the function, hence it has to be stored elsewhere.
|
||||
One can write the prefix sum data structure by hand, such that the original sequence is the slope-difference form, and any update in slope-difference form would propagate to slope and value form.
|
||||
|
||||
The slope-difference form is also easy for sums. $SD(f+g)$ is simply $SD(f)\cup SD(g)$ if $f$ and $g$ do not share breakpoints, otherwise, sum the slope-difference at the breakpoint. For simplicity of exposition, we assume the functions we sum do not share breakpoints. This also allows one to maintain $f=\sum_{i} f_i$ easily by taking the union.
|
||||
|
||||
By maintaining a function, we mean that the following questions can be answered quickly:
|
||||
\begin{enumerate}\label{enum:operations}
|
||||
\item Evaluate: Given $x$, return $f(x)$.
|
||||
\item Inverse: Given $y$, find smallest $x$ such that $f(x)=y$.
|
||||
\item Output: Given $x$ and $y$, output the function $f$ restricted on $[x,y]$.
|
||||
\end{enumerate}
|
||||
|
||||
If $f$ is the trade-off curve, then ``Evaluate'' can answer how much value can be obtained for a given budget, and ``Inverse'' can answer how much budget is required for a particular value.
|
||||
|
||||
\subsection{Matroids}
|
||||
|
||||
A matroid $M=(E,\mathcal{I})$ is a set system over ground set $E$, and $\mathcal{I}$ consists of subsets of $E$, such that the following properties hold.
|
||||
|
||||
\begin{enumerate}
|
||||
\item $\emptyset\in \mathcal{I}$.
|
||||
\item If $A\in \mathcal{I}$, then every subset of $A$ is in $\mathcal{I}$.
|
||||
\item If $A,B\in \mathcal{I}$, and $|A|>|B|$, then there is $x\in A\setminus B$, such that $B\cup \{x\}\in \mathcal{I}$.
|
||||
\end{enumerate}
|
||||
|
||||
The sets in $\mathcal{I}$ are called \emph{independent sets}, and the maximal independent sets are called \emph{bases}. The \emph{rank} function $r$ associated with $M$ is defined as $r(S) =\max\{ |S'| \mid S'\subseteq S ,S' \in \mathcal{I}\}$, the size of the largest independent set contained in $S$. The rank of the matroid is defined as $r(E)$.
|
||||
|
||||
A matroid is a $p$-uniform matroid if there exists an integer $p$, such that every set of size at most $p$ is an independent set. % A family of sets $\mathcal{F}$ is \emph{laminar}, if for any two sets $A,B\in \mathcal{F}$, either $A\subseteq B$, $B\subseteq A$ or $A\cap B=\emptyset$. A matroid is a laminar matroid, if there is a laminar family $\mathcal{F}$ and a function $d:\mathcal{F}\to \N$, such that $A$ is an independent set if and only if for every $B\in \mathcal{F}$, $|A\cap B|\leq d(B)$.
|
||||
|
||||
\subsection{Problem and Properties}
|
||||
Consider an (integral) incentive allocation problem with $n$ agents. The $i$th agent has a candidate set of incentives, $E_i$. Each incentive $e$ has a cost $c_e$ and a value $v_e$, respectively. To model constraints, let $\mathcal{F}_i$ be the feasible subsets of $E_i$, which can be encoded as a set of binary vectors. Let $m_i = |E_i|$ and $m=\sum_{i} m_i$. The problem is to choose a feasible set of incentives for each agent, such that the sum of value of all the chosen incentives is maximized while the total cost does not exceed budget $B$.
|
||||
|
||||
The (integral) incentive allocation problem can be formulated as the following integer program ($IP$):
|
||||
|
||||
\[
|
||||
\begin{aligned}
|
||||
\max_x \quad v \cdot x \\
|
||||
s.t. \quad c \cdot x &\leq B & \\
|
||||
x_{E_i}&\in \mathcal{F}_i &\forall i\in [n]\\
|
||||
x&\in \{0,1\}^m
|
||||
\end{aligned}
|
||||
\]
|
||||
|
||||
Define $\ips(B)$ to be the objective value of the above integer program. The exact trade-off curve is the function $\ips$ as $B$ ranges from $0$ to $\infty$. Finding $\ips(B)$ is NP-hard, therefore we consider the linear programming relaxation instead. This is shown below.
|
||||
|
||||
\begin{equation}
|
||||
\label{eq:generallp}
|
||||
\begin{aligned}
|
||||
\max_x \quad v \cdot x \\
|
||||
s.t. \quad c \cdot x &\leq B & \\
|
||||
x_{E_i}&\in \operatorname{Conv}(\mathcal{F}_i) &\forall i\in [n]\\
|
||||
\end{aligned}
|
||||
\end{equation}
|
||||
|
||||
Define $\lps(B)$ to be the objective value of the linear programming relaxation of the integer program $IP$. We call $\lps$ the (fractional) \emph{trade-off curve}.
|
||||
|
||||
One can reduce the problem to multiple choice knapsack similar to the reduction by Pisinger \shortcite{DavidPisinger}, and show $\lps(B)-\ips(B) \leq \max_i\{ \sum_{e\in E_i} v_e\}$.
|
||||
That is, the maximum difference is at most the value a single agent can provide. If additionally, we know $\mathcal{F}_i$ forms a matroid for each $i$, then a stronger result exists: the difference is at most the value of a single incentive \cite{CAMERINI1984157}. Namely, $\lps(B)-\ips(B) \leq \|v\|_\infty$.
|
||||
|
||||
In large-scale problems such as coupon assignment, a single agent's value is \emph{small} compared to the objective. Therefore, $\lps$ is a very close approximation of $\ips$. Hence, our work is to maintain the function $\lps$.
|
||||
|
||||
\section{Algorithm}\label{sec:alg}
|
||||
The algorithm is conceptually simple. The computation gets broken into two independent parts, allowing for greater parallelization and customization.
|
||||
|
||||
The idea is to compute a signature function for each agent. The signature functions can be computed in parallel, completely independently.
|
||||
The sum of the signature functions is the function we will maintain, and we show how to use the sum to obtain the desired information on $\lps$.
|
||||
|
||||
\subsection{From Signature Functions to Trade-Off Curve}
|
||||
\label{sec:sigf}
|
||||
|
||||
We start with the most general form of the problem \Cref{eq:generallp}. Let $P_i = \operatorname{Conv}(\mathcal{F}_i)$. Suppose we have $n$ polyhedra $P_1,\ldots,P_n$ together with disjoint index sets $E_1,\ldots,E_n$ whose union is $[m]$.
|
||||
|
||||
Consider the following linear program,
|
||||
\[
|
||||
\begin{aligned}
|
||||
\max_x \quad v \cdot x \\
|
||||
s.t. \quad c \cdot x &\leq B & \\
|
||||
x_{E_i} &\in P_i & \forall i\in[n]\\
|
||||
\end{aligned}
|
||||
\]
|
||||
|
||||
We define $f_i(\lambda) = \max\{(v_{E_i}-\lambda c_{E_i}) x | x\in P_i \}$, and we call it the \emph{signature function} of agent $i$. The signature function $f_i$ is piecewise linear and convex since it is the upper envelope of line arrangement $\{l_x(\lambda)=v_{E_i}\cdot x-\lambda c_{E_i}\cdot x | \forall x\in P_i\}$.
|
||||
|
||||
|
||||
Let $f = \sum_{i} f_i$.
|
||||
The Lagrangian dual of the linear program is therefore
|
||||
|
||||
|
||||
\begin{equation}
|
||||
\label{eq:Lagrangiandual}
|
||||
\begin{aligned}
|
||||
\min_{\lambda} \left( B\lambda+f(\lambda)\right).
|
||||
\end{aligned}
|
||||
\end{equation}
|
||||
|
||||
Note that each $f_i$ is a piecewise linear convex function, hence $\lambda B + f(\lambda)$ is also piecewise linear and convex.
|
||||
|
||||
Given the signature function $f_i$ for each agent, we have to maintain the function $f=\sum_{i} f_i$. Maintaining $f$ itself is an easy task since it is just the sum of piecewise linear functions, the number of breakpoints is the total number of breakpoints for $f_i$. If each $f_i$ is stored in slope-difference form, then $f$ can be computed through a simple merge of the lists.
|
||||
|
||||
|
||||
\begin{theorem}\label{thm:lps}
|
||||
$\lps$ is a piecewise linear concave function and $\lps(B) = \min_{\lambda} \lambda B+f(\lambda)$.
|
||||
\end{theorem}
|
||||
|
||||
This shows once we have the signature functions, the trade-off curve is easy to compute through common techniques for manipulating piecewise linear functions. See the technical appendix for the full proof.
|
||||
|
||||
% \paragraph{Updates on the trade-off curve}
|
||||
We have already established that $\lps$ is closely related to $f$. Next, we show how to maintain $\lps$ dynamically.
|
||||
% The proof of \Cref{thm:update} is straightforward once we know that $\lps$ is piecewise linear and concave. The complexity in \Cref{thm:update} comes from performing binary search on $SD(f)$ $t$ times. See supplementary materials for the analysis of dynamic operations on $\lps$.
|
||||
|
||||
Assume $f$ has $k$ breakpoints and all three forms (value, slope and slope-difference) of $f$ are given. We answer the following questions.
|
||||
|
||||
\begin{enumerate}
|
||||
\item Evaluate: For a fixed $B$, how to find $\lps(B)$?
|
||||
\item Inverse: Find a $B$ such that $\lps(B)=y$.
|
||||
\item Update: Maintain $\lps$ after a single agent's incentive changes.
|
||||
\item Output: Output a contiguous piece of $\lps$.
|
||||
\end{enumerate}
|
||||
|
||||
|
||||
\paragraph{Evaluate.} $B\lambda + f(\lambda)$ is a piecewise linear convex function, the minimum is at the first position where the slope becomes positive. Or in other words, find the first $\lambda$ in $f$, where the slope is greater than $-B$. This can be processed easily by looking at the slope form of $f$ and do a binary search, and it would take $O(\log k)$ time.
|
||||
|
||||
\paragraph{Inverse.} The idea is to find $\lps(B_1)\leq y< \lps(B_2)$, such that $B_1$ and $B_2$ correspond to the two consecutive slopes of $f$. We can do binary search over the breakpoints of $f$ to find the corresponding $\lambda_1$ for $B_1$. Finally, solve the linear equation $B\lambda_1 + f(\lambda_1) = y$ to obtain $B$.
|
||||
|
||||
\paragraph{Update.}
|
||||
Assume information for agent $i$ updates, then the only change is the signature function for that agent. Assume the new signature function after the update is $g_i$. The new $f$ is obtained by subtracting $f_i$ and adding $g_i$ successively. We could also save time by only updating the difference in $f_i$ and $g_i$. Hence, the amount of time spent on update is bounded by the number of breakpoint changes times a log factor, and the time finding the new signature function.
|
||||
|
||||
Note that the function is stored in slope-difference form; hence, any update in which only $t$ positions in the slope-difference form change takes $O(t\log k)$ time, which proves \Cref{thm:update}.
|
||||
|
||||
\paragraph{Output $\lps$.}
|
||||
Evaluating and finding the inverse are sufficient for most purposes, but if we do want to output a contiguous piece of $\lps$ that consists of at most $t$ breakpoints, we can do so in $O(\log k+t)$ time. We know precisely for which $B$ the curve $\lps$ changes in slope: a one to one correspondence with the breakpoints of $f$. Hence, we can first find the desired place in $\lps$ and output the breakpoints one by one by walking through the slope table of $f$.
|
||||
|
||||
|
||||
\begin{theorem}\label{thm:outeralgorithm}
|
||||
Given the signature functions for each agent, it takes $O(k\log k)$ time to compute a representation for $\lps$, where $k$ is the number of breakpoints in the trade-off curve.
|
||||
\end{theorem}
|
||||
|
||||
The running time in \Cref{thm:outeralgorithm} is obtained by merging signature functions of agents and depends on the output size. The complexity of maintaining the trade-off curve depends on how many breakpoints there are in $f$, which in turn is linearly related to the number of breakpoints in each signature function. The next step is to bound the number of breakpoints in the signature functions, and the time to compute it.
|
||||
|
||||
Because $f$ decomposes as the sum of signature functions, we only have to focus on a single agent. So from this point on, we only consider the signature function for a single agent.
|
||||
|
||||
\subsection{General Signature Function}
|
||||
|
||||
In the most general case, we would define the signature function $f(\lambda)$ to be the optimum of $\max_x \{(v-\lambda c)\cdot x | x\in P\}$, where $x$ is an $m$ dimensional vector. This is the general parametric linear program. The number of breakpoints in $f$ can be exponentially large, namely $\Omega(2^{\sqrt{m}})$ \cite{Zadeh73b,Murty80,Carstensen83}.
|
||||
|
||||
However, if the constraints in the question are matroids, the number of breakpoints is reasonably small, and can be computed quickly. To start, we focus on the cardinality constrained case.
|
||||
|
||||
\subsection{Cardinality Constraint}
|
||||
|
||||
Consider an agent who has $m$ incentives $E$, and at most $p$ of them can be allocated to the agent. The signature function is $f(\lambda) = \max \{(v-\lambda c)\cdot x \mid \mathbf{1}\cdot x \leq p, 0\leq x\leq 1\}$. For ease of manipulation later, we actually want equality. That is, the agent gets exactly $p$ incentives. Indeed, we can add $p$ dummy incentives with $0$ value and $0$ cost. Pisinger observed the number of possible slopes is upper bounded by $m^2$, hence showing $f$ has at most $O(m^2)$ breakpoints \shortcite{DavidPisinger}.
|
||||
|
||||
We use techniques from computational geometry to view this problem. Consider an arrangement of lines $\{\ell_e \mid e\in E\}$, where $\ell_e(\lambda) = v_e-\lambda c_e$ for $e\in E$. $f(\lambda)$ is the sum of the $p$ topmost lines at $x$-coordinate $\lambda$. Therefore, in order to find $f$, it is sufficient to find the top $p$ lines in the arrangement for each $\lambda$.
|
||||
|
||||
The simple brute force method is to first find all intersections of the lines, and sort them by $x$ coordinate. In between each two consecutive intersections, the top $p$ lines cannot change. So we calculate the top $p$ lines on all $x$ intervals formed by two consecutive intersections.
|
||||
Because there are $O(m^2)$ intersections, the number of breakpoints is also $O(m^2)$, which gives an alternative way to show Pisinger's bound. The bound is very loose, and next we show how the geometric view can improve the bound. The set of points that are the $p$th topmost points in an arrangement of lines is called the $p$-level \cite{ERDOS1973139,lovasz}. The $1$-level is the upper envelope of the lines, which is the boundary of a convex region. However, for $p>1$, it is not necessarily convex, see \Cref{fig:2level}. The $p$-level is known to be computable in $O(m\log m + k)$ time through clever computational geometry data structures \cite{Chan1999RemarksOK}, where $k$ is the number of breakpoints in the $p$-level. Observe that the slope of the signature function $f$ can only change at a breakpoint of the $p$-level. Indeed, even when there are many line intersections above the $p$-level, the top $p$ lines do not change, hence the sum does not change. The current best upper bound on the number of breakpoints of the $p$-level is $O(mp^{1/3})$ \cite{Dey1998}. Together, this shows $f$ has $O(mp^{1/3})$ breakpoints and can be computed in the same time as computing the $p$-level.
|
||||
|
||||
\begin{theorem}\label{thm:singlepsignature}
|
||||
A signature function of $k$ breakpoints for $p$ cardinality constrained incentive allocation trade-off curve can be computed in $O(m\log m + k)$ time, and it has at most $O(mp^{1/3})$ breakpoints.
|
||||
\end{theorem}
|
||||
|
||||
The true upper bound for number of breakpoints in $p$-level might be much smaller. The currently known lower bound is only $m 2^{\Omega(\sqrt{\log p})}$ \cite{Toth01}. Any improvement in the upper bound implies a better bound on the complexity of the trade-off curve.
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=.35\textwidth]{image/klevel_black}
|
||||
\caption{The bold line forms a $2$-level in the line arrangement.}
|
||||
\label{fig:2level}
|
||||
\end{figure}
|
||||
\subsection{Matroid Constraint} \label{matroid case}
|
||||
|
||||
The agent must be assigned an independent set of incentives in a matroid over ground set $E$. Let $|E|=m$, $r$ is the rank function, and $p=r(E)$ is the rank of the matroid.
|
||||
|
||||
Using standard knowledge from matroid theory \cite{Schrijver2002}, $f(\lambda)$ would be defined as the optimum of the following linear program, after adding dummy items with 0 cost and 0 value into every independent set.
|
||||
\begin{align*}
|
||||
\max_x \quad & (v-\lambda c)\cdot x\\
|
||||
s.t. \quad
|
||||
x(S)&\leq r(S) \quad\forall S\subseteq E\\
|
||||
x(E)&=p\\
|
||||
x&\geq 0
|
||||
\end{align*}
|
||||
|
||||
For a fixed $\lambda$, this LP is finding the optimum weight base in a matroid, where the weight is $w(e)=v_e-\lambda c_e$.
|
||||
% As $\lambda$ changes, only when the optimum base change, the slope of $f$ might change.
|
||||
% As $\lambda$ increases, the slope of $f$ changes when the optimum weight base changes.
|
||||
Breakpoints on $f(\lambda)$ indicate that the matroid's optimum base changes due to the linear change in weights.
|
||||
The number of breakpoints on $f$ is bounded by $O(mp^{1/3})$ \cite{Dey1998}.
|
||||
Unfortunately, unlike the cardinality case, there are matroids forcing $\Omega(mp^{1/3})$ breakpoints on the signature function \cite{Eppstein98}.
|
||||
|
||||
|
||||
% To compute $f$, we run the greedy optimum base algorithm, each time an element is added to the set, we would also consider the candidate element that was not considered, and compute for which $\lambda$, it would be chosen instead of the current element. Let $\lambda^*$ be the minimum of them all. Once the greedy algorithm finishes, we would know the next smallest $\lambda$ that would have a different optimum base is $\lambda^*$, and repeat the greedy optimum base algorithm for weights at $\lambda^*$. The running time for each run is the same as a single run of the greedy algorithm, since we do an $O(1)$ extra work for each inspected element. This shows we spend $O(T)$ time per breakpoint of $f$, where $T$ is the running time to find the optimum base in the matroid.
|
||||
|
||||
To compute $f$, we need to find all breakpoints of $f$. However, for matroid constraints there is no existing efficient algorithm for finding breakpoints of the signature functions for general matroids, analogous to the $k$-level algorithm for cardinality constraints.
|
||||
|
||||
A similar problem under graphic matroids has been studied in \cite{agarwal_parametric_1998}. The authors use parametric search and sparsification techniques to find breakpoints efficiently.
|
||||
The techniques are limited to graphs and cannot be applied to general matroids.
|
||||
We achieve a running time of $O(Tmp^{1/3})$ for general matroids by using the Eisner-Severance method, where $T$ is the time complexity of finding optimum weight base in a matroid.
|
||||
% use ES method.
|
||||
% Thus we find breakpoints in the most trivial way. We compute all $O(m^2)$ intersections of $m$ lines and sort them increasingly by $\lambda$. Then we iterate the sorted sequence of intersections and check at each intersection whether there is an optimum base change. We maintain the current optimum base $B$ while processing intersections. For each intersection we check whether it is formed by $b\in B$ and $c\in E\backslash B$ since the optimum base changes only if there is an intersection of one line in the current base and another not in the base. If so, we further check whether the optimum base actually changes by computing the rank of $B\cup \{c\} \backslash \{b\}$. Thus computing the signature function for one agent with $m$ incentives takes $O(m^2(\log m+T'))$ time, where $T'$ is the time of computing rank.
|
||||
|
||||
|
||||
Eisner-Severance method is a simple algorithm for finding breakpoints on convex piecewise linear functions \cite{eisner_mathematical_1976}.
|
||||
Suppose we are given a piecewise linear convex function $f:\R\to\R$ with $k$ breakpoints and an oracle which computes $f(\lambda)$ and an arbitrary tangent line of $f$ at $\lambda$.
|
||||
The ES method finds all breakpoints of $f$ with $O(k)$ oracle calls. The method is as follows.
|
||||
We maintain a sequence of line segments $L=\{l_1,...,l_k\}$ of $f$.
|
||||
Initially, the sequence $L=\{l_1,l_k\}$ contains the leftmost and rightmost segments.
|
||||
Denote by $\Lambda$ the list of intersections of adjacent lines in $L$.
|
||||
ES method works by repeatedly adding line segments to $L$. In each iteration we check one intersection $\lambda_i\in\Lambda$ and evaluate $f(\lambda_i)$. Suppose $\lambda_i$ is the intersection of line segments $l_t$ and $l_{t+1}$.
|
||||
Note that $\lambda_i$ is a breakpoint on $f$ if and only if $f(\lambda_i)=l_t(\lambda_i)=l_{t+1}(\lambda_i)$. Thus for every $\lambda\in \Lambda$, we can easily check if it is a breakpoint on $f$ by calling the oracle at $\lambda$ and performing several comparisons. If $\lambda$ is a breakpoint on $f$, we remove $\lambda$ from the list $\Lambda$; otherwise, there exists a new line segment $l_p$ that attains the maximum at $\lambda$ among all lines in $L$ and can be found using the oracle. We insert $l_p$ into $L$ and add its intersections with adjacent lines to $\Lambda$. The algorithm terminates when $\Lambda=\emptyset$. The correctness of the algorithm is ensured by the correctness of the ES method.
|
||||
|
||||
Each intersection added to $\Lambda$ gives us a breakpoint or a new line segment. Thus the total number of evaluations of $f$ is $O(k)$, where $k=O(mp^{1/3})$ is the number of breakpoints.
|
||||
|
||||
To find $l_p$ and evaluate $f(\lambda)$, we need to find the optimum weight base, which takes $O(T)$ time. Thus the total time complexity of computing the signature function for one agent with $m$ incentives is $O(Tm p^{1/3})$.
|
||||
|
||||
% Note that taking the Lagrangian dual is still necessarily since if we directly apply Eisner-Severance method on $\lps(B)$ is would be hard to find an optimum to LP.
|
||||
|
||||
|
||||
% \subsubsection{At most two fractional variables}
|
||||
|
||||
Our algorithm also leads to a simple proof that the optimal solution has at most two fractional variables. This fact can also be deduced from the proof of the integrality gap \cite{CAMERINI1984157}.
|
||||
\begin{theorem}\label{thm:2frac}
|
||||
There exists an optimal solution to \Cref{eq:generallp} under matroid constraints with at most $2$ fractional variables.
|
||||
\end{theorem}
|
||||
|
||||
See the technical appendix for the proof.
|
||||
% \begin{proof}
|
||||
% The optimum weight base of a matroid $M=(E,\mathcal{I})$ with weight function $w: E \rightarrow \mathbb{R}$ can be found with a simple greedy algorithm \cite{Edmonds1971}.
|
||||
% \begin{enumerate}
|
||||
% \item Order $E=\{e_1,e_2,\ldots,e_n\}$ s.t. $w(e_1)\geq w(e_2)\geq \ldots \geq w(e_n)$. Let the optimum weight base $B=\emptyset$.
|
||||
% \item Consider elements in $E$ in turn. Add $e_i$ to $B$ if and only if $B\cup \{e_i\} \in \mathcal{I}$.
|
||||
% \end{enumerate}
|
||||
% Observe that the optimal $\lambda$ for the dual is a breakpoint on the piecewise linear convex function $g(\lambda)=B\lambda + f(\lambda)$. Assume, without loss of generality, no three lines intersect at the same point. Thus every breakpoints on $g$ is an intersection of two lines. Suppose at $\lambda$ two lines $y=v_i-c_i\lambda$ and $y=v_j-c_j\lambda$ intersect. The ordered sequence $E$ at $\lambda - \epsilon$ is just two elements different from $E$ at $\lambda + \epsilon$ since only two lines $y=v_i-c_i\lambda$ and $y=v_j-c_j\lambda$ swap their positions. From the greedy algorithm we know that the optimum weight base at $\lambda-\epsilon$ is at most one element different from the optimal base at $\lambda + \epsilon$. Recall that our algorithm provides all integer solution to the Lagrangian dual. Only two variables in the all integer solution need to be adjusted to satisfied the complementary slackness condition. Thus our algorithm generates solution to the linear program \Cref{eq:generallp} with at most two fractional variables.
|
||||
% \end{proof}
|
||||
|
||||
\subsection{Wrapping Up}
|
||||
|
||||
Combining \Cref{thm:singlepsignature} and \Cref{thm:outeralgorithm}, we obtain the desired theorems.
|
||||
\cardinality*
|
||||
\begin{proof}
|
||||
Assume the $i$th agent has $m_i$ choices of incentives, and the breakpoint of the signature function is $k_i$. By \Cref{thm:singlepsignature}, the running time for computing all signature functions is $O(\sum_{i} m_i\log m_i + k_i) = O(m \log m + k)$. By \Cref{thm:outeralgorithm}, constructing the data structure for $\lps$ takes $O(k\log k) = O(k\log m)$ time. So together we have the running time $O(m\log m + k + k\log m) = O((m+k)\log m)$.
|
||||
\end{proof}
|
||||
By the above theorem, when $p=1$, namely the multiple-choice constrained case, $k=O(m)$, and we obtain the desired $O(m\log m)$ running time.
|
||||
|
||||
For matroid constraints, we get a more modest result.
|
||||
\matroid*
|
||||
\begin{proof}
|
||||
% Observe that $T=\Omega(m)$ because the greedy algorithm must inspect each element in the matroid at least once. Assume the signature function has $k_i$ breakpoints. The running time to compute all signature functions is $O(\sum_{i} k_iT) = O(kT)$. By \Cref{thm:outeralgorithm}, constructing the data structure for $\lps$ takes $O(k\log k) = O(k\log m)$ time. So together we have the running time $O(kT + k\log m) = O(kT)$.
|
||||
Assume the signature function has $k_i$ breakpoints for agent $i$ and $m$ is the total number of incentives. The running time of computing one signature function is $O(Tk_i)$.
|
||||
% Since $\sum_i m_i^2(\log m_i+T')\leq \sum_i m_i^2(\log M+T') \leq Mm(\log M+T')$ computing all signature functions takes $O(Mm(\log M+T'))$.
|
||||
Computing all signature functions takes $O(Tk)$ since $\sum_i Tk_i\leq Tk$.
|
||||
By \Cref{thm:outeralgorithm}, constructing the data structure for $\lps$ takes $O(k\log k) = O(k\log m)$ time. So together we have the running time $O( Tk+ k\log m) $.
|
||||
\end{proof}
|
||||
|
||||
For practical purposes, once we have the signature functions for single agents, the trade-off curve can be easily computed with OLAP databases. See the Technical Appendix for details.
|
||||
|
||||
Next we discuss the work per update of a single agent. Each single-agent update can only change breakpoints of the associated signature function. If $s$ incentives are related to the agent, at most $O(s^{4/3})$ breakpoint changes can happen. The update time would be $O(s^{4/3}\log k)$. As we assumed in the scenario, $s$ is small because no agent is related to too many incentives; hence this would be a fast operation in modern systems.
|
||||
|
||||
|
||||
\section{Submodular Objective}
|
||||
|
||||
In this section we discuss a more general case where the objective function is submodular instead of linear.
|
||||
A submodular objective function reflects the diminishing marginal gain phenomenon and is thus closer to reality. In practice, agents usually receive incentives for free. We further assume that the submodular objective function $g:2^E\to \R$ is monotone, non-negative and satisfies $g(\emptyset)=0$. Thus, we are particularly interested in polymatroid objective functions.
|
||||
|
||||
The submodular incentive allocation problem can be formulated as follows:
|
||||
\begin{align*}
|
||||
\max_x \; g(x) \\
|
||||
s.t. \quad c \cdot x &\leq B \\
|
||||
x_{E_i}&\in \mathcal{F}_i \quad\forall i\in [n]\\
|
||||
x&\in \{0,1\}^m
|
||||
\end{align*}
|
||||
|
||||
|
||||
where $g:\{0,1\}^m \to \R$ is a polymatroid set function.
|
||||
|
||||
% Consider its Lagrangian dual,
|
||||
|
||||
% \begin{align*}
|
||||
% \min_\lambda \; &\left( B\lambda + \max_x \left( g(x)-\lambda c\cdot x \right) \right) \\
|
||||
% s.t. \quad
|
||||
% x_{E_i}&\in \mathcal{F}_i \quad\forall i\in [n]\\
|
||||
% x&\in \{0,1\}^m\\
|
||||
% \lambda &\geq 0
|
||||
% \end{align*}
|
||||
|
||||
We define the signature function for agent $i$ to be $f_i(\lambda) = \max\left\{g(x)-\lambda c\cdot x | x\in \mathcal{F}_i \cap \{0,1 \}^{|E_i|}\right\}$. The Lagrangian dual can be written as $\min_{\lambda\geq 0} B\lambda + \sum_i f_i(\lambda)$.
|
||||
|
||||
Note that the properties of signature functions in \Cref{sec:sigf} are independent of the objective function and constraints. Therefore, $f_i(\lambda)$ is piecewise linear and convex, even for submodular objectives. However, our algorithm does not extend to the submodular case.
|
||||
|
||||
% Evaluating $f_i(\lambda)$ is hard.
|
||||
Our method requires an efficient algorithm for evaluating $f_i(\lambda)$.
|
||||
For the submodular case, we need to solve a constrained submodular maximization problem to compute $f_i(\lambda)$.
|
||||
It is known that this problem is NP-hard \cite{calinescu_maximizing_2011}, so we consider solving it approximately. Note that $g(x)-\lambda c\cdot x$ is still submodular in $x$, but it is not monotone.
|
||||
% Lee et al. invented an approximation algorithm for maximizing non-negative submodular functions under matroid constraints in \cite{lee_maximizing_2010}. They achieve $(\frac{1}{4+\epsilon})$-approximation in polynomial time.
|
||||
The best known approximation guarantee is $g(x)-\lambda c\cdot x \geq (1-1/e)g(x_{OPT})-\lambda c\cdot x_{OPT}$, given in \cite{sviridenko_optimal_2014}.
|
||||
However, the running time is impractical for implementations and currently no nontrivial upper bound is known for the number of breakpoints on $f_i$.
|
||||
|
||||
|
||||
\section{Computational Results}
|
||||
Our paper is mostly theoretical, but we did an implementation to see how the theory fares in practice for the \emph{cardinality constraint} case. Do we need to use advanced computational geometry tools to obtain good results in practice?
|
||||
|
||||
For the cardinality case we implemented two algorithms: one uses the optimum $p$-level data structure, which runs in $O((k+m)\log m)$ time. The other is a simple scan line algorithm, which maintains the $p$-level by looking at all intersections with the current $p$th line and gives an $O(km)$ running time.
|
||||
|
||||
All tests were run on the macOS operating system with an M2 Max CPU.
|
||||
\Cref{tab:klevel} shows the average running time over 10 random instances for each case; the numbers are drawn from a uniform sample.
|
||||
|
||||
\begin{table}[!ht]
|
||||
\centering
|
||||
\begin{tabular}{ccccc}
|
||||
\toprule
|
||||
\multirow{2}*{$m$} & \multicolumn{2}{c}{$p=20$} & \multicolumn{2}{c}{$p=40$} \\%& \multicolumn{2}{c}{$p=2000$} & \multicolumn{2}{c}{$p=m/5$}\\
|
||||
\cmidrule(lr){2-3} \cmidrule(lr){4-5} %\cmidrule(lr){6-7} \cmidrule(lr){8-9}
|
||||
& scan & opt & scan & opt \\%& scan & opt & scan & opt\\
|
||||
\midrule
|
||||
$1\times 10^3$ & 0.000 & 0.000 & 0.000 & 0.001 \\%& - & - & 0.003& 0.002 \\
|
||||
$5\times 10^3$ & 0.003 & 0.005 & 0.006 & 0.005 \\%& 0.137 & 0.027 & 0.091& 0.02\\
|
||||
$1\times 10^4$ & 0.008 & 0.010 & 0.014 & 0.012 \\%& 0.384 & 0.048 & 0.384 & 0.048\\
|
||||
$5\times 10^4$ & 0.043 & 0.089 & 0.080 & 0.087 \\%& 2.634 & 0.187 & 9.531& 0.326\\
|
||||
$1\times 10^5$ & 0.094 & 0.216 & 0.173 & 0.223 \\%& 5.795 & 0.397 & 38.275& 1.222\\
|
||||
$5\times 10^5$ & 0.528 & 2.911 & 0.937 & 2.952 \\%& 33.760 & 3.398 & TLE & 10.500 \\
|
||||
$1\times 10^6$ & 1.147 & 7.291 & 1.989 & 7.140 \\%& 72.485 & 7.604 & TLE & 23.203\\
|
||||
$1\times 10^7$ & 12.994 & 100.512 & 23.863 & 101.675 \\%& TLE & 101.775 & TLE & 133.974\\
|
||||
|
||||
\bottomrule
|
||||
\end{tabular}
|
||||
\begin{tabular}{ccccc}
|
||||
% \toprule
|
||||
\multirow{2}*{$m$} & \multicolumn{2}{c}{$p=2000$} & \multicolumn{2}{c}{$p=m/5$}\\
|
||||
\cmidrule(lr){2-3} \cmidrule(lr){4-5}
|
||||
& scan & opt & scan & opt \\
|
||||
\midrule
|
||||
$1\times 10^3$ & - & - & 0.003& 0.002 \\
|
||||
$5\times 10^3$ & 0.137 & 0.027 & 0.091& 0.02\\
|
||||
$1\times 10^4$ & 0.384 & 0.048 & 0.384 & 0.048\\
|
||||
$5\times 10^4$ & 2.634 & 0.187 & 9.531& 0.326\\
|
||||
$1\times 10^5$ & 5.795 & 0.397 & 38.275& 1.222\\
|
||||
$5\times 10^5$ & 33.760 & 3.398 & TLE & 10.500 \\
|
||||
$1\times 10^6$ & 72.485 & 7.604 & TLE & 23.203\\
|
||||
$1\times 10^7$ & TLE & 101.775 & TLE & 133.974\\
|
||||
|
||||
\bottomrule
|
||||
\end{tabular}
|
||||
\caption{The time (in seconds) to compute the breakpoints on the signature function under cardinality constraint using the optimum $p$-level algorithm (opt) and the scan line algorithm (scan).}
|
||||
\label{tab:klevel}
|
||||
\end{table}
|
||||
|
||||
The scan line algorithm is surprisingly good for small $p$. This is because in those cases $k$ is actually very small, much smaller than $m$. There is an intuitive argument. If $p=1$, then $k$ is the same as the number of points on the convex hull of a uniform random sample of $m$ points, whose expected value is $O(\log m)$ \cite{randompoint}. Note that as $p$ becomes larger, for a random set of points $k$ also becomes larger, and therefore the $O(km)$ algorithm suffers. Still, $k$ is much smaller than $m$, and we get the optimum algorithm with a running time of $O(m\log m)$.
|
||||
|
||||
For the matroid case we tested our algorithm on laminar matroids.
|
||||
The laminar matroid is defined on a laminar family. Given a set $E$, a family $\mathcal{A}$ of subsets of $E$ is \emph{laminar} if for every two sets $A,B\in \mathcal{A}$ with $A\cap B\not= \emptyset$, either $A\subseteq B$ or $B\subseteq A$. Define the capacity function $c: \mathcal{A} \rightarrow \mathbb{R}$. The family of independent sets $\mathcal{I}$ of a laminar matroid $\mathcal{L}$ consists of the subsets $I$ of $E$ such that $|I\cap A|\leq c(A)$ for all $A\in \mathcal{A}$ \cite{fife_laminar_2017}.
|
||||
We implemented the Eisner-Severance method on laminar matroids for demonstration purposes. \Cref{tab:matroid} shows the average running time for computing the signature function under laminar matroid constraints.
|
||||
|
||||
\begin{table}[!t]
|
||||
\centering
|
||||
\resizebox{\columnwidth}{!}{
|
||||
\begin{tabular}{cc|cc|cc}
|
||||
\toprule
|
||||
$m$ & $t$ & $m$ & $t$ & $m$ & $t$ \\
|
||||
\midrule
|
||||
$1\times 10^3$ & 0.0161 & $1.1\times 10^4$ & 1.5270 & $2.5 \times 10^4$ & 6.8601 \\
|
||||
$2\times 10^3$ & 0.0575 & $1.2\times 10^4$ & 1.8602 & $3 \times 10^4$ & 7.8284 \\
|
||||
$3\times 10^3$ & 0.1375 & $1.3\times 10^4$ & 1.8959 & $3.5 \times 10^4$ & 12.1495 \\
|
||||
$4\times 10^3$ & 0.2093 & $1.4\times 10^4$ & 2.3682 & $4 \times 10^4$ & 15.6755 \\
|
||||
$5\times 10^3$ & 0.3547 & $1.5\times 10^4$ & 2.4609 & $4.5 \times 10^4$ & 18.9251 \\
|
||||
$6\times 10^3$ & 0.5193 & $1.6\times 10^4$ & 2.7309 & $5 \times 10^4$ & 25.0841 \\
|
||||
$7\times 10^3$ & 0.6469 & $1.7\times 10^4$ & 3.1121 & $5.5 \times 10^4$ & 24.6682 \\
|
||||
$8\times 10^3$ & 0.7878 & $1.8\times 10^4$ & 3.7226 & $6 \times 10^4$ & 26.5710 \\
|
||||
$9\times 10^3$ & 1.0582 & $1.9\times 10^4$ & 4.3983 & $6.5 \times 10^4$ & 34.9471 \\
|
||||
$1\times 10^4$ & 1.2360 & $2 \times 10^4$ & 4.2026 & $7 \times 10^4$ & 44.8108 \\
|
||||
\bottomrule
|
||||
\end{tabular}
|
||||
}
|
||||
\caption{The time (in seconds) to compute the signature function under matroid constraint.}
|
||||
\label{tab:matroid}
|
||||
\end{table}
|
||||
|
||||
\section*{Acknowledgments}
|
||||
This work was supported by the National Natural Science Foundation of China under grant 62372093, and by Science and Technology Department of Sichuan Province under grant M112024ZYD0170.
|
||||
|
||||
%% The file named.bst is a bibliography style file for BibTeX 0.99c
|
||||
\bibliographystyle{named}
|
||||
\bibliography{ijcai25}
|
||||
\end{document}
|
||||
|
BIN
image/klevel_black.pdf
Normal file
BIN
image/klevel_black.pdf
Normal file
Binary file not shown.
BIN
image/lines.pdf
Normal file
BIN
image/lines.pdf
Normal file
Binary file not shown.
BIN
image/sigfunction_Bl.pdf
Normal file
BIN
image/sigfunction_Bl.pdf
Normal file
Binary file not shown.
BIN
image/v-c.pdf
Normal file
BIN
image/v-c.pdf
Normal file
Binary file not shown.
24
poster.tex
24
poster.tex
@@ -1,11 +1,7 @@
|
||||
%==============================================================================
|
||||
%== template for LATEX poster =================================================
|
||||
%==============================================================================
|
||||
%
|
||||
%--A0 beamer slide-------------------------------------------------------------
|
||||
\documentclass[final]{beamer}
|
||||
\usepackage[orientation=portrait,size=a0,
|
||||
scale=1.25 % font scale factor
|
||||
scale=1.5 % font scale factor
|
||||
]{beamerposter}
|
||||
|
||||
\geometry{
|
||||
@@ -23,9 +19,9 @@
|
||||
|
||||
%==Title, date and authors of the poster=======================================
|
||||
\title
|
||||
[34th International Joint Conference on Artificial Intelligence (IJCAI25), 29 - 31 August 2025, Guangzhou, China] % Conference
|
||||
[34th International Joint Conference on Artificial Intelligence (IJCAI25), Guangzhou, China] % Conference
|
||||
{ % Poster title
|
||||
Large-Scale Trade-Off Curve Computation for Incentive Allocation with Cardinality and Matroid Constraints
|
||||
\#2001 Large-Scale Trade-Off Curve Computation for Incentive Allocation with Cardinality and Matroid Constraints
|
||||
}
|
||||
|
||||
\author{\underline{Yu Cong}, Chao Xu, Yi Zhou}
|
||||
@@ -41,18 +37,16 @@ Large-Scale Trade-Off Curve Computation for Incentive Allocation with Cardinalit
|
||||
\Large
|
||||
|
||||
\begin{frame}[t]
|
||||
%==============================================================================
|
||||
\begin{multicols}{2}
|
||||
%==============================================================================
|
||||
%==The poster content==========================================================
|
||||
%==============================================================================
|
||||
|
||||
\section{Introduction}
|
||||
\section{problem}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
\lipsum[1-7]
|
||||
|
||||
\end{multicols}
|
||||
|
||||
%==============================================================================
|
||||
\end{frame}
|
||||
\end{document}
|
||||
|
@@ -60,7 +60,7 @@ Three problems with this modeling:
|
||||
\begin{aligned}
|
||||
\tau(b)= \max_x& & v\cdot x& & &\\
|
||||
s.t.& & c\cdot x&\leq b & &\\
|
||||
& & \textcolor{Plum}{x_{K_i}}&\textcolor{Plum}{\in P_{K_i}} & &\forall i\in [n]\\
|
||||
& & \mathcolor{Plum}{x_{K_i}}&\mathcolor{Plum}{\in P_{K_i}} & &\forall i\in [n]\\
|
||||
\end{aligned}
|
||||
\end{equation*}
|
||||
|
||||
@@ -78,7 +78,7 @@ We focus on 2 kinds of constraints of \textcolor{Plum}{$x_{K_i}\in P_{K_i}$}.
|
||||
We compute the curve $\tau(b)$ fast.
|
||||
\begin{theorem}
|
||||
Consider an incentive allocation problem with a total of $m$ incentives.
|
||||
The trade-off curve is piecewise linear concave function with $k$ breakpoints.
|
||||
The trade-off curve is a piecewise linear concave function with $k$ breakpoints.
|
||||
\begin{itemize}
|
||||
\item Cardinality constraint.
|
||||
$k=O(mp^{1/3})$ and $\tau$ can be computed in $O((k+m)\log m)$ time.
|
||||
|
Reference in New Issue
Block a user