""" 

A top-level linear programming interface. Currently this interface solves 

linear programming problems via the Simplex and Interior-Point methods. 

 

.. versionadded:: 0.15.0 

 

Functions 

--------- 

.. autosummary:: 

:toctree: generated/ 

 

linprog 

linprog_verbose_callback 

linprog_terse_callback 

 

""" 

 

from __future__ import division, print_function, absolute_import 

 

import numpy as np 

 

from .optimize import OptimizeResult, OptimizeWarning 

from warnings import warn 

from ._linprog_ip import _linprog_ip 

from ._linprog_simplex import _linprog_simplex 

from ._linprog_rs import _linprog_rs 

from ._linprog_util import ( 

_parse_linprog, _presolve, _get_Abc, _postprocess 

) 

 

__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback'] 

 

__docformat__ = "restructuredtext en" 

 

 

def linprog_verbose_callback(res):
    """
    A sample callback function demonstrating the linprog callback interface.
    This callback produces detailed output to sys.stdout before each iteration
    and after the final iteration of the simplex algorithm.

    Parameters
    ----------
    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:

        x : 1D array
            The independent variable vector which optimizes the linear
            programming problem.
        fun : float
            Value of the objective function.
        success : bool
            True if the algorithm succeeded in finding an optimal solution.
        slack : 1D array
            The values of the slack variables. Each slack variable corresponds
            to an inequality constraint. If the slack is zero, then the
            corresponding constraint is active.
        con : 1D array
            The (nominally zero) residuals of the equality constraints, that
            is, ``b - A_eq @ x``.
        phase : int
            The phase of the optimization being executed. In phase 1 a basic
            feasible solution is sought and the tableau ``T`` has an
            additional row representing an alternate objective function.
        status : int
            An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.
        complete : bool
            True if the algorithm has completed (and ``res`` holds the final
            result).
    """
    x = res['x']
    fun = res['fun']
    phase = res['phase']
    status = res['status']
    nit = res['nit']
    message = res['message']
    complete = res['complete']

    saved_printoptions = np.get_printoptions()
    np.set_printoptions(linewidth=500,
                        formatter={'float': lambda x: "{0: 12.4f}".format(x)})
    if status:
        print('--------- Simplex Early Exit -------\n')
        print('The simplex method exited early with status {0:d}'
              .format(status))
        print(message)
    elif complete:
        print('--------- Simplex Complete --------\n')
        print('Iterations required: {}'.format(nit))
    else:
        print('--------- Iteration {0:d} ---------\n'.format(nit))

    if nit > 0:
        if phase == 1:
            print('Current Pseudo-Objective Value:')
        else:
            print('Current Objective Value:')
        print('f = ', fun)
        print()
        print('Current Solution Vector:')
        print('x = ', x)
        print()

    np.set_printoptions(**saved_printoptions)


def linprog_terse_callback(res):
    """
    A sample callback function demonstrating the linprog callback interface.
    This callback produces brief output to sys.stdout before each iteration
    and after the final iteration of the simplex algorithm.

    Parameters
    ----------
    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:

        x : 1D array
            The independent variable vector which optimizes the linear
            programming problem.
        fun : float
            Value of the objective function.
        success : bool
            True if the algorithm succeeded in finding an optimal solution.
        slack : 1D array
            The values of the slack variables. Each slack variable corresponds
            to an inequality constraint. If the slack is zero, then the
            corresponding constraint is active.
        con : 1D array
            The (nominally zero) residuals of the equality constraints, that
            is, ``b - A_eq @ x``.
        phase : int
            The phase of the optimization being executed. In phase 1 a basic
            feasible solution is sought and the tableau ``T`` has an
            additional row representing an alternate objective function.
        status : int
            An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.
    """
    nit = res['nit']
    x = res['x']

    if nit == 0:
        print("Iter:   X:")
    print("{0: <5d}   ".format(nit), end="")
    print(x)
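

# A minimal usage sketch for the callbacks above (the problem data are
# arbitrary, borrowed from the `linprog` docstring example below):
#
#     from scipy.optimize import linprog, linprog_verbose_callback
#     res = linprog(c=[-1, 4], A_ub=[[-3, 1], [1, 2]], b_ub=[6, 4],
#                   method='simplex', callback=linprog_verbose_callback)
#
# Substituting `linprog_terse_callback` prints one line per iteration
# instead of the detailed report.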

 

 

def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
            bounds=None, method='interior-point', callback=None,
            options=None, x0=None):
    r"""
    Linear programming: minimize a linear objective function subject to
    linear equality and inequality constraints.

    Linear programming solves problems of the following form:

    .. math::

        \min_x \ & c^T x \\
        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
        & A_{eq} x = b_{eq},\\
        & l \leq x \leq u ,

    where :math:`x` is a vector of decision variables; :math:`c`,
    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
    :math:`A_{ub}` and :math:`A_{eq}` are matrices.

    Informally, that's:

    minimize::

        c @ x

    such that::

        A_ub @ x <= b_ub
        A_eq @ x == b_eq
        lb <= x <= ub

    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
    ``bounds``.

    Parameters
    ----------
    c : 1D array
        The coefficients of the linear objective function to be minimized.
    A_ub : 2D array, optional
        The inequality constraint matrix. Each row of ``A_ub`` specifies the
        coefficients of a linear inequality constraint on ``x``.
    b_ub : 1D array, optional
        The inequality constraint vector. Each element represents an
        upper bound on the corresponding value of ``A_ub @ x``.
    A_eq : 2D array, optional
        The equality constraint matrix. Each row of ``A_eq`` specifies the
        coefficients of a linear equality constraint on ``x``.
    b_eq : 1D array, optional
        The equality constraint vector. Each element of ``A_eq @ x`` must
        equal the corresponding element of ``b_eq``.
    bounds : sequence, optional
        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
        the minimum and maximum values of that decision variable. Use ``None``
        to indicate that there is no bound. By default, bounds are
        ``(0, None)`` (all decision variables are non-negative).
        If a single tuple ``(min, max)`` is provided, then ``min`` and
        ``max`` will serve as bounds for all decision variables.
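
        For instance, ``bounds=[(0, None), (None, None), (-3, 3)]`` leaves
        the first variable non-negative, the second free, and restricts the
        third to :math:`-3 \leq x_2 \leq 3`.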

    method : {'interior-point', 'revised simplex', 'simplex'}, optional
        The algorithm used to solve the standard form problem.
        :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
        :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
        :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
        are supported.
    callback : callable, optional
        If a callback function is provided, it will be called at least once
        per iteration of the algorithm. The callback function must accept a
        single `scipy.optimize.OptimizeResult` consisting of the following
        fields:

            x : 1D array
                The current solution vector.
            fun : float
                The current value of the objective function ``c @ x``.
            success : bool
                ``True`` when the algorithm has completed successfully.
            slack : 1D array
                The (nominally positive) values of the slack,
                ``b_ub - A_ub @ x``.
            con : 1D array
                The (nominally zero) residuals of the equality constraints,
                ``b_eq - A_eq @ x``.
            phase : int
                The phase of the algorithm being executed.
            status : int
                An integer representing the status of the algorithm.

                ``0`` : Optimization proceeding nominally.

                ``1`` : Iteration limit reached.

                ``2`` : Problem appears to be infeasible.

                ``3`` : Problem appears to be unbounded.

                ``4`` : Numerical difficulties encountered.

            nit : int
                The current iteration number.
            message : str
                A string descriptor of the algorithm status.

    options : dict, optional
        A dictionary of solver options. All methods accept the following
        options:

            maxiter : int
                Maximum number of iterations to perform.
            disp : bool
                Set to ``True`` to print convergence messages.

        For method-specific options, see
        :func:`show_options('linprog') <show_options>`.
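
        For example, ``options={"maxiter": 5000, "disp": True}`` raises the
        iteration limit and prints progress messages (the values here are
        illustrative).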

 

    x0 : 1D array, optional
        Guess values of the decision variables, which will be refined by
        the optimization algorithm. This argument is currently used only by
        the 'revised simplex' method, and can only be used if `x0` represents
        a basic feasible solution.
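
        For example (a sketch reusing the problem from the Examples section
        below, where the optimal vertex ``[10, -3]`` serves as a basic
        feasible solution)::

            res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds],
                          method='revised simplex', x0=[10, -3])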

 

 

    Returns
    -------
    res : OptimizeResult
        A :class:`scipy.optimize.OptimizeResult` consisting of the fields:

            x : 1D array
                The values of the decision variables that minimize the
                objective function while satisfying the constraints.
            fun : float
                The optimal value of the objective function ``c @ x``.
            slack : 1D array
                The (nominally positive) values of the slack variables,
                ``b_ub - A_ub @ x``.
            con : 1D array
                The (nominally zero) residuals of the equality constraints,
                ``b_eq - A_eq @ x``.
            success : bool
                ``True`` when the algorithm succeeds in finding an optimal
                solution.
            status : int
                An integer representing the exit status of the algorithm.

                ``0`` : Optimization terminated successfully.

                ``1`` : Iteration limit reached.

                ``2`` : Problem appears to be infeasible.

                ``3`` : Problem appears to be unbounded.

                ``4`` : Numerical difficulties encountered.

            nit : int
                The total number of iterations performed in all phases.
            message : str
                A string descriptor of the exit status of the algorithm.

    See Also
    --------
    show_options : Additional options accepted by the solvers.

    Notes
    -----
    This section describes the available solvers that can be selected by the
    'method' parameter.

    :ref:`'interior-point' <optimize.linprog-interior-point>` is the default
    as it is typically the fastest and most robust method.
    :ref:`'revised simplex' <optimize.linprog-revised_simplex>` is more
    accurate for the problems it solves.
    :ref:`'simplex' <optimize.linprog-simplex>` is the legacy method and is
    included for backwards compatibility and educational purposes.

    Method *interior-point* uses the primal-dual path following algorithm
    as outlined in [4]_. This algorithm supports sparse constraint matrices
    and is typically faster than the simplex methods, especially for large,
    sparse problems. Note, however, that the solution returned may be
    slightly less accurate than those of the simplex methods and will not,
    in general, correspond with a vertex of the polytope defined by the
    constraints.

    .. versionadded:: 1.0.0

    Method *revised simplex* uses the revised simplex method as described in
    [9]_, except that a factorization [11]_ of the basis matrix, rather than
    its inverse, is efficiently maintained and used to solve the linear
    systems at each iteration of the algorithm.

    .. versionadded:: 1.3.0

    Method *simplex* uses a traditional, full-tableau implementation of
    Dantzig's simplex algorithm [1]_, [2]_ (*not* the
    Nelder-Mead simplex). This algorithm is included for backwards
    compatibility and educational purposes.

    .. versionadded:: 0.15.0

    Before applying any method, a presolve procedure based on [8]_ attempts
    to identify trivial infeasibilities, trivial unboundedness, and potential
    problem simplifications. Specifically, it checks for:

    - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
    - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
      variables;
    - column singletons in ``A_eq``, representing fixed variables; and
    - column singletons in ``A_ub``, representing simple bounds.

    If presolve reveals that the problem is unbounded (e.g. an unconstrained
    and unbounded variable has negative cost) or infeasible (e.g. a row of
    zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
    terminates with the appropriate status code. Note that presolve terminates
    as soon as any sign of unboundedness is detected; consequently, a problem
    may be reported as unbounded when in reality the problem is infeasible
    (but infeasibility has not been detected yet). Therefore, if it is
    important to know whether the problem is actually infeasible, solve the
    problem again with option ``presolve=False``.

    If neither infeasibility nor unboundedness are detected in a single pass
    of the presolve, bounds are tightened where possible and fixed
    variables are removed from the problem. Then, linearly dependent rows
    of the ``A_eq`` matrix are removed (unless they represent an
    infeasibility) to avoid numerical difficulties in the primary solve
    routine. Note that rows that are nearly linearly dependent (within a
    prescribed tolerance) may also be removed, which can change the optimal
    solution in rare cases. If this is a concern, eliminate redundancy from
    your problem formulation and run with option ``rr=False`` or
    ``presolve=False``.
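
    For example, redundancy removal and the presolve can be disabled with::

        res = linprog(c, A_eq=A_eq, b_eq=b_eq, options={"rr": False})
        res = linprog(c, A_eq=A_eq, b_eq=b_eq, options={"presolve": False})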

 

    Several potential improvements can be made here: additional presolve
    checks outlined in [8]_ should be implemented, the presolve routine
    should be run multiple times (until no further simplifications can be
    made), and more of the efficiency improvements from [5]_ should be
    implemented in the redundancy removal routines.

    After presolve, the problem is transformed to standard form by converting
    the (tightened) simple bounds to upper bound constraints, introducing
    non-negative slack variables for inequality constraints, and expressing
    unbounded variables as the difference between two non-negative variables.
    The selected algorithm solves the standard form problem, and a
    postprocessing routine converts this to a solution to the original
    problem.
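
    Informally, the standard form problem handed to the algorithms is::

        minimize    c @ x
        such that   A @ x == b
                    x >= 0

    where ``A``, ``b``, and ``c`` collect the original constraints, the
    slack variables, and the variable substitutions described above.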

 

    References
    ----------
    .. [1] Dantzig, George B., Linear programming and extensions. Rand
           Corporation Research Study Princeton Univ. Press, Princeton, NJ,
           1963
    .. [2] Hillier, F.S. and Lieberman, G.J. (1995), "Introduction to
           Mathematical Programming", McGraw-Hill, Chapter 4.
    .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
           Mathematics of Operations Research (2), 1977: pp. 103-107.
    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior
           point optimizer for linear programming: an implementation of the
           homogeneous algorithm." High performance optimization. Springer
           US, 2000. 197-232.
    .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
           large-scale linear programming." Optimization Methods and Software
           6.3 (1995): 219-227.
    .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
           Programming based on Newton's Method." Unpublished Course Notes,
           March 2004. Available 2/25/2017 at
           https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
    .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point
           Methods." Unpublished Course Notes, August 26, 2005. Available
           2/25/2017 at http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
    .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
           programming." Mathematical Programming 71.2 (1995): 221-245.
    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
           programming." Athena Scientific 1 (1997): 997.
    .. [10] Andersen, Erling D., et al. Implementation of interior point
            methods for large scale linear programming. HEC/Universite de
            Geneve, 1996.
    .. [11] Bartels, Richard H. "A stabilization of the simplex method."
            Numerische Mathematik 16.5 (1971): 414-434.

    Examples
    --------
    Consider the following problem:

    .. math::

        \min_{x_0, x_1} \ -x_0 + 4x_1 & \\
        \mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
        -x_0 - 2x_1 & \geq -4,\\
        x_1 & \geq -3.

    The problem is not presented in the form accepted by `linprog`. This is
    easily remedied by converting the "greater than" inequality
    constraint to a "less than" inequality constraint by
    multiplying both sides by a factor of :math:`-1`. Note also that the last
    constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
    Finally, since there are no bounds on :math:`x_0`, we must explicitly
    specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
    default is for variables to be non-negative. After collecting
    coefficients into arrays and tuples, the input for this problem is:

    >>> c = [-1, 4]
    >>> A = [[-3, 1], [1, 2]]
    >>> b = [6, 4]
    >>> x0_bounds = (None, None)
    >>> x1_bounds = (-3, None)
    >>> from scipy.optimize import linprog
    >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])

    Note that the default method for `linprog` is 'interior-point', which is
    approximate by nature.

    >>> print(res)
         con: array([], dtype=float64)
         fun: -21.99999984082494 # may vary
     message: 'Optimization terminated successfully.'
         nit: 6 # may vary
       slack: array([3.89999997e+01, 8.46872439e-08]) # may vary
      status: 0
     success: True
           x: array([ 9.99999989, -2.99999999]) # may vary

    If you need greater accuracy, try 'revised simplex'.

    >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds],
    ...               method='revised simplex')
    >>> print(res)
         con: array([], dtype=float64)
         fun: -22.0 # may vary
     message: 'Optimization terminated successfully.'
         nit: 1 # may vary
       slack: array([39., 0.]) # may vary
      status: 0
     success: True
           x: array([10., -3.]) # may vary

    """
    meth = method.lower()
    default_tol = 1e-12 if meth == 'simplex' else 1e-9

    if x0 is not None and meth != "revised simplex":
        warning_message = "x0 is used only when method is 'revised simplex'."
        warn(warning_message, OptimizeWarning)

    c, A_ub, b_ub, A_eq, b_eq, bounds, solver_options, x0 = _parse_linprog(
        c, A_ub, b_ub, A_eq, b_eq, bounds, options, x0)
    tol = solver_options.get('tol', default_tol)

    iteration = 0
    complete = False  # will become True if solved in presolve
    undo = []

    # Keep the original arrays to calculate slack/residuals for original
    # problem.
    c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o = (
        c.copy(), A_ub.copy(), b_ub.copy(), A_eq.copy(), b_eq.copy())

    # Solve trivial problem, eliminate variables, tighten bounds, etc...
    c0 = 0  # we might get a constant term in the objective
    if solver_options.pop('presolve', True):
        rr = solver_options.pop('rr', True)
        (c, c0, A_ub, b_ub, A_eq, b_eq, bounds, x, x0, undo, complete, status,
         message) = _presolve(c, A_ub, b_ub, A_eq, b_eq, bounds, x0, rr, tol)

    if not complete:
        A, b, c, c0, x0 = _get_Abc(c, c0, A_ub, b_ub, A_eq,
                                   b_eq, bounds, x0, undo)
        T_o = (c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o, bounds, undo)
        if meth == 'simplex':
            x, status, message, iteration = _linprog_simplex(
                c, c0=c0, A=A, b=b, callback=callback, _T_o=T_o,
                **solver_options)
        elif meth == 'interior-point':
            x, status, message, iteration = _linprog_ip(
                c, c0=c0, A=A, b=b, callback=callback, _T_o=T_o,
                **solver_options)
        elif meth == 'revised simplex':
            x, status, message, iteration = _linprog_rs(
                c, c0=c0, A=A, b=b, x0=x0, callback=callback, _T_o=T_o,
                **solver_options)
        else:
            raise ValueError('Unknown solver %s' % method)

    # Eliminate artificial variables, re-introduce presolved variables, etc.
    # Need modified bounds here to translate variables appropriately.
    disp = solver_options.get('disp', False)
    x, fun, slack, con, status, message = _postprocess(
        x, c_o, A_ub_o, b_ub_o, A_eq_o, b_eq_o, bounds,
        complete, undo, status, message, tol, iteration, disp)

    sol = {
        'x': x,
        'fun': fun,
        'slack': slack,
        'con': con,
        'status': status,
        'message': message,
        'nit': iteration,
        'success': status == 0}

    return OptimizeResult(sol)