"""Constraints definition for minimize."""
import numpy as np
from ._hessian_update_strategy import BFGS
from ._differentiable_functions import (
    VectorFunction, LinearVectorFunction, IdentityVectorFunction)
from .optimize import OptimizeWarning
from warnings import warn
from numpy.testing import suppress_warnings
from scipy.sparse import issparse


class NonlinearConstraint(object):
    """Nonlinear constraint on the variables.

    The constraint has the general inequality form::

        lb <= fun(x) <= ub

    Here the vector of independent variables x is passed as ndarray of shape
    (n,) and ``fun`` returns a vector with m components.

    It is possible to use equal bounds to represent an equality constraint or
    infinite bounds to represent a one-sided constraint.

    Parameters
    ----------
    fun : callable
        The function defining the constraint.
        The signature is ``fun(x) -> array_like, shape (m,)``.
    lb, ub : array_like
        Lower and upper bounds on the constraint. Each array must have the
        shape (m,) or be a scalar; in the latter case the bound will be the
        same for all components of the constraint. Use ``np.inf`` with an
        appropriate sign to specify a one-sided constraint.
        Set components of `lb` and `ub` equal to represent an equality
        constraint. Note that you can mix constraints of different types:
        interval, one-sided or equality, by setting different components of
        `lb` and `ub` as necessary.
    jac : {callable, '2-point', '3-point', 'cs'}, optional
        Method of computing the Jacobian matrix (an m-by-n matrix,
        where element (i, j) is the partial derivative of f[i] with
        respect to x[j]). The keywords {'2-point', '3-point',
        'cs'} select a finite difference scheme for the numerical estimation.
        A callable must have the following signature:
        ``jac(x) -> {ndarray, sparse matrix}, shape (m, n)``.
        Default is '2-point'.
    hess : {callable, '2-point', '3-point', 'cs', HessianUpdateStrategy, None}, optional
        Method for computing the Hessian matrix. The keywords
        {'2-point', '3-point', 'cs'} select a finite difference scheme for
        numerical estimation. Alternatively, objects implementing the
        `HessianUpdateStrategy` interface can be used to approximate the
        Hessian. Currently available implementations are:

        - `BFGS` (default option)
        - `SR1`

        A callable must return the Hessian matrix of ``dot(fun, v)`` and
        must have the following signature:
        ``hess(x, v) -> {LinearOperator, sparse matrix, array_like}, shape (n, n)``.
        Here ``v`` is an ndarray with shape (m,) containing Lagrange
        multipliers.
    keep_feasible : array_like of bool, optional
        Whether to keep the constraint components feasible throughout
        iterations. A single value sets this property for all components.
        Default is False. Has no effect for equality constraints.
    finite_diff_rel_step : None or array_like, optional
        Relative step size for the finite difference approximation. Default is
        None, which selects a reasonable value automatically depending
        on the finite difference scheme.
    finite_diff_jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines the sparsity structure of the Jacobian matrix for finite
        difference estimation; its shape must be (m, n). If the Jacobian has
        only a few non-zero elements in *each* row, providing the sparsity
        structure will greatly speed up the computations. A zero entry means
        that the corresponding element of the Jacobian is identically zero.
        If provided, forces the use of the 'lsmr' trust-region solver.
        If None (default) then dense differencing will be used.

    Notes
    -----
    Finite difference schemes {'2-point', '3-point', 'cs'} may be used for
    approximating either the Jacobian or the Hessian. We, however, do not
    allow their use for approximating both simultaneously. Hence whenever the
    Jacobian is estimated via finite differences, we require the Hessian to
    be estimated using one of the quasi-Newton strategies.

    The scheme 'cs' is potentially the most accurate, but requires the
    function to correctly handle complex inputs and be analytically
    continuable to the complex plane. The scheme '3-point' is more accurate
    than '2-point' but requires twice as many operations.

    Examples
    --------
    Constrain ``x[0] < sin(x[1]) + 1.9``

    >>> import numpy as np
    >>> from scipy.optimize import NonlinearConstraint
    >>> con = lambda x: x[0] - np.sin(x[1])
    >>> nlc = NonlinearConstraint(con, -np.inf, 1.9)
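
    One possible way (a small illustrative sketch, not the only option) to
    supply an analytic Jacobian for the same constraint together with an
    `SR1` quasi-Newton approximation of the Hessian:

    >>> from scipy.optimize import SR1
    >>> con_jac = lambda x: np.array([[1.0, -np.cos(x[1])]])
    >>> nlc = NonlinearConstraint(con, -np.inf, 1.9, jac=con_jac, hess=SR1())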

    """
    def __init__(self, fun, lb, ub, jac='2-point', hess=BFGS(),
                 keep_feasible=False, finite_diff_rel_step=None,
                 finite_diff_jac_sparsity=None):
        self.fun = fun
        self.lb = lb
        self.ub = ub
        self.finite_diff_rel_step = finite_diff_rel_step
        self.finite_diff_jac_sparsity = finite_diff_jac_sparsity
        self.jac = jac
        self.hess = hess
        self.keep_feasible = keep_feasible


class LinearConstraint(object):
    """Linear constraint on the variables.

    The constraint has the general inequality form::

        lb <= A.dot(x) <= ub

    Here the vector of independent variables x is passed as ndarray of shape
    (n,) and the matrix A has shape (m, n).

    It is possible to use equal bounds to represent an equality constraint or
    infinite bounds to represent a one-sided constraint.

    Parameters
    ----------
    A : {array_like, sparse matrix}, shape (m, n)
        Matrix defining the constraint.
    lb, ub : array_like
        Lower and upper bounds on the constraint. Each array must have the
        shape (m,) or be a scalar; in the latter case the bound will be the
        same for all components of the constraint. Use ``np.inf`` with an
        appropriate sign to specify a one-sided constraint.
        Set components of `lb` and `ub` equal to represent an equality
        constraint. Note that you can mix constraints of different types:
        interval, one-sided or equality, by setting different components of
        `lb` and `ub` as necessary.
    keep_feasible : array_like of bool, optional
        Whether to keep the constraint components feasible throughout
        iterations. A single value sets this property for all components.
        Default is False. Has no effect for equality constraints.
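
    Examples
    --------
    One possible use, shown as a small sketch: keep ``x[0] + 2*x[1]`` between
    0 and 1 while fixing ``x[0] - x[1]`` to 0 with a single constraint.

    >>> import numpy as np
    >>> from scipy.optimize import LinearConstraint
    >>> A = np.array([[1, 2], [1, -1]])
    >>> lc = LinearConstraint(A, [0, 0], [1, 0])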

    """
    def __init__(self, A, lb, ub, keep_feasible=False):
        self.A = A
        self.lb = lb
        self.ub = ub
        self.keep_feasible = keep_feasible


class Bounds(object):
    """Bounds constraint on the variables.

    The constraint has the general inequality form::

        lb <= x <= ub

    It is possible to use equal bounds to represent an equality constraint or
    infinite bounds to represent a one-sided constraint.

    Parameters
    ----------
    lb, ub : array_like, optional
        Lower and upper bounds on independent variables. Each array must
        have the same size as x or be a scalar, in which case a bound will be
        the same for all the variables. Set components of `lb` and `ub` equal
        to fix a variable. Use ``np.inf`` with an appropriate sign to disable
        bounds on all or some variables. Note that you can mix constraints of
        different types: interval, one-sided or equality, by setting different
        components of `lb` and `ub` as necessary.
    keep_feasible : array_like of bool, optional
        Whether to keep the constraint components feasible throughout
        iterations. A single value sets this property for all components.
        Default is False. Has no effect for equality constraints.
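
    Examples
    --------
    A small sketch of a possible use: keep ``x[0]`` in [0, 1] and require
    ``x[1] >= 2``, leaving it unbounded from above.

    >>> import numpy as np
    >>> from scipy.optimize import Bounds
    >>> bounds = Bounds([0, 2], [1, np.inf])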

    """
    def __init__(self, lb, ub, keep_feasible=False):
        self.lb = lb
        self.ub = ub
        self.keep_feasible = keep_feasible

    def __repr__(self):
        if np.any(self.keep_feasible):
            return "{}({!r}, {!r}, keep_feasible={!r})".format(
                type(self).__name__, self.lb, self.ub, self.keep_feasible)
        else:
            return "{}({!r}, {!r})".format(
                type(self).__name__, self.lb, self.ub)


class PreparedConstraint(object):
    """Constraint prepared from a user defined constraint.

    On creation it will check whether a constraint definition is valid and
    the initial point is feasible. If created successfully, it will contain
    the attributes listed below.

    Parameters
    ----------
    constraint : {NonlinearConstraint, LinearConstraint, Bounds}
        Constraint to check and prepare.
    x0 : array_like
        Initial vector of independent variables.
    sparse_jacobian : bool or None, optional
        If bool, then the Jacobian of the constraint will be converted
        to the corresponding format if necessary. If None (default), such
        conversion is not made.
    finite_diff_bounds : 2-tuple, optional
        Lower and upper bounds on the independent variables for the finite
        difference approximation, if applicable. Defaults to no bounds.

    Attributes
    ----------
    fun : {VectorFunction, LinearVectorFunction, IdentityVectorFunction}
        Function defining the constraint wrapped by one of the convenience
        classes.
    bounds : 2-tuple
        Contains lower and upper bounds for the constraints --- lb and ub.
        These are converted to ndarray and have a size equal to the number of
        the constraints.
    keep_feasible : ndarray
        Array indicating which components must be kept feasible with a size
        equal to the number of the constraints.
    """
    def __init__(self, constraint, x0, sparse_jacobian=None,
                 finite_diff_bounds=(-np.inf, np.inf)):
        if isinstance(constraint, NonlinearConstraint):
            fun = VectorFunction(constraint.fun, x0,
                                 constraint.jac, constraint.hess,
                                 constraint.finite_diff_rel_step,
                                 constraint.finite_diff_jac_sparsity,
                                 finite_diff_bounds, sparse_jacobian)
        elif isinstance(constraint, LinearConstraint):
            fun = LinearVectorFunction(constraint.A, x0, sparse_jacobian)
        elif isinstance(constraint, Bounds):
            fun = IdentityVectorFunction(x0, sparse_jacobian)
        else:
            raise ValueError("`constraint` of an unknown type is passed.")

        m = fun.m
        lb = np.asarray(constraint.lb, dtype=float)
        ub = np.asarray(constraint.ub, dtype=float)
        if lb.ndim == 0:
            lb = np.resize(lb, m)
        if ub.ndim == 0:
            ub = np.resize(ub, m)

        keep_feasible = np.asarray(constraint.keep_feasible, dtype=bool)
        if keep_feasible.ndim == 0:
            keep_feasible = np.resize(keep_feasible, m)
        if keep_feasible.shape != (m,):
            raise ValueError("`keep_feasible` has a wrong shape.")

        mask = keep_feasible & (lb != ub)
        f0 = fun.f
        if np.any(f0[mask] < lb[mask]) or np.any(f0[mask] > ub[mask]):
            raise ValueError("`x0` is infeasible with respect to some "
                             "inequality constraint with `keep_feasible` "
                             "set to True.")

        self.fun = fun
        self.bounds = (lb, ub)
        self.keep_feasible = keep_feasible

    def violation(self, x):
        """Amount by which the constraint is exceeded.

        Parameters
        ----------
        x : array_like
            Vector of independent variables.

        Returns
        -------
        excess : array_like
            Amount by which the constraint is exceeded, for each of the
            constraints specified by `PreparedConstraint.fun`.
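
        Examples
        --------
        A minimal sketch using a `Bounds` constraint, for which the wrapped
        function is simply the identity:

        >>> import numpy as np
        >>> from scipy.optimize import Bounds
        >>> pc = PreparedConstraint(Bounds([0, 0], [1, 1]), x0=[0.5, 0.5])
        >>> excess = pc.violation([2.0, -1.0])  # array([1., 1.])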

        """
        with suppress_warnings() as sup:
            sup.filter(UserWarning)
            ev = self.fun.fun(np.asarray(x))

        excess_lb = np.maximum(self.bounds[0] - ev, 0)
        excess_ub = np.maximum(ev - self.bounds[1], 0)

        return excess_lb + excess_ub


def new_bounds_to_old(lb, ub, n):
    """Convert the new bounds representation to the old one.

    The new representation is a tuple (lb, ub) and the old one is a list
    containing n tuples, the ith of which contains the lower and upper bounds
    for the ith variable.
    If any of the entries in lb/ub are -np.inf/np.inf they are replaced by
    None.
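
    For example (an illustrative call; the scalar lower bound is broadcast to
    both variables):

    >>> import numpy as np
    >>> old = new_bounds_to_old(0, [1, np.inf], 2)  # [(0, 1), (0, None)]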

    """
    lb = np.asarray(lb)
    ub = np.asarray(ub)
    if lb.ndim == 0:
        lb = np.resize(lb, n)
    if ub.ndim == 0:
        ub = np.resize(ub, n)

    lb = [x if x > -np.inf else None for x in lb]
    ub = [x if x < np.inf else None for x in ub]

    return list(zip(lb, ub))


def old_bound_to_new(bounds):
    """Convert the old bounds representation to the new one.

    The new representation is a tuple (lb, ub) and the old one is a list
    containing n tuples, the ith of which contains the lower and upper bounds
    for the ith variable.
    If any of the entries in lb/ub are None they are replaced by
    -np.inf/np.inf.
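
    For example (an illustrative call, the inverse of the conversion above):

    >>> lb, ub = old_bound_to_new([(0, 1), (0, None)])  # [0., 0.], [1., inf]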

    """
    lb, ub = zip(*bounds)
    lb = np.array([x if x is not None else -np.inf for x in lb])
    ub = np.array([x if x is not None else np.inf for x in ub])
    return lb, ub


def strict_bounds(lb, ub, keep_feasible, n_vars):
    """Remove bounds which are not asked to be kept feasible."""
    strict_lb = np.resize(lb, n_vars).astype(float)
    strict_ub = np.resize(ub, n_vars).astype(float)
    keep_feasible = np.resize(keep_feasible, n_vars)
    strict_lb[~keep_feasible] = -np.inf
    strict_ub[~keep_feasible] = np.inf
    return strict_lb, strict_ub


def new_constraint_to_old(con, x0):
    """
    Converts new-style constraint objects to old-style constraint dictionaries.
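
    For example, a two-sided ``LinearConstraint`` becomes a single 'ineq'
    dictionary whose function stacks the lower-bound and upper-bound
    residuals (a small sketch of the conversion):

    >>> import numpy as np
    >>> con = LinearConstraint(np.array([[1.0, 2.0]]), 0, 1)
    >>> old = new_constraint_to_old(con, x0=np.zeros(2))
    >>> old[0]['type']
    'ineq'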

    """
    if isinstance(con, NonlinearConstraint):
        if (con.finite_diff_jac_sparsity is not None or
                con.finite_diff_rel_step is not None or
                not isinstance(con.hess, BFGS) or  # misses user specified BFGS
                con.keep_feasible):
            warn("Constraint options `finite_diff_jac_sparsity`, "
                 "`finite_diff_rel_step`, `keep_feasible`, and `hess` "
                 "are ignored by this method.", OptimizeWarning)

        fun = con.fun
        if callable(con.jac):
            jac = con.jac
        else:
            jac = None

    else:  # LinearConstraint
        if con.keep_feasible:
            warn("Constraint option `keep_feasible` is ignored by this "
                 "method.", OptimizeWarning)

        A = con.A
        if issparse(A):
            A = A.todense()
        fun = lambda x: np.dot(A, x)
        jac = lambda x: A

    # FIXME: when bugs in VectorFunction/LinearVectorFunction are worked out,
    # use pcon.fun.fun and pcon.fun.jac. Until then, get fun/jac above.
    pcon = PreparedConstraint(con, x0)
    lb, ub = pcon.bounds

    i_eq = lb == ub
    i_bound_below = np.logical_xor(lb != -np.inf, i_eq)
    i_bound_above = np.logical_xor(ub != np.inf, i_eq)
    i_unbounded = np.logical_and(lb == -np.inf, ub == np.inf)

    if np.any(i_unbounded):
        warn("At least one constraint is unbounded above and below. Such "
             "constraints are ignored.", OptimizeWarning)

    ceq = []
    if np.any(i_eq):
        def f_eq(x):
            y = np.array(fun(x)).flatten()
            return y[i_eq] - lb[i_eq]
        ceq = [{"type": "eq", "fun": f_eq}]

        if jac is not None:
            def j_eq(x):
                dy = jac(x)
                if issparse(dy):
                    dy = dy.todense()
                dy = np.atleast_2d(dy)
                return dy[i_eq, :]
            ceq[0]["jac"] = j_eq

    cineq = []
    n_bound_below = np.sum(i_bound_below)
    n_bound_above = np.sum(i_bound_above)
    if n_bound_below + n_bound_above:
        def f_ineq(x):
            y = np.zeros(n_bound_below + n_bound_above)
            y_all = np.array(fun(x)).flatten()
            y[:n_bound_below] = y_all[i_bound_below] - lb[i_bound_below]
            y[n_bound_below:] = -(y_all[i_bound_above] - ub[i_bound_above])
            return y
        cineq = [{"type": "ineq", "fun": f_ineq}]

        if jac is not None:
            def j_ineq(x):
                dy = np.zeros((n_bound_below + n_bound_above, len(x0)))
                dy_all = jac(x)
                if issparse(dy_all):
                    dy_all = dy_all.todense()
                dy_all = np.atleast_2d(dy_all)
                dy[:n_bound_below, :] = dy_all[i_bound_below]
                dy[n_bound_below:, :] = -dy_all[i_bound_above]
                return dy
            cineq[0]["jac"] = j_ineq

    old_constraints = ceq + cineq

    if len(old_constraints) > 1:
        warn("Equality and inequality constraints are specified in the same "
             "element of the constraint list. For efficient use with this "
             "method, equality and inequality constraints should be specified "
             "in separate elements of the constraint list.", OptimizeWarning)
    return old_constraints


def old_constraint_to_new(ic, con):
    """
    Converts old-style constraint dictionaries to new-style constraint objects.
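
    For example (a small sketch of the mapping; `ic` is only used in error
    messages):

    >>> con = {'type': 'ineq', 'fun': lambda x: x[0]}
    >>> nlc = old_constraint_to_new(0, con)
    >>> (nlc.lb, nlc.ub)
    (0, inf)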

    """
    # check type
    try:
        ctype = con['type'].lower()
    except KeyError:
        raise KeyError('Constraint %d has no type defined.' % ic)
    except TypeError:
        raise TypeError('Constraints must be a sequence of dictionaries.')
    except AttributeError:
        raise TypeError("Constraint's type must be a string.")
    else:
        if ctype not in ['eq', 'ineq']:
            raise ValueError("Unknown constraint type '%s'." % con['type'])
    if 'fun' not in con:
        raise ValueError('Constraint %d has no function defined.' % ic)

    lb = 0
    if ctype == 'eq':
        ub = 0
    else:
        ub = np.inf

    jac = '2-point'
    if 'args' in con:
        args = con['args']
        fun = lambda x: con['fun'](x, *args)
        if 'jac' in con:
            jac = lambda x: con['jac'](x, *args)
    else:
        fun = con['fun']
        if 'jac' in con:
            jac = con['jac']

    return NonlinearConstraint(fun, lb, ub, jac)