NumericalOptimizationForFuncDesignerModels
Note: all first derivatives for the nonlinear problem classes are obtained via automatic differentiation, not to be confused with finite-difference derivative approximation. For the list of problems and solvers available in the OpenOpt framework, see the OpenOpt problems tree.
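For instance, you can inspect both the value and the automatically obtained derivatives of any FuncDesigner function via its D method; a minimal sketch (the variables and the sample point are illustrative):

from FuncDesigner import *

x, y = oovars('x', 'y')
f = x**2 + 3*y + sin(x*y)
point = {x: 1.0, y: 2.0}

print(f(point))   # value of f at the point
print(f.D(point)) # dict {oovar: derivative}, obtained via automatic differentiation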
LLSP example
from FuncDesigner import *
from openopt import LLSP

# create some variables
a, b, c = oovars('a', 'b', 'c') # or just a, b, c = oovars(3)

# the start point is unused by the LAPACK-based solvers (e.g. lapack_dgelss),
# but it is required to set dimensions of variables;
# it is also used by some converters, e.g. r = p.solve('nlp:ralg')
startPoint = {a:0, b:0, c:0}
# in the general case variables can be arrays,
# e.g. startPoint = {a:zeros(100), b:[0, 2], c:ones(20000)}

# overdetermined system of 4 linear equations with 3 variables

# you can use a "for" loop or list comprehension and operations like sum(), e.g.
# startPoint = {a:[0, 10], b:0, c:0}
# f = [sum(a) + i * b + 4 * sqrt(i) * c - 2 * i**2 for i in xrange(40)]

f = [2*a+3*b-4*c+5, 2*a+13*b+15, a+4*b+4*c-25, 20*a+30*b-4*c+50]

# alternatively, you can use the following vectorized form
measurements_a_koeff = [2, 2, 1, 20]
measurements_b_koeff = [3, 13, 4, 30]
measurements_c_koeff = [-4, 0, 4, -4]
d = [5, 15, -25, 50]
f = a * measurements_a_koeff + b * measurements_b_koeff + c * measurements_c_koeff + d

# you can set several oofuns with different output sizes,
# e.g. f = [myOOFun1, myOOFun2, ..., myOOfunN]

# assign prob
p = LLSP(f, startPoint)

# solve
r = p.solve('lsqr')
a_sol, b_sol, c_sol = r(a, b, c)

# print result
print(r.xf)
# Expected output:
# {a: array([-0.3091145]), b: array([-0.86376906]), c: array([ 4.03827441])}
LP example
# Example of creating LP in FuncDesigner
# and solving it via OpenOpt
from FuncDesigner import *
from openopt import LP

# Define some oovars
x, y, z = oovars(3)

# Let's define some linear functions
f1 = 4*x + 5*y + 3*z + 5
f2 = f1.sum() + 2*x + 4*y + 15
f3 = 5*f1 + 4*f2 + 20

# Define objective; sum(a) and a.sum() are the same, just as for numpy arrays
obj = x.sum() + y - 50*z + sum(f3) + 2*f2.sum() + 4064.6

# Define some constraints via a Python list, tuple or set of any length,
# possibly built via while/for loops
constraints = [x+5*y<15, x[0]<4, f1<[25, 35], f1>-100, 2*f1+4*z<[80, 800],
               5*f2+4*z<100, -5<x, x<1, -20<y, y<20, -4000<z, z<4]

# Start point - currently only the sizes of variables matter;
# glpk, lpSolve and cvxopt_lp use an all-zeros start value
startPoint = {x:[8, 15], y:25, z:80} # however, you'd better use numpy arrays instead of Python lists

# Create prob
p = LP(obj, startPoint, constraints = constraints)

# Solve
r = p.solve('glpk') # glpk is the name of the solver involved, see OOF doc for more arguments

# Decode solution
print('Solution: x = %s   y = %f  z = %f' % (str(x(r)), y(r), z(r)))
# Solution: x = [-4.25 -4.25]   y = -20.000000  z = 4.000000
See also another FD LP example.
SLE example
A simple example of solving an SLE (system of linear equations) with FuncDesigner:
from FuncDesigner import *

# create some variables
a, b, c = oovars('a', 'b', 'c') # or just a, b, c = oovars(3)

# Python list of 3 linear equations with 3 variables
f = [2*a+3*b-2*c+5, 2*a+13*b+15, a+4*b+2*c-45]
# alternatively, you could pass equations (the example below is valid for FD 0.18+):
# f = [2*a+3*b-2*c == -5, 2*a+15 == -13*b, a+45 == 4*b+2*c]

# assign SLE
linSys = sle(f)
r = linSys.solve()
A, B, C = r(a, b, c)
maxRes = r.ff # max residual
# Expected result:
# {b: array([-5.]), a: array([ 25.]), c: array([ 20.])}
More advanced SLE example
from FuncDesigner import *

n = 100

# create some variables
a, b, c = oovar(), oovar(size=n), oovar(size=2*n)

# let's construct some linear functions R^i -> R^j
# in Python range(m, k) is [m, m+1, ..., k-1]
f1 = a + sum(b*range(5, n+5)) # R^(n+1) -> R
f2 = a + 2*b + c.sum() # R^(2n+1) -> R^n

# you could use the size of oovars
f3 = a + a.size + 2*c.size # R^(2n+1) -> R; a.size and c.size will be resolved into 1 and 2*n

f4 = c + range(4, 2*n+4) + f1 + 0.5*f2.sum() + 4*f3

# We can use a "for" loop:
for i in xrange(4):
    f4 = 0.5*f4 + a + f1 + 1

# Also, we could use matrix multiplication, e.g. f5 = dot(someMatrix, f4):
rng = 1.5 + cos(range(2*n)).reshape(-1, 1) # define 2n x 1 vector
R = dot(rng, rng.T) # create a matrix of shape 2n x 2n
f4 = dot(R, f4) # involve matrix multiplication

# Create Python list of linear equations
f = [a+f4+5, 2*a+b*range(10, n+10)+15, a+4*b.sum()+2*c.sum()-45]
# alternatively, you could pass equations:
# f = [a+f4==-5, 2*a+b==-15, a==-4*b.sum()-2*c.sum()+45]

linSys = sle(f)
r = linSys.solve()

# get result
A, B, C = r(a, b, c)
# A, B, C will be numpy arrays of sizes 1, n, 2*n;
# if the solver failed, A, B, C will contain numpy.nan(s)
print('A=%f  B[0]=%f  C[15]=%f' % (A, B[0], C[15]))
# for n=100 it prints A=-5.000000  B[0]=-0.500000  C[15]=-1883724.947909

# you could also get residuals;
# here it will be a Python list of length 3
# (the number of (vectorized) equations)
# with elements of type numpy.array
maxResidual = r.ff
residuals = [F(r) for F in f]
Example of solving sparse SLE
(requires SciPy installed)
from FuncDesigner import *
from numpy import arange
from time import time

t = time()
N = 15000
a, b, c = oovar(), oovar(size=N), oovar(size=2*N)

f1 = a + sum(b*arange(5, N+5))
f2 = a + 2*b + c.sum()
f3 = a + a.size + 2*c.size
f4 = c + range(4, 2*N+4) + 4*f3

f = [a+f4+5, 2*a+b*range(10, N+10)+15, a+4*b.sum()+2*c.sum()-45]
# alternatively, you could pass equations:
# f = [a+f4==-5, 2*a+b==-15, a==-4*b.sum()-2*c.sum()+45]

linSys = sle(f)
print('Number of equations: ' + str(linSys.n)) # should print 3*N+1

r = linSys.solve()
# i.e. using autoselect: numpy.linalg.solve for dense SLEs
# (for current numpy 1.4 it's LAPACK dgesv)
# and scipy.sparse.linalg.spsolve for sparse SLEs

A, B, C = a(r), b(r), c(r) # or A, B, C = r[a], r[b], r[c]
print('A=%f  B[4]=%f  B[first]=%f  C[last]=%f' % (A, B[4], B[0], C[-1]))
maxResidual = r.ff

# Note - elapsed time may differ between runs due to different matrices
# obtained from SLE rendering (the rendering order is currently not fixed)
print('time elapsed: %f' % (time()-t))

#A=-50992.657068  B[4]=7283.593867  B[first]=10197.031414  C[last]=-15048.714662
#time elapsed: 2.829374
Rendering FuncDesigner SLE into ordinary SLE Ax=b
from FuncDesigner import *

# Create some variables
a, b = oovars('a', 'b')
c = oovar('c', size=2)

# Python list of linear equations
f = [2*a + 3*b - 2*(c+[1, 2]) * [3, 4] + 5, # R^nVars -> R^2
     2*a + 13*b + 15,                       # R^nVars -> R
     a + 4*b + 2*c.sum() - 45]              # R^nVars -> R
# Alternatively, you could pass equations:
# f = [2*a + 3*b - 2*(c+[1, 2]) * [3, 4] == -5,
#      2*a+15 == -13*b, a == -4*b-2*c.sum()+45]

# Assign SLE
linSys = sle(f)

# Rendered matrices for Ax=b are linSys.A (numpy.array of shape 4 x 4)
# and linSys.b (numpy.array of length 4)
A, B = linSys.A, linSys.b # "B" instead of "b", to prevent overriding the oovar b defined above
# However, for large sparse problems they can have a type other than numpy.ndarray,
# e.g. scipy.sparse lil_matrix or coo_matrix (provided you have scipy installed);
# otherwise they sometimes could not fit into your computer's RAM.
# You could cast them to ndarray, for example via myArr = myMatrix.toarray()

"""
Now let's solve it; this example uses numpy.linalg.solve
(that is the default dense solver for FD SLEs),
but you can use any other linear solver,
e.g. cvxopt, scipy.lapack or scipy.sparse solvers, PyAMG etc.
Also, you can encode it (via scipy.sparse or otherwise) into one of the common
sparse matrix formats, save it into a file and solve it via C, Fortran etc.
(BTW f2py or Cython could be useful here).
Current drawback: for huge sparse SLEs you should have enough memory
to handle dense A before casting to sparse is performed;
this may be fixed in future versions.
"""
from numpy import linalg
xf = linalg.solve(A, B)

# Decode solution
sol = linSys.decode(xf)
print(c(sol)[0])
print(sol)
# Expected output:
# 10.2
# {b: array([-7.72]), c: array([ 10.2,  6.4]), a: array([ 42.68])}
Eigenvalue analysis
(since v. 0.37)
See EIG for more details, including eigenvalue problems on matrices obtained via automatic differentiation of general functions.
from FuncDesigner import *
from numpy import arange

n = 100

# create some variables
a, b, c = oovar('a'), oovar('b', size=n), oovar('c', size=2*n)

# let's construct some linear functions R^i -> R^j
# in Python range(m, k) is [m, m+1, ..., k-1]
f1 = a + sum(b*arange(5, n+5)) # R^(n+1) -> R
f2 = a + 2*b + c.sum() # R^(2n+1) -> R^n

# you could use the size of oovars
f3 = a + a.size + 2*c.size # R^(2n+1) -> R; a.size and c.size will be resolved into 1 and 2*n

f4 = c + arange(4, 2*n+4) + f1 + 0.5*f2.sum() + 4*f3

# We can use a "for" loop:
for i in range(4):
    f4 = 0.5*f4 + a + f1 + 1

# Also, we could use matrix multiplication, e.g. f5 = dot(someMatrix, f4):
rng = 1.5 + cos(range(2*n)).reshape(-1, 1) # define 2n x 1 vector
R = dot(rng, rng.T) # create a matrix of shape 2n x 2n
f4 = dot(R, f4) # involve matrix multiplication

# Create Python list of linear equations
f = [a+f4+5, 2*a+b*arange(10, n+10)+15, a+4*b.sum()+2*c.sum()-45]
# alternatively, you could pass equations:
f = [a+f4==-5, 2*a+b==-15, a==-4*b.sum()-2*c.sum()+45]

linSys = sle(f)
print('dimension of the SLE: %d' % linSys.n)
# dimension of the SLE: 301

# let's search for the 4 largest magnitude eigenvalues
r = linSys.eig(goal={'lm':4}, solver='arpack')
# or goal={'largest magnitude':4}, with or without a space inside, case-insensitive;
# for the whole list of available goals see http://openopt.org/EIG

print(r.eigenvalues)
#[ -1.35516602e-05 -1.71948079e-05j  -6.93570858e-01 +0.00000000e+00j
#   1.73033511e+00 +0.00000000e+00j   4.88614250e+06 +0.00000000e+00j]

# let's print the eigenvector for the last of the obtained eigenvalues:
print(r.eigenvectors[-1])
#{a: (-0.10673471576669166+0j),
# b: array([-0.08710844+0.j, -0.04627392+0.j, -0.02177422+0.j, -0.03613426+0.j, ..., -0.10085657+0.j]),
# c: array([ -1.02123985e-01+0.j,  -6.83779258e-02+0.j, ...,  -7.82334725e-06+0.j])}
print(r.eigenvectors[-1][a])
#(-0.10673471576669166+0j)
print(type(r.eigenvectors[-1][a]))
#<type 'numpy.complex128'>
NLP example
from FuncDesigner import *
from openopt import NLP

a, b, c = oovars('a', 'b', 'c')
f = sum(a*[1, 2])**2 + b**2 + c**2
startPoint = {a:[100, 12], b:2, c:40} # however, you'd better use numpy arrays instead of Python lists

p = NLP(f, startPoint)
p.constraints = [(2*c+a-10)**2 < 1.5 + 0.1*b, (a-10)**2 < 1.5, a[0] > 8.9,
                 a+b > [7.97999836, 7.8552538],
                 a < 9, (c-2)**2 < 1, b < -1.02, c > 1.01,
                 (b + c * log10(a).sum() - 1) ** 2 == 0]
r = p.solve('ralg')
a_opt, b_opt, c_opt = r(a, b, c)
print(a_opt, b_opt, c_opt)
# you can use any of the following as well:
# a_opt, b_opt, c_opt = r(a), r(b), r(c)
# r('a'), r('b'), r('c') (provided you have assigned the names to oovars as above)
# a(r), b(r), c(r)
"""
Expected output:
...
objFunValue: 717.75631 (feasible, max constraint =  7.44605e-07)
array([ 8.99999881,  8.87525339]), -1.0199994691949312, 1.0614185705226642
"""
NSP example
# Example for NSP
# sum(k**1.2 * |x[k-1]|, k = 1..75) + |y-15| + |y+15| + y^2 -> min
# with some constraints
# coded in FuncDesigner
# all 1st derivatives are obtained via Automatic Differentiation,
# not to be confused with finite-difference derivatives approximation
from numpy import cos, arange
from FuncDesigner import *
from openopt import NSP

x, y = oovars('x y')

N = 75
koeffs = arange(1, N+1) ** 1.2 # 1, 2**1.2, 3**1.2, ..., N**1.2

objective = sum(abs(x) * koeffs) + abs(y-15) + abs(y+15) + y**2

# Python list of some constraints
constraints = [(y-1)**2 < 1, abs(y) < 0.5, abs(x[0]) < 1e-5, abs(x[N-1]) < 1e-5]

# we could use vectorized code (recommended, to speed up calculations)
constraints.append((x - 0.01*arange(N))**2 < 0.1*arange(1, N+1))
# (x_0-0)**2 < 0.1, (x_1-0.01)**2 < 0.2, (x_2-0.02)**2 < 0.3, ...

startPoint = {x: cos(1+arange(N)), y:80}

p = NSP(objective, startPoint, maxIter = 1e5, constraints = constraints)

r = p.solve('ralg')
x_opt, y_opt = x(r), y(r)
print(max(abs(x_opt)), y_opt)
'''
expected output:
[...]
  876  3.004e+01            -100.00
istop: 4 (|| F[k] - F[k-1] || < ftol)
Solver:   Time Elapsed = 7.98   CPU Time Elapsed = 7.97
objFunValue: 30.042539 (feasible, MaxResidual = 0)
(6.6277698279489041e-06, 0.20306221768582972)
'''
NLSP example
""" Solving system of equations: x**3 + y**3 - 9 = 0 x - 0.5*y = 0 cos(z) + x - 1.5 = 0 """ from FuncDesigner import * from openopt import SNLE x, y, z = oovars(3) equations = (x**3 + y**3 - 9, x - 0.5*y, cos(z) + x - 1.5) # alternatively, since FD 0.32: you can use equations and require custom tolerances equations = (x**3 + y**3 - 9==0, (x - 0.5*y==0)(tol=1e-9), (cos(z) + x == 1.5) (tol=1e-9)) # if no tol is assigned for an equation, p.ftol (default 10^-6) will be used for that one startPoint = {x:8, y:15, z:80} p = SNLE(equations, startPoint) # optional: we could set some constraints p.constraints = [z<70, z>50, z + sin(x) < 60] r = p.solve('nssolve') # nssolve is name of solver involved, see OOF doc for more arguments xs, ys, zs = r(x, y, z) print('Solution: x = %f y = %f z = %f' % (xs, ys, zs)) #Solution: x = 0.999999 y = 2.000000 z = 57.595865 """ More complex example """ # Solving non-linear system of size 152 from FuncDesigner import * from openopt import SNLE from numpy import arange, ones N = 150 x = oovar(size=N) y,z = oovars(2) f = (x.sum()**2 + y - 100*N, 1e-3*sum(x**2) - 5*y - 100*N - (z-1)**2, (x-15)*arange(N)/(100*N) - 4*z + 5) startPoint = {x: ones(N), y:15, z:80} p = SNLE(f, startPoint, maxIter=1e7) r = p.solve('scipy_fsolve') # nssolve is name of solver involved, see OOF doc for more arguments print('x[5]:%f y:%f z:%f' % (x(r)[5], y(r), z(r))) #x[5]:15.000000 y:-2107.773183 z:1.250000
MILP example
# Example of creating MILP in FuncDesigner
# and solving it via OpenOpt
from FuncDesigner import *
from openopt import MILP

# Define some oovars
# old-style:
#x, y, z = oovars('x y z')
# new (OpenOpt v 0.37+):
x = oovar('x')
y, z = oovars('y z', domain = int) # also you may use domain = bool

# Let's define some linear functions
f1 = 4*x + 5*y + 3*z + 5
f2 = f1.sum() + 2*x + 4*y + 15
f3 = 5*f1 + 4*f2 + 20

# Define objective; sum(a) and a.sum() are the same, just as for numpy arrays
obj = sum(x) + y + 50*z + sum(f3) + 2*f2.sum() + 4064.6

# Start point - currently only the sizes of variables matter
startPoint = {x:[8, 15], y:25, z:80} # however, you'd better use numpy arrays instead of Python lists

# Define some constraints
cons = [x+5*y<15, x[0]<-5, f1<[25, 35], f1>-100, 2*f1+4*z<[80, 800],
        5*f2+4*z<100, [-5.5, -4.5]<x, x<1, -17<y, y<20, -4000<z, z<4]

# Create prob
# old-style:
#p = MILP(obj, startPoint, intVars = [y, z], constraints=cons)
# new (OpenOpt v 0.37+):
p = MILP(obj, startPoint, constraints=cons)

# Solve
r = p.minimize('lpSolve', iprint=-1) # lpSolve is the name of the solver involved, see OOF doc for more arguments

# Decode solution
s = r.xf
print('Solution: x = %s   y = %f  z = %f' % (str(s[x]), s[y], s[z]))
# Solution: x = [-5.25 -4.5 ]   y = 3.000000  z = -33.000000
MINLP example
(since v. 0.33)
# Example of creating MINLP in FuncDesigner
# and solving it via OpenOpt
from FuncDesigner import *
from openopt import MINLP

a, b, c = oovars('a', 'b', 'c')
d = oovar('d', domain = [1, 2, 3, 3.5, -0.5, -4]) # domain should be a Python list/set/tuple of allowed values

startPoint = {a:[100, 12], b:2, c:40, d:0} # however, you'd better use numpy arrays instead of Python lists

f = sum(a*[1, 2])**2 + b**2 + c**2 + d**2

constraints = [(2*c+a-10)**2 < 1.5 + 0.1*b, (a-10)**2 < 1.5, a[0] > 8.9,
               (a+b > [7.97999836, 7.8552538])('sum_a_b', tol=1.00000e-12),
               a < 9, b < -1.02, c > 1.01,
               ((b + c * log10(a).sum() - 1) ** 2 == 0)(tol=1e-6),
               b + d**2 < 100]

p = MINLP(f, startPoint, constraints = constraints)
r = p.minimize('branb', nlpSolver='ralg', plot=0, discrtol = 1e-6, xtol=1e-7)
a_opt, b_opt, c_opt, d_opt = r(a, b, c, d)
# or any of the following:
# a_opt, b_opt, c_opt, d_opt = r(a), r(b), r(c), r(d)
# r('a'), r('b'), r('c'), r('d') (provided you have assigned the names to oovars as above)
# r('a', 'b', 'c', 'd')
# a(r), b(r), c(r), d(r)
print(a_opt, b_opt, c_opt, d_opt)
"""
Expected output:
...
objFunValue: 718.00734 (feasible, max(residuals/requiredTolerances) = 0.135819)
(array([ 8.99999879,  8.87525369]), array([-1.01999986]), array([ 1.06177278]), array([-0.5]))
"""
See also: example with specifiable accuracy (via interval analysis by interalg)
GLP example
""" GLP (GLobal Problem from OpenOpt set) example for FuncDesigner model: searching for global minimum of the func (x-1.5)**2 + sin(0.8 * y ** 2 + 15)**4 + cos(0.8 * z ** 2 + 15)**4 + (t-7.5)**4 subjected to some constraints See http://openopt.org/GLP for more info and examples. """ from openopt import GLP from FuncDesigner import * x, y, z, t = oovars(4) # define objective f = (x-1.5)**2 + sin(0.8 * y ** 2 + 15)**4 + cos(0.8 * z ** 2 + 15)**4 + (t-7.5)**4 # define some constraints constraints = [x<1, x>-1, y<1, y>-1, z<1, z>-1, t<1, t>-1, x+2*y>-1.5, sinh(x)+cosh(z)+sinh(t) <2.0] # add some more constraints via Python "for" cycle M = 10 for i in xrange(M): func = i*x+(M-i)*y+sinh(z)+cosh(t) constraints.append(func < i+1) # define start point. You can use variables with length > 1 as well startPoint = {x:0, y:0, z:0, t:0} # assign prob p = GLP(f, startPoint, constraints=constraints, maxIter = 1e3, maxFunEvals = 1e5, maxTime = 5, maxCPUTime = 5) #optional: graphic output #p.plot = 1 or p.solve(..., plot=1) or p = GLP(..., plot=1) # solve r = p.solve('de', plot=1) # try other solvers: galileo, pswarm x_opt, y_opt, z_opt, t_opt = r(x, y, z, t) # or # x_opt, y_opt, z_opt, t_opt = x(optPoint), y(optPoint), z(optPoint), t(optPoint)
Optimization with some fixed variables
It is performed via the arguments fixedVars or freeVars.
With some variables fixed, parts of the model may become linear with respect to the remaining free variables; e.g. for fixed x the expressions

z = 5+4*cos(x)
sin(z)+2*y < 10

will be rendered as linear. Moreover, sometimes the whole problem becomes linear and thus can be rendered and solved as such; you'd better use oosystem to involve automatic determination of the problem type.
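A minimal sketch of that approach (assuming the oosystem interface with the &= operator for attaching constraints and the minimize method, which is expected to detect the problem type automatically; the variables and numbers here are purely illustrative):

from FuncDesigner import *

a, b, c = oovars('a', 'b', 'c')
startPoint = {a: 100, b: 2, c: 40}

S = oosystem()
# attach constraints; with some variables fixed, some of them may be rendered as linear
S &= [a + 2*b + c < 100, a > 0, c > 0, b > -10, b < 10]

# minimize the objective; the problem type is determined automatically by oosystem
r = S.minimize(a + b + c**2, startPoint)
print(r.xf)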
from FuncDesigner import *
from openopt import NLP

a, b, c = oovars('a', 'b', 'c')
startPoint = {a:[100, 12], b:-0.9, c:40} # however, you'd better use numpy arrays instead of Python lists

objective = ((a*[1, 2])**2).sum() + b**2 + c**2

# Optional: set some constraints
constraints = [a > [1, 1],            # it means a[0]>=1, a[1]>=1
               (a.sum()+b)**2 < 10,
               (a+b+c)**2 < 15]       # it means (a[i]+b+c)**2 < 15 for i in {0, 1}

fixedVars = b # or fixedVars = [b] / c / [b,c] / [a,c] etc
p = NLP(objective, startPoint, fixedVars = fixedVars, constraints = constraints)
r = p.solve('ralg')
print(r.xf)

# Alternatively, you can set freeVars ("optVars" for OOSuite versions <= 0.31) instead of fixedVars:
freeVars = [a, c] # or freeVars = [a] / c / [b,c] / [a,c] etc
p = NLP(objective, startPoint, freeVars = freeVars, constraints = constraints)
r = p.solve('ralg')
print(r.xf)

"""
Expected output:
...
objFunValue: 5.809995 (feasible, max constraint =  7.62729e-07)
{b: -0.90000000000000002, a: array([ 1.00000032,  0.99999924]), c: array([ 0.00068568])}
"""