Example #1
def fit_alpha(v0, tvec, signal):
    '''Return the best-fit coefficients for an alpha-function fit, where
    v0 = [delay, tau_rise, tau_decay, amplitude].
    Note: the bounds below have five entries, so v0 is evidently expected
    to carry one more element than the docstring names.'''
    # Linear inequality constraints A*v <= b: keep delay and tau_rise
    # (essentially) non-negative and enforce tau_rise <= tau_decay.
    A = [
        [-1, 0, 0, 0, 0],
        [0, -1, 0, 0, 0],
        [0, 1, -1, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
    ]
    b = [1.E-6, 1.E-6, 1.E-6, 0, 0]
    ub = [4., pl.inf, pl.inf, 100, pl.inf]
    lb = [1., -pl.inf, -pl.inf, -100, -pl.inf]

    def costfun(v, tvec, signal):
        # Elementwise sqrt of a square is abs(), so this is the sum of
        # absolute residuals (an L1 cost), not a root-mean-square error.
        return pl.sqrt((fp(v, tvec) - signal)**2).sum()

    objfnc = lambda v: costfun(v, tvec, signal)

    p = opt.NLP(f=objfnc, x0=v0, A=A, b=b, ub=ub, lb=lb)

    p.ftol = 1.E-9
    p.maxIter = 10000
    p.maxLineSearch = 5000
    r = p.solve('ralg')

    vf = r.xf

    return vf
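The snippet assumes module-level aliases `pl` (pylab) and `opt` (openopt), plus an alpha-function model `fp` defined elsewhere. A minimal sketch of what those could look like; the exact functional form of `fp` is an assumption, not taken from the source, and it uses only the four parameters the docstring names:

import pylab as pl       # assumed alias: pl = pylab
import openopt as opt    # assumed alias: opt = openopt

def fp(v, tvec):
    # Hypothetical alpha-function model matching the docstring's
    # v = [delay, tau_rise, tau_decay, amplitude] layout.
    delay, tau_rise, tau_decay, amplitude = v[0], v[1], v[2], v[3]
    t = pl.clip(tvec - delay, 0, pl.inf)  # zero response before onset
    return amplitude * (pl.exp(-t / tau_decay) - pl.exp(-t / tau_rise))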
Example #2
def setupProblem(zero_start, lb, ub, Ac, bc, lbexp, ubexp, m_sumnot):
    #XXX optimizer needs to move in lotsize steps
    # The two cases differ only in the starting point.
    x0 = numpy.zeros(num_secs) if zero_start > 0 else positions
    p = openopt.NLP(goal='max', f=objective, df=objective_grad,
                    x0=x0, lb=lb, ub=ub, A=Ac, b=bc)
    p.args.f = (kappa, slipConst, slipCoef, positions, mu, rvar, factors, fcov, advp, borrowRate, price, execFee)
    p.args.df = (kappa, slipConst, slipCoef, positions, mu, rvar, factors, fcov, advp, borrowRate, price, execFee)
    p.c = [constrain_by_capital]
    p.dc = [constrain_by_capital_grad]
    p.args.c = (positions, m_sumnot, factors, lbexp, ubexp, max_trdnot * m_sumnot * hard_limit)
    p.args.dc = (positions, m_sumnot, factors, lbexp, ubexp, max_trdnot * m_sumnot * hard_limit)
    p.ftol = 1e-7
    p.maxFunEvals = 1e9
    p.maxIter = max_iter
    p.minIter = min_iter
    p.callback = Terminator(50, 1, p.minIter)
    
    return p
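`Terminator` is not defined in the snippet (Example #3 uses it too). OpenOpt calls each callback with the problem instance every iteration and stops once a callback returns True; below is a hypothetical sketch consistent with the `Terminator(50, 1, p.minIter)` call above. The stall-detection semantics are an assumption, not the original class:

class Terminator(object):
    """Hypothetical stop callback: halt once the objective has improved by
    less than `tol` over the last `window` iterations, after `minIter`."""
    def __init__(self, window, tol, minIter):
        self.window, self.tol, self.minIter = window, tol, minIter
        self.history = []

    def __call__(self, p):
        # OpenOpt passes the problem instance; p.iter is the iteration
        # count and p.fk the current objective value.
        self.history.append(p.fk)
        if p.iter < max(self.minIter, self.window):
            return False
        return abs(self.history[-1] - self.history[-self.window]) < self.tol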
Example #3
def setupProblem(positions, mu, rvar, factors, fcov, advp, advpt, vol, mktcap,
                 borrowRate, price, lb, ub, Ac, bc, lbexp, ubexp,
                 untradeable_info, sumnot, zero_start):
    # The two cases differ only in the starting point.
    x0 = numpy.zeros(len(positions)) if zero_start > 0 else positions
    p = openopt.NLP(goal='max',
                    f=objective,
                    df=objective_grad,
                    x0=x0,
                    lb=lb,
                    ub=ub,
                    A=Ac,
                    b=bc,
                    plot=plotit)
    p.args.f = (kappa, slip_gamma, slip_nu, positions, mu, rvar, factors, fcov,
                advp, advpt, vol, mktcap, borrowRate, price, execFee,
                untradeable_info)
    p.args.df = (kappa, slip_gamma, slip_nu, positions, mu, rvar, factors,
                 fcov, advp, advpt, vol, mktcap, borrowRate, price, execFee,
                 untradeable_info)
    p.c = [constrain_by_capital]
    p.dc = [constrain_by_capital_grad]
    p.args.c = (positions, sumnot, factors, lbexp, ubexp, sumnot)
    p.args.dc = (positions, sumnot, factors, lbexp, ubexp, sumnot)
    p.ftol = 1e-6
    p.maxFunEvals = 1e9
    p.maxIter = max_iter
    p.minIter = min_iter
    p.callback = Terminator(50, 10, p.minIter)

    return p
Example #4
def make_optimiser(config, model):
    if not model: return None

    # run with dummy values -- this doesn't really belong here, but temporarily...
    if config['wetrun']:
        print("--\nGenerating optimisable function")
        ff = model.optimisable(config['distance'], debug=config['debug'])
        print(ff)
        print("--\nAttempting single run of function")
        pp = [x.get('default', 0) for x in config['params']]
        ff(pp)
        return None

    mode = config.get('job_mode', JOB_MODE)
    weights = [config['weights'].get(x['name'], 1) for x in config['vars']]
    # The three problem classes take identical arguments here, so dispatch
    # through a lookup table instead of three copy-pasted branches.
    constructors = {'GLP': openopt.GLP, 'NLP': openopt.NLP, 'NSP': openopt.NSP}
    if mode in constructors:
        lb = [x.get('min', 0) for x in config['params']]
        ub = [x.get('max', 0) for x in config['params']]
        return constructors[mode](
            model.optimisable(config['distance'], weights=weights),
            lb=lb,
            ub=ub,
            # TODO, possibly: support A and b args to define linear constraints
            maxIter=config['max_iter'])
    return None
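A hypothetical `config` dict illustrating the keys `make_optimiser` reads. The key names are taken from the lookups above, but all values (and the `JOB_MODE` fallback) are made-up placeholders:

config = {
    'job_mode': 'NLP',        # falls back to module-level JOB_MODE if absent
    'wetrun': False,          # True: single debug evaluation, no optimiser
    'debug': False,
    'distance': 'euclidean',  # passed through to model.optimisable()
    'max_iter': 1000,
    'params': [{'name': 'k1', 'default': 0.5, 'min': 0.0, 'max': 10.0},
               {'name': 'k2', 'default': 1.0, 'min': 0.0, 'max': 5.0}],
    'vars': [{'name': 'output'}],
    'weights': {'output': 1.0},
}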
Example #5
    def _fminOopt(self, f_fnc, f_args, solver, x0, init_args=None,
                  solve_args=None, plot=False):
        '''Minimize f_fnc(x, *f_args) with an OpenOpt solver.

        Arguments
        ---------
        f_fnc : callable taking (x, *f_args)
        f_args : extra arguments forwarded to f_fnc
        solver : name of an OpenOpt GLP or NLP solver
        x0 : initial guess

        Keyword arguments
        -----------------
        init_args : extra kwargs for the problem constructor
        solve_args : extra kwargs for p.solve()
        plot : whether OpenOpt should plot convergence
        '''
        # Avoid mutable default arguments: the original dict defaults were
        # shared across calls and mutated below.
        init_args = dict(init_args or {})
        solve_args = dict(solve_args or {})

        fnc = lambda x: f_fnc(x, *f_args)

        # Check which solver is in use and put the initial-guess argument
        # in the right dictionary.
        if solver in glp_solvers:
            solve_args['plot'] = plot
            solve_args['x0'] = x0

            p = oopt.GLP(fnc, **init_args)  # set up the OpenOpt problem
            r = p.solve(solver, **solve_args)  # and solve

        elif solver in nlp_solvers:
            init_args['plot'] = plot
            init_args['x0'] = x0

            p = oopt.NLP(fnc, **init_args)  # set up the OpenOpt problem
            r = p.solve(solver, **solve_args)  # and solve

        else:
            raise Exception('The solver %s is not recognized. '
                            'Check spelling!' % solver)

        return r
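A sketch of how this might be called. The solver names below are real OpenOpt solvers, but `glp_solvers` and `nlp_solvers` are module-level registries the snippet assumes, so these lists are illustrative guesses:

# Assumed module-level solver registries:
glp_solvers = ['galileo', 'pswarm', 'de']
nlp_solvers = ['ralg', 'scipy_slsqp', 'scipy_cobyla']

# From inside the owning class, minimizing a shifted paraboloid:
#   r = self._fminOopt(lambda x, a: ((x - a) ** 2).sum(), (1.5,),
#                      'ralg', x0=[0., 0.])
#   print(r.xf, r.ff)   # optimum near [1.5, 1.5], value near 0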
Example #6
    def optimize(self, X, Y, solver='ralg', ftol=1e-4, gtol=1e-4, contol=1e-6,
                 maxIter=2500, maxFunEvals=2500, maxtime=3600., checkgrad=False,
                 verbose=False):
        """Minimizes the negative log marginal likelihood. Positivity
        constraints on several parameters are enforced by optimizing their
        logarithms (the first 2 + n parameters are stored on a log scale).

        Requires: openopt"""
        import openopt

        def checkretrain(params):
            try:
                self.machine.lmlfunc()
                if not (numpy.abs(self.getparams() - params) < 1e-12).all():
                    self.setparams(params)
                    self.train(X, Y)
            except AttributeError:
                self.setparams(params)
                self.train(X, Y)

        def convert(params):
            pconv = params.copy()
            pconv[:2+self.n] = numpy.exp(pconv[:2+self.n])
            return pconv

        def convertgrad(grad, params):
            grad[:2+self.n] = grad[:2+self.n] * convert(params)[:2+self.n]
            return grad

        def invert(params):
            pinv = params.copy()
            pinv[:2+self.n] = numpy.log(pinv[:2+self.n])
            return pinv

        def f(params):
            checkretrain(convert(params))
            return -self.lmlfunc()

        def df(params):
            checkretrain(convert(params))
            return convertgrad(-self.lmlgradient(), params)

        x0 = invert(self.getparams())

        if verbose:
            iprint = 10
            itercb = []
        else:
            iprint = -1

            def itercb(p):
                lib = ['-','\\','|','/']
                symbol = lib[p.iter % len(lib)]
                sys.stdout.write('%-40s\r' % ('%s iteration: %4d lml: %g' % (symbol, p.iter, p.fk)))
                sys.stdout.flush()
                return False

        p = openopt.NLP(f, x0, df=df, iprint=iprint, callback=itercb,
                        gtol=gtol, contol=contol, ftol=ftol,
                        maxIter=maxIter, maxFunEvals=maxFunEvals,
                        maxTime=maxtime)

        if checkgrad:
            p.checkdf()

        r = p.solve(solver)

        self.setparams(convert(r.xf))

        if not verbose:
            sys.stdout.write('%40s\r' % '')
            sys.stdout.flush()

        return r 
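The exp/log trick works because, for q = log(p), the chain rule gives d f(exp(q))/dq = f'(p) * p, which is exactly the factor `convertgrad` multiplies in. A self-contained numerical check (names and values are illustrative only):

import numpy

# f(p) = p**3 optimized over q = log(p); verify the reparameterized gradient.
p0 = 2.0
q0 = numpy.log(p0)
grad_p = 3 * p0 ** 2                 # df/dp at p0
grad_q = grad_p * numpy.exp(q0)      # chain rule: df/dq = df/dp * p
eps = 1e-6                           # central finite difference in q
fd = (numpy.exp(q0 + eps) ** 3 - numpy.exp(q0 - eps) ** 3) / (2 * eps)
assert abs(grad_q - fd) < 1e-4       # both are 24 up to rounding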
Example #7
def optTrans(dims, target, S):

    # Dimensions
    states = dims['states']
    num_vars = dims['vars']  # unused below; renamed to avoid shadowing a builtin

    # Set the starting transition matrix: uniform rows plus a little noise.
    lmbda = np.ones(states) / states
    T = np.tile(lmbda, (states, 1))
    np.random.seed(100)
    T = T + np.random.normal(0., 0.01, (states, states))

    # Initialize persistent S and target
    objective = objectiveTransMatrix(S, target)

    # Rewrite T as row vector.
    T0 = np.ravel(T, 'F')

    # Set linear equality constraints.
    Aeqs = {}
    beqs = {}

    Aeqs['RowSumOne'] = np.kron(np.eye(states), np.ones((1, states)))
    beqs['RowSumOne'] = np.ones(states)

    Aeqs['ColSumOne'] = np.tile(np.eye(states), (1, states))
    beqs['ColSumOne'] = np.ones(states)

    # Assemble constraint: Ax = b
    Aeq = np.empty((0, states * states))
    beq = np.array([])

    # Iterate over keys so each A block stays paired with its own b block;
    # zipping the two dicts only pairs them correctly if key orders match.
    for key in Aeqs:
        Aeq = np.vstack((Aeq, Aeqs[key]))
        beq = np.concatenate((beq, beqs[key]))

    slack = 0.7
    LB = np.ones((states * states)) * (1. / states) * slack
    UB = np.ones((states * states)) * (1. / states) * (1. / slack)

    # Penalty-style constraint helpers; only used by the scipy solvers
    # commented out below.
    def linearConstr(x):
        return -scipy.linalg.norm(np.dot(Aeq, x).flat - beq)

    def lowerBoundConstr(x):
        constrVec = np.array([x - LB * np.ones(np.size(x))])
        return scipy.linalg.norm(constrVec, ord=1)

    def upperBoundConstr(x):
        constrVec = np.array([UB * np.ones(np.size(x)) - x])
        return scipy.linalg.norm(constrVec, ord=1)

    # Earlier attempts with other solvers, kept for reference:
    #Tvec = scipy.optimize.fmin_cobyla(objective, T0, [lowerBoundConstr, upperBoundConstr, linearConstr], iprint='2', rhobeg=0.001, rhoend=0.0001, maxfun=100000)
    #Tvec = scipy.optimize.fmin_slsqp(objective, T0, eqcons=[linearConstr], bounds=[(slack / states, 1. / (slack * states)) for ix in range(len(T0))], iter=100, acc=1e-06, iprint=10)
    #fOpt = objective(Tvec)
    p = openopt.NLP(objective,
                    T0,
                    df=None,
                    c=None,
                    dc=None,
                    h=None,
                    dh=None,
                    A=None,
                    b=None,
                    Aeq=Aeq,
                    beq=beq,
                    lb=LB,
                    ub=UB,
                    gtol=1e-7,
                    xtol=1e-7,
                    ftol=1e-7,
                    contol=1e-7,
                    iprint=50,
                    maxIter=500,
                    maxFunEvals=1e7,
                    name='NLP_1')
    solver = 'ralg'
    # Alternatives tried: 'algencan', 'ipopt', 'scipy_slsqp', 'lincher',
    # 'scipy_cobyla' (step size too large), 'mma', 'gsubg'
    r = p.solve(solver, plot=0)  # string argument is solver name
    Tvec = p.xf
    fOpt = objective(Tvec)
    if fOpt > 1e-5:
        print('computeTransMatrix: Warning, fval is %g \n' % fOpt)

    Topt = np.reshape(Tvec, (states, states), 'F')

    curMchain = MkovM.MkovM(S, Topt)
    print(curMchain)

    # Report output
    for moment in target:
        print(
            'after optimization mchain moment vs target moment for moment %s are'
            % (moment))
        print(curMchain[moment])
        print(target[moment])

    return Topt
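The two Kronecker-structured blocks pin every row sum and column sum of T to one, i.e. they constrain T to be doubly stochastic. A small check of the encoding with illustrative values; note that with the column-major ('F') ravel, `kron(eye, ones)` actually picks out column sums and `tile(eye, ...)` row sums, the opposite of the dict labels, but since both right-hand sides are ones the combined constraint set is identical:

import numpy as np

states = 3
T = np.array([[0.50, 0.50, 0.0],    # doubly stochastic: every row and
              [0.25, 0.25, 0.5],    # every column sums to one
              [0.25, 0.25, 0.5]])
x = np.ravel(T, 'F')                # column-major, as in optTrans

A_colsums = np.kron(np.eye(states), np.ones((1, states)))
A_rowsums = np.tile(np.eye(states), (1, states))
assert np.allclose(A_colsums.dot(x), np.ones(states))
assert np.allclose(A_rowsums.dot(x), np.ones(states))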
Example #8
    def _solve(self, *args, **kwargs):

        #!!!!! TODO: determine LP, MILP, MINLP if possible
        try:
            import openopt
        except ImportError:
            raise FuncDesignerException(
                'to perform the operation you should have openopt installed')

        constraints = self._getAllConstraints()

        # TODO: mention it in doc
        if 'constraints' in kwargs:
            tmp = set(kwargs['constraints'])
            tmp.update(set(constraints))
            kwargs['constraints'] = tmp
        else:
            kwargs['constraints'] = constraints

        freeVars, fixedVars = kwargs.get('freeVars',
                                         None), kwargs.get('fixedVars', None)
        isSystemOfEquations = kwargs['goal'] == 'solution'

        isLinear = all([(c.oofun.getOrder(freeVars, fixedVars) < 2)
                        for c in constraints])

        if isSystemOfEquations:
            if isLinear:  # Linear equations system
                p = sle(list(kwargs['constraints']), *args, **kwargs)
            else:  # Nonlinear equations system
                f = kwargs['constraints']
                ############################################
                # Split the equality constraints (lb == ub) from the rest:
                # the equalities become the SNLE system F, everything else
                # stays in the constraint set C. (An older revision instead
                # raised a FuncDesignerException for any non-equality
                # constraint here.)
                C, F = [], []
                for c in f:
                    if array_equal(c.lb, c.ub):
                        F.append(c)
                    else:
                        C.append(c)
                kwargs['constraints'] = C
                f = F
                ############################################

                p = openopt.SNLE(f, *args, **kwargs)
                if 'nlpSolver' in kwargs:
                    p.solver = kwargs['nlpSolver']

        else:  # an optimization problem
            assert len(args) > 0, \
                'you should have objective function as 1st argument'
            objective = args[0]
            if isinstance(objective, BaseFDConstraint):
                raise FuncDesignerException(
                    "1st argument can't be of type 'FuncDesigner constraint', it should be 'FuncDesigner oofun'"
                )
            elif not isinstance(objective, oofun):
                raise FuncDesignerException(
                    '1st argument should be objective function of type "FuncDesigner oofun"'
                )

            isLinear &= objective.getOrder(freeVars, fixedVars) < 2

            if isLinear:
                p = openopt.LP(*args, **kwargs)
                if 'solver' not in kwargs:
                    # Pick the first usable LP solver; glpk gets probed with
                    # a tiny test problem before being accepted.
                    for solver in self.lpSolvers:
                        if (':' not in solver
                                and not openopt.oosolver(solver).isInstalled
                            ) or (
                                solver == 'glpk' and
                                not openopt.oosolver('cvxopt_lp').isInstalled):
                            continue
                        if solver == 'glpk':
                            p2 = openopt.LP([1, -1], lb=[1, 1], ub=[10, 10])
                            try:
                                r = p2.solve('glpk', iprint=-5)
                            except Exception:
                                continue
                            if r.istop < 0:
                                continue
                        break  # first installed (and working) solver wins
                    if ':' in solver:
                        pWarn(
                            'You have a linear problem but no linear solver '
                            '(lpSolve, glpk, cvxopt_lp) is installed; the '
                            'converter to NLP will be used.')
                    p.solver = solver
                else:
                    solverName = kwargs['solver']
                    if type(solverName) != str:
                        solverName = solverName.__name__
                    if solverName not in self.lpSolvers:
                        solverName = 'nlp:' + solverName
                        p.solver = solverName
                        p.warn(
                            'you are solving a linear problem with a '
                            'non-linear solver')
            else:
                p = openopt.NLP(*args, **kwargs)
                if 'solver' not in kwargs:
                    p.solver = 'ralg'

        # TODO: solver autoselect
        #if p.iprint >= 0: p.disp('The optimization problem is  ' + p.probType)
        p._isFDmodel = lambda *args, **kwargs: True
        return (p.solve() if kwargs.get('manage', False) in (False, 0)
                else p.manage())
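This method backs FuncDesigner's `oosystem` entry points, routing linear models to `openopt.LP` and everything else to `openopt.NLP` or `openopt.SNLE`. A heavily hedged usage sketch, assuming the classic FuncDesigner API (`oovars`, `oosystem`, `&=` for attaching constraints) from OpenOpt-era documentation; exact signatures may differ between versions:

from FuncDesigner import oovars, oosystem

a, b = oovars('a', 'b')
S = oosystem()
S &= (a > 0, b > 0, a + 2*b < 10)   # all constraints have order < 2
# Objective and constraints are linear, so _solve builds an openopt.LP;
# an objective like a**2 + b**2 would route to openopt.NLP instead.
r = S.minimize(3*a + 4*b, {a: 1, b: 1})
print(r.xf)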
Example #9
def optimize(daily):
    global max_sumnot, p

    exposures = numpy.dot(factors, positions)
    lbexp = exposures
    lbexp = numpy.minimum(lbexp, -max_expnot * max_sumnot)
    lbexp = numpy.maximum(lbexp, -max_expnot * max_sumnot * hard_limit)
    ubexp = exposures
    ubexp = numpy.maximum(ubexp, max_expnot * max_sumnot)
    ubexp = numpy.minimum(ubexp, max_expnot * max_sumnot * hard_limit)

    sumnot = abs(positions).sum()
    max_sumnot = max(max_sumnot, sumnot)
    max_sumnot = min(max_sumnot, max_sumnot * hard_limit)

    # Exposure constraints as one linear system Ac x <= bc:
    # factors.x <= ubexp and -factors.x <= -lbexp.
    Ac = numpy.zeros((2 * num_factors, num_secs))
    bc = numpy.zeros(2 * num_factors)
    for i in range(num_factors):
        for j in range(num_secs):
            Ac[i, j] = factors[i, j]
            Ac[num_factors + i, j] = -factors[i, j]
        bc[i] = ubexp[i]
        bc[num_factors + i] = -lbexp[i]

    #XXX optimizer needs to move in lotsize steps
    p = openopt.NLP(goal='max',
                    f=objective,
                    df=objective_grad,
                    x0=positions,
                    lb=lbound,
                    ub=ubound,
                    A=Ac,
                    b=bc)
    p.args.f = (kappa, slippage, positions, mu, rvar, factors, fcov, advp,
                borrowRate)
    p.args.df = (kappa, slippage, positions, mu, rvar, factors, fcov)
    p.c = [constrain_by_capital]
    p.dc = [constrain_by_capital_grad]
    p.args.c = (positions, max_sumnot, factors, lbexp, ubexp,
                max_trdnot * max_sumnot * hard_limit)
    p.args.dc = (positions, max_sumnot, factors, lbexp, ubexp,
                 max_trdnot * max_sumnot * hard_limit)
    p.ftol = 1e-7
    p.maxIter = max_iter
    p.minIter = 400

    r = p.solve('ralg')

    #XXX need to check for small number of iterations!!!
    if r.stopcase == -1 or not r.isFeasible:
        raise Exception("Optimization failed")

    target = r.xf
    dutil = numpy.zeros(len(target))
    dmu = numpy.zeros(len(target))
    eslip = numpy.zeros(len(target))
    costs = numpy.zeros(len(target))
    for ii in range(len(target)):
        targetwo = target.copy()
        targetdiff = positions.copy()
        targetdiff[ii] = target[ii]
        targetwo[ii] = positions[ii]
        dutil[ii] = objective(target, *p.args.f) - objective(
            targetwo, *p.args.f)
        trade = target[ii] - positions[ii]
        eslip[ii] = slippageFunc(slippage, trade)
        costs[ii] = costsFunc(positions, targetdiff, borrowRate)
        dmu[ii] = mu[ii] * trade

    printinfo(target)

    return (target, dutil, eslip, dmu, costs)
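Stacking `[factors; -factors]` against `[ubexp; -lbexp]` is the standard way to express the two-sided exposure bounds lbexp <= factors.x <= ubexp as a single system Ac x <= bc, which is exactly what the loop above builds. A small self-contained check with made-up values:

import numpy

factors = numpy.array([[1.0, -1.0], [0.5, 0.5]])  # 2 factors, 2 securities
lbexp = numpy.array([-1., -2.])
ubexp = numpy.array([2., 3.])

Ac = numpy.vstack((factors, -factors))            # vectorized equivalent
bc = numpy.concatenate((ubexp, -lbexp))           # of the loop above

x = numpy.array([0.5, 1.0])                       # within both bounds
exp_ = factors.dot(x)
assert (Ac.dot(x) <= bc).all() == \
       ((lbexp <= exp_).all() and (exp_ <= ubexp).all())

x_bad = numpy.array([5., 0.])                     # breaches the first ubexp
assert not (Ac.dot(x_bad) <= bc).all()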
Example #10
def optimize(daily):
    global max_sumnot
    
    exposures = numpy.dot(factors, positions)
    lbexp = exposures
    lbexp = numpy.minimum(lbexp, -max_expnot * max_sumnot)
    lbexp = numpy.maximum(lbexp, -max_expnot * max_sumnot * hard_limit)
    ubexp = exposures
    ubexp = numpy.maximum(ubexp, max_expnot * max_sumnot)
    ubexp = numpy.minimum(ubexp, max_expnot * max_sumnot * hard_limit)

    sumnot = abs(positions).sum()
    max_sumnot = max(max_sumnot, sumnot)
    max_sumnot = min(max_sumnot, max_sumnot * hard_limit)

    #XXX optimizer needs to move in lotsize steps
    p = openopt.NLP(goal='max', f=objective, x0=positions, lb=lbound, ub=ubound)
    p.args.f = (kappa, slippage, positions, mu, rvar, factors, fcov)
    p.c = [constrain_by_capital, constrain_by_exposures]
    
    if not daily:
        p.c.append(constrain_by_trdnot)
        
    p.args.c = (positions, max_sumnot, factors, lbexp, ubexp, max_trdnot * max_sumnot * hard_limit)
    p.ftol = 1e-7
    p.maxIter = max_iter
    #p.maxCPUTime = 360
    #p.maxTime = 240

    # Disabled iteration-control scaffolding:
    #config['max_iter'] = max_iter
    #config['stop_iter'] = stop_iter
    #config['min_iter'] = min_iter
    #config['stop_frac'] = stop_frac
    #p.user.config = config
    #p.callback = iterfn
    #p.plot = 1

    # Debugging dump of the optimizer inputs:
    #for name, val in [('pos', positions), ('max_sumnot', max_sumnot),
    #                  ('factors', factors), ('lbexp', lbexp), ('ubexp', ubexp),
    #                  ('max_trdnot', max_trdnot), ('hard_limit', hard_limit)]:
    #    print("%s:" % name); print(val)

    r = p.solve('ralg')
    # Alternatives tried: 'algencan', 'scipy_slsqp', 'scipy_lbfgsb',
    # 'scipy_tnc', 'scipy_cobyla'
    #XXX need to check for small number of iterations!!!
    if r.stopcase == -1 or not r.isFeasible:
        raise Exception("Optimization failed")

    target = r.xf
    dutil = numpy.zeros(len(target))
    dmu = numpy.zeros(len(target))
    eslip = numpy.zeros(len(target))
    for ii in range(len(target)):
        targetwo = target.copy()
        targetwo[ii] = positions[ii]
        dutil[ii] = objective(target, *p.args.f) - objective(targetwo, *p.args.f)
        trade = target[ii] - positions[ii]
        eslip[ii] = slippageFunc(slippage, trade)
        dmu[ii] = mu[ii] * trade

    printinfo(target)

    return (target, dutil, eslip, dmu)