Example #1
import openopt

def make_optimiser(config, model):
    if not model:
        return None

    # Run the generated function once with dummy values -- this doesn't really
    # belong here, but temporarily...
    if config['wetrun']:
        print("--\nGenerating optimisable function")
        ff = model.optimisable(config['distance'], debug=config['debug'])
        print(ff)
        print("--\nAttempting single run of function")
        pp = [x.get('default', 0) for x in config['params']]
        ff(pp)
        return None

    mode = config.get('job_mode', JOB_MODE)  # JOB_MODE: module-level default defined elsewhere
    weights = [config['weights'].get(x['name'], 1) for x in config['vars']]
    if mode in ('GLP', 'NLP', 'NSP'):
        # The three openopt problem classes are constructed with identical
        # arguments here, so look the constructor up by name instead of
        # repeating the same call three times.
        lb = [x.get('min', 0) for x in config['params']]
        ub = [x.get('max', 0) for x in config['params']]
        problem_class = getattr(openopt, mode)
        return problem_class(
            model.optimisable(config['distance'], weights=weights),
            lb=lb,
            ub=ub,
            # TODO, possibly: support A and b args to define linear constraints
            maxIter=config['max_iter'])
    return None
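
A minimal sketch of how make_optimiser might be driven, assuming openopt is installed; the StubModel class, the config values, and the 'de' solver choice are illustrative assumptions rather than anything from the original example:

class StubModel(object):
    """Hypothetical stand-in for the real model object; optimisable() just
    returns a simple quadratic so the sketch is self-contained."""
    def optimisable(self, distance, weights=None, debug=False):
        return lambda p: sum((x - 0.3) ** 2 for x in p)

config = {
    'wetrun': False,
    'debug': False,
    'job_mode': 'GLP',
    'distance': 'euclidean',   # hypothetical label, passed straight through to the model
    'max_iter': 100,
    'params': [{'name': 'a', 'min': 0.0, 'max': 1.0, 'default': 0.5}],
    'vars': [{'name': 'y'}],
    'weights': {'y': 2},
}

problem = make_optimiser(config, StubModel())
if problem is not None:
    result = problem.solve('de')   # 'de' (differential evolution) is one of openopt's GLP solvers
    print(result.xf)               # parameter vector at the optimum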
Example #2
    "penalty: {}".format(penalty)
    print

    return sharpe - penalty


if args.weights is None:
    initial_weights = np.ones(len(forecasts)) * .5
else:
    initial_weights = np.array([float(x) for x in args.weights.split(",")])
lb = np.ones(len(forecasts)) * 0.0
ub = np.ones(len(forecasts))
plotit = False
p = openopt.NSP(goal='max',
                f=objective,
                x0=initial_weights,
                lb=lb,
                ub=ub,
                plot=plotit)
p.ftol = 0.001
p.maxFunEvals = 150
r = p.solve('ralg')

if r.stopcase == -1 or not r.isFeasible:
    print()
    objective_detail(target, *g_params)
    raise Exception("Optimization failed")

print()
print(r.xf)
ii = 0
for fcast in forecasts:
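    # (snippet truncated here; a hypothetical body might report each forecast's
    # optimised weight from r.xf -- the pairing and print format below are
    # assumptions, not part of the original)
    print("{}: {}".format(fcast, r.xf[ii]))
    ii += 1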
Example #3
    return (pret * 252) / np.sqrt(pvar * 252)   # annualised Sharpe-style ratio (252 trading days)


mean = 0
cnt = 0
gstart = pd.to_datetime("20110101")
start = pd.to_datetime("20110101")
end = pd.to_datetime("20110101") + timedelta(days=30)
while end < pd.to_datetime("20130101"):
    lb = np.ones(10) * 0.0
    ub = np.ones(10)
    plotit = False
    initial_weights = np.asarray([.5, .5, .5, .5, .5, .5, .5, .5, .5, .5])
    #initial_weights = np.asarray([0, 0, 0, 0, 1, 0, 0, 0, 0, 0])

    p = openopt.NSP(goal='max', f=fcn, x0=initial_weights, lb=lb, ub=ub)
    p.args.f = (start, end)
    p.ftol = 0.001
    p.maxFunEvals = 300
    r = p.solve('ralg')
    if r.stopcase == -1 or not r.isFeasible:
        print(objective_detail(target, *g_params))
        raise Exception("Optimization failed")

    print(r.xf)

    for ii in range(10):
        print("{}: {}".format(cols[ii], r.xf[ii]))

    wtrecent = r.xf
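    # (snippet truncated here; the loop presumably rolls the 30-day window
    # forward so the `while end < ...` condition can terminate -- the step
    # below is an assumption, not part of the original)
    start = end
    end = start + timedelta(days=30)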