def newton(f,start,step,precision):
    f_old = float('Inf')
    x = np.array(start)
    steps = []
    f_new = f(x)
    while abs(f_old-f_new)>precision:
        f_old = f_new
        H_inv = np.linalg.inv(np.matrix(ad.gh(f)[1](x)))
        d = (-H_inv*(np.matrix(ad.gh(f)[0](x)).transpose())).transpose()
        #Change the type from np.matrix to np.array so that we can use it in our function
        x = np.array(x+d*step)[0]
        f_new = f(x)
        steps.append(list(x))
    return x,f_new,steps
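A minimal usage sketch (not from the original snippet): it assumes numpy is imported as np and the ad package is installed, and it exercises the Newton iteration above on a hypothetical convex quadratic.

import numpy as np
import ad

# Hypothetical test objective with its minimum at (1, -2).
def quadratic(x):
    return (x[0] - 1.0)**2 + 2.0*(x[1] + 2.0)**2

# A full Newton step (step=1.0) reaches the minimum of a quadratic in one iteration.
x_opt, f_opt, path = newton(quadratic, start=[0.0, 0.0], step=1.0, precision=1e-8)
print(x_opt, f_opt, len(path))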
Example #2
def AD_vLJ_Optimize(x):
    N = len(x)
    AD_vLJ_gradient = gh(AD_vLJ_vec)[0]

    AD_BFGSres = optimize.minimize(AD_vLJ_vec, np.ravel(x),  \
                                   method='L-BFGS-B',        \
                                   jac = AD_vLJ_gradient,     \
                                   options={'disp': False})
    return np.reshape(AD_BFGSres.x, (N, D))
Example #3
def AD_vLJ_Optimize(x):
    N = len(x)
    AD_vLJ_gradient = gh(AD_vLJ_vec)[0]
    
    AD_BFGSres = optimize.minimize(AD_vLJ_vec, np.ravel(x),  \
                                   method='L-BFGS-B',        \
                                   jac = AD_vLJ_gradient,     \
                                   options={'disp': False})
    return np.reshape(AD_BFGSres.x, (N,D)) 
def steepest_descent(f,start,step,precision):
    f_old = float('Inf')
    x = np.array(start)
    steps = []
    f_new = f(x)
    while abs(f_old-f_new)>precision:
        f_old = f_new
        d = -np.array(ad.gh(f)[0](x))
        x = x+d*step
        f_new = f(x)
        steps.append(list(x))
    return x,f_new,steps
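As a quick, illustrative check (not part of the original), the same driver pattern works for steepest_descent; numpy (as np) and ad are assumed to be imported, and the objective below is made up.

import numpy as np
import ad

# Hypothetical ill-conditioned quadratic; plain gradient descent needs a small step here.
def bowl(x):
    return x[0]**2 + 10.0*x[1]**2

x_opt, f_opt, path = steepest_descent(bowl, start=[3.0, 1.0], step=0.05, precision=1e-10)
print(x_opt, f_opt, len(path))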
Example #5
def projected_gradient_method(f, A, start, step, precision):
    f_old = float('Inf')
    x = np.array(start)
    steps = []
    f_new = f(x)
    while abs(f_old - f_new) > precision:
        f_old = f_new
        gradient = ad.gh(f)[0](x)
        grad_proj = project_vector(A, [-i for i in gradient])  	#The only changes to steepest..
        grad_proj = np.array(grad_proj.transpose())[0] 			#... descent are here!
        x = x + grad_proj * step
        f_new = f(x)
        steps.append(list(x))
    return x, f_new, steps
Example #6
def calc_ideal(f):
    ideal = [0]*2 #Because two objectives
    solutions = [] #list for storing the actual solutions, which give the ideal
    bounds = ((1.,20.),(1.,13.)) #Bounds of the problem
    for i in range(2):
        res=minimize(
            #Minimize one objective at a time
            lambda x: f(x[0],x[1])[i], [1,1], method='SLSQP'
            #Jacobian using automatic differentiation
            ,jac=ad.gh(lambda x: f(x[0],x[1])[i])[0]
            #bounds given above
            ,bounds = bounds
            ,options = {'disp':True, 'ftol': 1e-20, 'maxiter': 1000})
        solutions.append(f(res.x[0],res.x[1]))
        ideal[i]=res.fun
    return ideal,solutions
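A sketch of how calc_ideal might be driven, assuming minimize comes from scipy.optimize and ad is imported as in the snippet; the bi-objective function below is a made-up example chosen to fit the two-variable bounds used above.

import ad
from scipy.optimize import minimize

# Hypothetical bi-objective problem on the box [1, 20] x [1, 13].
def f(x1, x2):
    return [x1**2 + x2**2, (x1 - 20.0)**2 + (x2 - 13.0)**2]

ideal, solutions = calc_ideal(f)
print(ideal)      # best attainable value of each objective on its own
print(solutions)  # objective vectors at the two individual minimizers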
Example #7
def e_constraint_method(f,eps,z_ideal,z_nadir):
    points = []
    for epsi in eps:
        bounds = ((1.,epsi[0]*(z_nadir[0]-z_ideal[0])+z_ideal[0]),
                  ((epsi[1]*(z_nadir[1]-z_ideal[1])+z_ideal[1]),
                   40.)) #Added bounds for 2nd objective
        res=minimize(
            #Second objective
            lambda x: f(x[0],x[1])[0],
            [1,1], method='SLSQP'
            #Jacobian using automatic differentiation
            ,jac=ad.gh(lambda x: f(x[0],x[1])[0])[0]
            #bounds given above
            ,bounds = bounds,options = {'disp':False})
        if res.success:
            points.append(res.x)
    return points
def optimize():
    bounds = ((0.0, 1.), ) * 5  # Bounds of the problem
    # [100%, 80%, 40% ... 0.1%]  return
    # exp_return_constraint = [1.0, 0.8, 0.4, 0.3, 0.2, 0.1, 0.01, 0.009, 0.008, 0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001]
    exp_return_constraint = [
        0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001, 0.0009, 0.0008,
        0.0007, 0.0006, 0.0005, 0.0004, 0.0003, 0.0002, 0.0001
    ]
    results_comparison_dict = {}
    for i in range(len(exp_return_constraint)):
        res = minimize(
            # Objective function defined here
            lambda x: var_cov_matrix(df, x),
            weights,
            method='SLSQP',
            # Jacobian using automatic differentiation
            jac=ad.gh(lambda x: var_cov_matrix(df, x))[0],
            # bounds given above
            bounds=bounds,
            options={'disp': True, 'ftol': 1e-20, 'maxiter': 1000},
            constraints=[
                {'type': 'eq', 'fun': lambda x: sum(x) - 1.0},
                {'type': 'eq',
                 'fun': lambda x: calc_exp_returns(returns, x) - exp_return_constraint[i]},
            ])
        returns_key = round(exp_return_constraint[i] * 100, 2)
        results_comparison_dict.update({returns_key: [res.fun, res.x]})
    return res, results_comparison_dict
def optimize():
    bounds = ((0.0, 1.), ) * len(coins)  # bounds of the problem
    # [0.7%, 0.6% , 0.5% ... 0.1%] returns
    exp_return_constraint = [
        0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001, 0.0009, 0.0008,
        0.0007, 0.0006, 0.0005, 0.0004, 0.0003, 0.0002, 0.0001
    ]
    # exp_return_constraint = [ 0.15, 0.14, 0.13, 0.12, 0.11, 0.10, 0.09, 0.08, 0.07, 0.06,0.05,0.04,0.03]
    results_comparison_dict = {}
    for i in range(len(exp_return_constraint)):
        print(i, 'optimizeinz')
        res = minimize(
            # objective function defined here
            lambda x: var_cov_matrix(df, x),
            weigths,
            method='SLSQP',
            # Jacobian using automatic differentiation
            jac=ad.gh(lambda x: var_cov_matrix(df, x))[0],
            bounds=bounds,
            options={'disp': True, 'ftol': 1e-20, 'maxiter': 500},
            constraints=[
                {'type': 'eq', 'fun': lambda x: sum(x) - 1.0},
                {'type': 'eq',
                 'fun': lambda x: calc_exp_returns(returns, x) - exp_return_constraint[i]},
            ])
        return_key = round(exp_return_constraint[i] * 100, 2)
        results_comparison_dict.update({return_key: [res.fun, res.x]})
    return res, results_comparison_dict
Example #10
##################################################
# Parameters
n = int(1e2)  # monte carlo samples
my_case = 0  # Problem selection
my_base = 1  # Basis selection

# Define basis functions
if my_base == 1:
    Phi = [
        lambda x: x[0], lambda x: x[1], lambda x: x[0]**2, lambda x: x[1]**2,
        lambda x: log(abs(x[0])), lambda x: log(abs(x[1]))
    ]
else:
    Phi = [lambda x: x[0], lambda x: x[1]]
# Gradients
dPhi = [gh(f)[0] for f in Phi]

#################################################
## Experiment Cases
#################################################
if my_case == 1:
    # Quadratic
    m = 2
    fcn = lambda x: x[0]**2 - 2.0 * x[1]**2
    grad, _ = gh(fcn)
    # Expected basis
    B = np.array([[0.], [0.], [0.], [0.], [4.], [1.]])
elif my_case == 2:
    # Mixed Terms
    m = 2
    fcn = lambda x: x[0] - 2.0 * x[1]**2
Example #11
def get_objective(my_case,dim,full=False):
    """Return an m-dimensional objective function
    Usage
        fcn, grad, name = get_objective(my_case,dim)
        fcn, grad, name, opt = get_objective(my_case,dim,full=True)
    Arguments
        my_case = type of function to return:
                    0 = Single-Ridge Function
                    1 = Double-Ridge Function
                    2 = Mixed Function
        dim     = dimension of input
    Keyword Arguments
        full    = full return flag
    Outputs
        fcn  = scalar function handle
        grad = gradient function handle
        name = name of my_case
        opt  = random parameters used to generate fcn, grad
    """
    # Double-Ridge Function
    if my_case == 1:
        # Random vector
        A = normalize([rs() for i in range(dim)])
        B = normalize([rs() for i in range(dim)])
        # Construct function
        fcn = lambda x: sum([x[i]*A[i] for i in range(len(A))])**2 + \
                        sum([x[i]*B[i] for i in range(len(B))])**2
        grad, _ = gh(fcn)
        # Optional outputs
        opt = [A,B]
        # Function type name
        name = "Double-Ridge"
    # Mixed Function
    elif my_case == 2:
        n = dim // 2  # integer division so n and m stay integers
        m = dim-n
        # Sparse random vector
        A = normalize([rs() for i in range(n)] + [0]*m)
        # Complementary sparse random vector
        B = normalize([0]*n + [rs() for i in range(m)])
        # Construct function
        fcn = lambda x: sum([x[i]*A[i] for i in range(len(A))]) + \
                        sum([x[i]**2*B[i] for i in range(len(B))])
        grad, _ = gh(fcn)
        # Optional outputs
        opt = [A,B]
        # Function type name
        name = "Mixed"
    # Randomly-Mixed Function
    elif my_case == 3:
        # Sparse random vector
        A = normalize([rs()*randint(0,1) for i in range(dim)])
        # Complementary sparse random vector
        B = normalize([(A[i]==0)*rs()*randint(0,1) for i in range(dim)])
        # Construct function
        fcn = lambda x: sum([x[i]*A[i] for i in range(len(A))]) + \
                        sum([x[i]**2*B[i] for i in range(len(B))])
        grad, _ = gh(fcn)
        # Optional outputs
        opt = [A,B]
        # Function type name
        name = "Random-Mixed"
    # Single-Ridge Function
    else:
        # Random vector
        A = normalize([rs() for i in range(dim)])
        # Construct function
        fcn = lambda x: sum([x[i]*A[i] for i in range(len(A))])**2
        grad, _ = gh(fcn)
        # Optional outputs
        opt = A
        # Function type name
        name = "Single-Ridge"

    # Minimal return
    if full == False:
        return fcn, grad, name
    # Full return
    else:
        return fcn, grad, name, opt
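For orientation, a possible call pattern (illustrative only), assuming the module-level helpers normalize, rs and randint used above are available alongside gh:

# Hypothetical quick check of the default Single-Ridge case in 5 dimensions.
fcn, grad, name = get_objective(0, 5)
x0 = [0.1, -0.2, 0.3, 0.0, 0.5]
print(name, fcn(x0), grad(x0))

# full=True also returns the random direction(s) used to build the function.
fcn, grad, name, opt = get_objective(3, 4, full=True)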
Example #12
def efficient_frontier():
    values = request.get_json()
    timeseries = values.get('timeseries')

    potfolio_size = len(timeseries) - 1
    weigths = np.random.dirichlet(
        alpha=np.ones(potfolio_size),
        size=1)  # makes sure that the weights sum to 1.
    EXP_RETURN_CONSTRAINT = [
        0.007, 0.006, 0.005, 0.004, 0.003, 0.002, 0.001, 0.0009, 0.0008,
        0.0007, 0.0006, 0.0005, 0.0004, 0.0003, 0.0002, 0.0001
    ]
    BOUNDS = ((0.0, 1.), ) * potfolio_size  # bounds of the problem

    df = pd.DataFrame(timeseries)
    df.set_index('date', inplace=True)
    df = df.pct_change()
    df = df.replace([np.inf, -np.inf], np.nan)
    df = df.dropna()

    returns = df.mean()
    results_comparison_dict = {}
    for i in range(len(EXP_RETURN_CONSTRAINT)):
        res = minimize(
            # objective function defined here
            fun=lambda x: var_cov_matrix(df, x),
            x0=weigths,
            method='SLSQP',
            # Jacobian using automatic differentiation
            jac=ad.gh(lambda x: var_cov_matrix(df, x))[0],
            bounds=BOUNDS,
            options={'disp': True, 'ftol': 1e-20, 'maxiter': 1000},
            constraints=[
                {'type': 'eq', 'fun': lambda x: sum(x) - 1.0},
                {'type': 'eq',
                 'fun': lambda x: calc_exp_returns(returns, x) - EXP_RETURN_CONSTRAINT[i]},
            ])
        return_key = round(EXP_RETURN_CONSTRAINT[i] * 100, 2)
        results_comparison_dict.update({return_key: [res.fun, res.x]})

    z = [[x, results_comparison_dict[x][0] * 100]
         for x in results_comparison_dict]
    objects, risk_vals = list(zip(*z))
    # t_pos = np.arange(len(objects))
    data = go.Scatter(x=risk_vals,
                      y=objects,
                      mode='markers',
                      marker=dict(size=20))
    offline.plot({
        'data': [data],
        'layout': layout
    },
                 filename='docs/efficient_frontier_{}.html'.format(
                     datetime.datetime.now().date()))

    keys = sorted(list(results_comparison_dict.keys()))
    index = 0
    x_itemns = list(df.columns)
    # x_itemns.remove('date')

    fig = tools.make_subplots(rows=4, cols=4, subplot_titles=(keys))
    for i in range(1, 5):
        for j in range(1, 5):
            trace = go.Bar(x=x_itemns,
                           y=results_comparison_dict[keys[index]][1],
                           name='{} %'.format(keys[index]))
            fig.add_trace(trace, row=i, col=j)
            index += 1
    fig['layout'].update(
        title='Weights per asset at different expected returns (%)',
        font=dict(color='rgb(255, 255, 255)', size=14),
        paper_bgcolor='#2d2929',
        plot_bgcolor='#2d2929')
    offline.plot(fig,
                 filename='docs/weights_{}.html'.format(
                     datetime.datetime.now().date()))
    return 'docs/weights_{}.html'.format(datetime.datetime.now().date()), 201
Example #13
def gradf(f):
    return ad.gh(f)[0]
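For illustration (not part of the original), the wrapper applies to any scalar function of a list of inputs:

import ad

df = gradf(lambda x: x[0]**2 + 3.0*x[1])  # hypothetical objective
print(df([2.0, 1.0]))                      # approximately [4.0, 3.0]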
Example #14
def returns():
    """Compute and plot the returns. Timeseries must be given via http post"""

    values = request.get_json()
    timeseries = values.get('timeseries')
    if timeseries == {}:
        return 'Select your portfolio'
    df = pd.DataFrame(timeseries)
    df.set_index('date', inplace=True)

    data = []
    returns_list = []
    for coin in df.columns:
        returns = df[coin].pct_change()[1:]
        trace = go.Scatter(x=df.index, y=returns * 100, name=coin)
        data.append(trace)
        returns_list.append(returns)
    layout = build_layout(title='Portfolio Returns',
                          x_axis_title='',
                          y_axis_title='Returns (%)')
    offline.plot({
        'data': data,
        'layout': layout
    },
                 filename='app/public/returns.html')

    portfolio_returns = sum(returns_list)
    sample_mean = np.mean(portfolio_returns)
    sample_std_dev = np.std(portfolio_returns)
    _, pvalue, _, _ = stattools.jarque_bera(portfolio_returns)
    layout = build_layout(
        title='The returns are likely normal.'
        if pvalue > 0.05 else 'The returns are likely not normal.',
        x_axis_title='Value',
        y_axis_title='Occurrences')
    x = np.linspace(-(sample_mean + 4 * sample_std_dev),
                    (sample_mean + 4 * sample_std_dev), len(portfolio_returns))
    sample_distribution = (
        (1 / np.sqrt(sample_std_dev * sample_std_dev * 2 * np.pi)) *
        np.exp(-(x - sample_mean) * (x - sample_mean) /
               (2 * sample_std_dev * sample_std_dev)))
    data = [
        go.Histogram(x=portfolio_returns,
                     nbinsx=len(portfolio_returns),
                     name='Returns'),
        go.Scatter(x=x, y=sample_distribution, name='Normal Distribution')
    ]
    offline.plot({
        'data': data,
        'layout': layout
    },
                 filename='app/public/histogram.html')

    for cor in ['pearson', 'kendall', 'spearman']:
        heatmap = go.Heatmap(z=df.pct_change().corr(method=cor).values,
                             x=df.pct_change().columns,
                             y=df.pct_change().columns,
                             colorscale=[[0, 'rgb(255,0,0)'],
                                         [1, 'rgb(0,255,0)']],
                             zmin=-1.0,
                             zmax=1.0)
        layout = build_layout(title='{} Correlation'.format(cor.title()),
                              x_axis_title='',
                              y_axis_title='')
        offline.plot({
            'data': [heatmap],
            'layout': layout
        },
                     filename='app/public/correlation_{}.html'.format(cor))

    #
    # ROLLING CORRELLATION
    #
    # rolling_correlation = df.rolling(30).corr(pairwise=True)
    # for col in rolling_correlation.columns:
    #     unstacked_df = rolling_correlation.unstack(level=1)[col]
    #     data = []
    #     for unstacked_col in unstacked_df.columns:
    #         if not unstacked_col == col:
    #             trace = go.Scatter(x=unstacked_df.index,
    #                             y=unstacked_df[unstacked_col],
    #                             name=col+'/'+unstacked_col)
    #             data.append(trace)
    #     layout = build_layout(title='{} 30 Days Rolling Correlation'.format(col),
    #                         x_axis_title='',
    #                         y_axis_title='Correlation')
    #     offline.plot({'data': data,
    #                 'layout': layout},
    #                 filename='app/public/rolling_corr_{}.html'.format(col))

    potfolio_size = len(timeseries) - 1
    weigths = np.random.dirichlet(
        alpha=np.ones(potfolio_size),
        size=1)  # makes sure that the weights sum to 1.
    BOUNDS = ((0.0, 1.), ) * potfolio_size  # bounds of the problem

    #
    # HERE THE DF CHANGES
    #
    df = df.pct_change()
    df = df.replace([np.inf, -np.inf], np.nan)
    df = df.dropna()

    returns = df.mean()
    results_comparison_dict = {}
    for i in range(len(EXP_RETURN_CONSTRAINT)):
        res = minimize(
            # objective function defined here
            fun=lambda x: var_cov_matrix(df, x),
            x0=weigths,
            method='SLSQP',
            # Jacobian using automatic differentiation
            jac=ad.gh(lambda x: var_cov_matrix(df, x))[0],
            bounds=BOUNDS,
            options={'disp': True, 'ftol': 1e-20, 'maxiter': 1000},
            constraints=[
                {'type': 'eq', 'fun': lambda x: sum(x) - 1.0},
                {'type': 'eq',
                 'fun': lambda x: calc_exp_returns(returns, x) - EXP_RETURN_CONSTRAINT[i]},
            ])
        return_key = round(EXP_RETURN_CONSTRAINT[i] * 100, 2)
        results_comparison_dict.update({return_key: [res.fun, res.x]})

    z = [[x, results_comparison_dict[x][0] * 100]
         for x in results_comparison_dict]
    objects, risk_vals = list(zip(*z))
    # t_pos = np.arange(len(objects))
    data = go.Scatter(x=risk_vals,
                      y=objects,
                      mode='markers',
                      marker=dict(size=20))
    layout = build_layout(
        title='Risk associated with different levels of returns',
        x_axis_title='Risk %',
        y_axis_title='Expected returns %')
    offline.plot({
        'data': [data],
        'layout': layout
    },
                 filename='app/public/efficient_frontier.html')

    keys = sorted(list(results_comparison_dict.keys()))
    index = 0
    x_itemns = list(df.columns)
    # x_itemns.remove('date')

    fig = tools.make_subplots(rows=4, cols=4, subplot_titles=(keys))
    for i in range(1, 5):
        for j in range(1, 5):
            trace = go.Bar(x=x_itemns,
                           y=results_comparison_dict[keys[index]][1],
                           name='{} %'.format(keys[index]))
            fig.add_trace(trace, row=i, col=j)
            index += 1
    fig['layout'].update(
        title='Weights per asset at different expected returns (%)',
        font=dict(color='rgb(255, 255, 255)', size=14),
        paper_bgcolor='#2d2929',
        plot_bgcolor='#2d2929')
    offline.plot(fig, filename='app/public/weights.html')
    return 'ok'
def diff_L(f,x,m,k):
    #Define the lagrangian for given m and f
    L = lambda x_: f(x_)[0] + (np.matrix(f(x_)[2])*np.matrix(m).transpose())[0,0]
    return ad.gh(L)[k](x)
Example #16
frame2 = cam.read()
pnts2 = cam.projectPoints()
mask2 = cam.mask()

mask = np.logical_and(mask1, mask2)
pntlist = np.array([pnts1[mask], pnts2[mask]])

R = rotateX  #np.identity(3)
t = np.zeros(3)
#pts3 = np.ones((pntlist.shape[0],3))
pts3 = np.array(cam.pointCloud)[mask, :]
x0 = pack(R, t, pts3)

resid = error_func(pntlist)  #ptlist.shape = [:, 2, 2], [pt, image, position]
grad, hess = gh(resid)


def cb(x):
    print "step"


res = optimize.minimize(resid,
                        x0,
                        method='Newton-CG',
                        jac=grad,
                        hess=hess,
                        callback=cb)

print(np.sqrt(res.fun) / pntlist.shape[0] / 2)
Example #17

if __name__ == "__main__":
    # # Test ad on an external objective
    # from ad import gh
    # from example1 import g
    # s  = 1e-1
    # G0 = lambda x: ext_obj(g(adnumber(x)),s)
    # dG0, _ = gh(G0)
    # # Return ordinary values
    # G = lambda x: G0(x).x
    # dG= lambda x: dG0(x).T

    # # Evaluate
    # x0 = np.array([2.5,2.5])
    # val = G0(x0)
    # dif = dG0(x0)
    # # Run BFGS
    # from scipy.optimize import minimize
    # res = minimize(G, x0, method='BFGS', jac=dG0)

    # Test ad on interior objective
    from ad import gh
    from example1 import g, f
    s = 1e-1
    fcn = lambda x: f(x) + log_barrier(g(x)) / 5.0
    dfcn, _ = gh(fcn)
    # Run BFGS
    from scipy.optimize import minimize
    x0 = [0, 0]
    res = minimize(fcn, x0, method='BFGS', jac=dfcn)
Example #18
def gradf(f):
    return ad.gh(f)[0]
Example #19
def laplace(experiment, params, prevOptimRes = None, returnOptimRes = True, verbose = False, optimMethod = 'Newton-CG'):
    '''
    laplaceInfRes, -post_lik = laplace(experiment, params)
    '''
    ridge = 0

    [ydim,T] = np.shape(experiment.data[0]['Y'])
    [ydim, xdim] = np.shape(params['C'])
    numTrials = len(experiment.data)
    trialDur = experiment.trialDur
    binSize = experiment.binSize

    # make big parameters
    C_big, d_big = util.makeCd_big(params,T)
    K_big, K = util.makeK_big(params, trialDur, binSize)
    K_bigInv = np.linalg.inv(K_big)
    
    x_post_mean = []
    x_post_cov = []
    x_vsmGP = []
    x_vsm = []

    post_lik = 0
    
    # store current optimization result to use as initialization for inference in next EM iteration
    lapOptimRes = []

    for trial in range(numTrials):
        if verbose: print('laplace inference trajectory of trial ' +str(trial+1) +'...')
        y = experiment.data[trial]['Y']
        ybar = np.ndarray.flatten(np.reshape(y, ydim*T))

        if prevOptimRes == None:
            xInit = np.ndarray.flatten(np.zeros([xdim*T,1]))
        else:
            xInit = prevOptimRes[trial]

        # Automatic differentiation doesn't work
        if False:
            from ad import gh
            def objective(x): return negLogPosteriorUnNorm(x,ybar,C_big,d_big,K_bigInv,xdim,ydim)
            grad,hess = gh(objective)
            pdb.set_trace()
            resLap = op.minimize(
                fun = objective,
                x0 = xInit,
                method=optimMethod,
                # args = (ybar, C_big, d_big, K_bigInv, xdim, ydim),
                jac = grad,
                hess = hess,
                options = {'disp': False,'maxiter': 10000})

        resLap = op.minimize(
            fun = negLogPosteriorUnNorm,
            x0 = xInit,
            method=optimMethod,
            args = (ybar, C_big, d_big, K_bigInv, xdim, ydim),
            jac = negLogPosteriorUnNorm_grad,
            hess = negLogPosteriorUnNorm_hess,
            options = {'disp': False,'maxiter': 10000})
        lapOptimRes.append(resLap.x)
        post_lik = post_lik + resLap.fun
        x_post_mean.append(np.reshape(resLap.x,[xdim,T]))
        hess = negLogPosteriorUnNorm_hess(resLap.x, ybar, C_big, d_big, K_bigInv, xdim, ydim)
        PostCovGP = np.linalg.inv(hess)
        # PostCovGP = hess

        # resNCG = op.fmin_ncg(
        #     f = negLogPosteriorUnNorm,
        #     x0 = xInit,
        #     fprime = negLogPosteriorUnNorm_grad,
        #     fhess = negLogPosteriorUnNorm_hess,
        #     args = (ybar, C_big, d_big, K_bigInv, xdim, ydim),
        #     disp = False,
        #     full_output = True)
        # lapOptimRes.append(resNCG[0]) 
        # post_lik = post_lik + resNCG[1]
        # x_post_mean.append(np.reshape(resNCG[0],[xdim,T]))
        # hess = -negLogPosteriorUnNorm_hess(resNCG[0], ybar, C_big, d_big, K_bigInv, xdim, ydim)
        # PostCovGP = -np.linalg.inv(hess)

        # resLaplace = op.minimize(
        #     fun = negLogPosteriorUnNorm,
        #     x0 = xInit,
        #     method='TNC',
        #     args = (ybar, C_big, d_big, K_bigInv, xdim, ydim),
        #     jac = negLogPosteriorUnNorm_grad,
        #     hess = negLogPosteriorUnNorm_hess,
        #     options = {'disp': False})
        # post_lik = post_lik + resLaplace.fun
        # x_post_mean.append(np.reshape(resLaplace.x,[xdim,T]))
        # hess = negLogPosteriorUnNorm_hess(resLaplace.x, ybar, C_big, d_big, K_bigInv, xdim, ydim)
        # PostCovGP = -np.linalg.inv(hess)
        
        PostCovGP = PostCovGP + ridge*np.diag(np.ones(xdim*T))
        x_post_cov.append(PostCovGP)

        temp_vsmGP = np.zeros([T,T,xdim])
        for kk in range(xdim):
            temp_vsmGP[:,:,kk] = PostCovGP[kk*T:(kk+1)*T, kk*T:(kk+1)*T]
        x_vsmGP.append(temp_vsmGP)

        temp_vsm = np.zeros([T,xdim,xdim])
        for kk in range(T):
            temp_vsm[kk][:,:] = PostCovGP[kk::T,kk::T]
        x_vsm.append(temp_vsm)
        # pdb.set_trace()

    post_lik = post_lik / numTrials
    laplaceInfRes = {
        'post_mean': x_post_mean,
        'post_cov' : x_post_cov,
        'post_vsm': x_vsm,
        'post_vsmGP': x_vsmGP}

    if returnOptimRes == True:
        return laplaceInfRes, -post_lik, lapOptimRes
    else:
        return laplaceInfRes, -post_lik
Example #20
import ad
import ad.admath
import numpy as np

# test function
def get_longitude(rbcf):
    longitude = ad.admath.atan2(rbcf[1], rbcf[0])
    #d_longitude = longitude.gradient(rbcf)
    return longitude


# test script
x = ad.adnumber(0.2) # create a scalar number for algorithmic differentation
y = ad.adnumber(0.4)
z = ad.admath.atan2(y,x) # ad.admath has math functions that can be used with AD
# z is an ad object
# z.x is just the number
# z.d(x) is dz/dx

rbcf = ad.adnumber(np.array([[0.5], [1.2], [0.4]]))
print(rbcf)

get_longitude_d, get_longitude_dd = ad.gh(get_longitude) # gradient and hessian functions

longitude = get_longitude(rbcf)
print(longitude)
longitude_d = get_longitude_d(rbcf)



print(longitude_d)
Example #21
    return res

if __name__ == "__main__":
    # # Test ad on an external objective
    # from ad import gh
    # from example1 import g
    # s  = 1e-1
    # G0 = lambda x: ext_obj(g(adnumber(x)),s)
    # dG0, _ = gh(G0)
    # # Return ordinary values
    # G = lambda x: G0(x).x
    # dG= lambda x: dG0(x).T

    # # Evaluate
    # x0 = np.array([2.5,2.5])
    # val = G0(x0)
    # dif = dG0(x0)
    # # Run BFGS
    # from scipy.optimize import minimize
    # res = minimize(G, x0, method='BFGS', jac=dG0)

    # Test ad on interior objective
    from ad import gh
    from example1 import g,f
    s = 1e-1
    fcn = lambda x: f(x) + log_barrier(g(x))/5.0
    dfcn, _ = gh(fcn)
    # Run BFGS
    from scipy.optimize import minimize
    x0 = [0,0]
    res = minimize(fcn, x0, method='BFGS', jac=dfcn)
Example #22
def AD_dvLJ(x):
    AD_vLJ_gradient = gh(AD_vLJ_vec)[0]
    return AD_vLJ_gradient(np.ravel(x))
Example #23
def AD_dvLJ(x):
    AD_vLJ_gradient = gh(AD_vLJ_vec)[0]
    return AD_vLJ_gradient(np.ravel(x))
Example #24
def laplace(experiment,
            params,
            prevOptimRes=None,
            returnOptimRes=True,
            verbose=False,
            optimMethod='Newton-CG'):
    '''
    laplaceInfRes, -post_lik = laplace(experiment, params)
    '''
    ridge = 0

    [ydim, T] = np.shape(experiment.data[0]['Y'])
    [ydim, xdim] = np.shape(params['C'])
    numTrials = len(experiment.data)
    trialDur = experiment.trialDur
    binSize = experiment.binSize

    # make big parameters
    #print("params")
    #print(params)
    #print("\n")
    _, xdim = np.shape(params['C'])
    #if xdim == 0:
    #    print("Dealing with 0 latent variables")
    import importlib
    importlib.reload(util)

    C_big, d_big = util.makeCd_big(params, T)
    K_big, K = util.makeK_big(params, trialDur, binSize)
    K_bigInv = np.linalg.inv(K_big)

    x_post_mean = []
    x_post_cov = []
    x_vsmGP = []
    x_vsm = []

    post_lik = 0

    # store current optimization result to use as initialization for inference in next EM iteration
    lapOptimRes = []

    for trial in range(numTrials):
        if verbose:
            print('laplace inference trajectory of trial ' + str(trial + 1) +
                  '...')
        y = experiment.data[trial]['Y']
        ybar = np.ndarray.flatten(np.reshape(y, ydim * T))

        if prevOptimRes == None:
            xInit = np.ndarray.flatten(np.zeros([xdim * T, 1]))
        else:
            xInit = prevOptimRes[trial]

        #print("x0")
        #print(xInit)
        #print("\n")

        # Automatic differentiation doesn't work
        if False:
            from ad import gh

            def objective(x):
                return negLogPosteriorUnNorm(x, ybar, C_big, d_big, K_bigInv,
                                             xdim, ydim)

            grad, hess = gh(objective)
            pdb.set_trace()
            resLap = op.minimize(
                fun=objective,
                x0=xInit,
                method=optimMethod,
                # args = (ybar, C_big, d_big, K_bigInv, xdim, ydim),
                jac=grad,
                hess=hess,
                options={
                    'disp': False,
                    'maxiter': 10000
                })

        resLap = op.minimize(fun=negLogPosteriorUnNorm,
                             x0=xInit,
                             method=optimMethod,
                             args=(ybar, C_big, d_big, K_bigInv, xdim, ydim),
                             jac=negLogPosteriorUnNorm_grad,
                             hess=negLogPosteriorUnNorm_hess,
                             options={
                                 'disp': False,
                                 'maxiter': 10000
                             })
        lapOptimRes.append(resLap.x)
        post_lik = post_lik + resLap.fun
        x_post_mean.append(np.reshape(resLap.x, [xdim, T]))
        hess = negLogPosteriorUnNorm_hess(resLap.x, ybar, C_big, d_big,
                                          K_bigInv, xdim, ydim)
        PostCovGP = np.linalg.inv(hess)
        # PostCovGP = hess

        # resNCG = op.fmin_ncg(
        #     f = negLogPosteriorUnNorm,
        #     x0 = xInit,
        #     fprime = negLogPosteriorUnNorm_grad,
        #     fhess = negLogPosteriorUnNorm_hess,
        #     args = (ybar, C_big, d_big, K_bigInv, xdim, ydim),
        #     disp = False,
        #     full_output = True)
        # lapOptimRes.append(resNCG[0])
        # post_lik = post_lik + resNCG[1]
        # x_post_mean.append(np.reshape(resNCG[0],[xdim,T]))
        # hess = -negLogPosteriorUnNorm_hess(resNCG[0], ybar, C_big, d_big, K_bigInv, xdim, ydim)
        # PostCovGP = -np.linalg.inv(hess)

        # resLaplace = op.minimize(
        #     fun = negLogPosteriorUnNorm,
        #     x0 = xInit,
        #     method='TNC',
        #     args = (ybar, C_big, d_big, K_bigInv, xdim, ydim),
        #     jac = negLogPosteriorUnNorm_grad,
        #     hess = negLogPosteriorUnNorm_hess,
        #     options = {'disp': False})
        # post_lik = post_lik + resLaplace.fun
        # x_post_mean.append(np.reshape(resLaplace.x,[xdim,T]))
        # hess = negLogPosteriorUnNorm_hess(resLaplace.x, ybar, C_big, d_big, K_bigInv, xdim, ydim)
        # PostCovGP = -np.linalg.inv(hess)

        PostCovGP = PostCovGP + ridge * np.diag(np.ones(xdim * T))
        x_post_cov.append(PostCovGP)

        temp_vsmGP = np.zeros([T, T, xdim])
        for kk in range(xdim):
            temp_vsmGP[:, :, kk] = PostCovGP[kk * T:(kk + 1) * T,
                                             kk * T:(kk + 1) * T]
        x_vsmGP.append(temp_vsmGP)

        temp_vsm = np.zeros([T, xdim, xdim])
        for kk in range(T):
            temp_vsm[kk][:, :] = PostCovGP[kk::T, kk::T]
        x_vsm.append(temp_vsm)
        # pdb.set_trace()

    post_lik = post_lik / numTrials
    laplaceInfRes = {
        'post_mean': x_post_mean,
        'post_cov': x_post_cov,
        'post_vsm': x_vsm,
        'post_vsmGP': x_vsmGP
    }

    if returnOptimRes == True:
        return laplaceInfRes, -post_lik, lapOptimRes
    else:
        return laplaceInfRes, -post_lik
Example #25
def fcn(x):
    # P_0 = x[0]
    # P_e = x[1]
    # A_0 = x[2]
    # A_e = x[3]
    # U_0 = x[4]
    # U_e = x[5]
    # a_0 = x[6]

    gam = 1.4; f = 0.05

    return gam*x[2]*x[0]*x[4]**2/x[6]**2*((1+f)*x[5]/x[4]+1) + \
           x[3]*(x[1]-x[0])

grad,_ = gh(fcn)

if __name__ == "__main__":
    # Make nominal values global, len=6
    P_0 = 101e3                     # Freestream pressure, Pa
    P_e = P_0 * 10                  # Exit pressure, Pa
    A_0 = 1.5                       # Capture area, m^2
    A_e = 1.                        # Nozzle exit area, m^2
    U_0 = 100.                      # Freestream velocity, m/s
    U_e = U_0 * 2                   # Exit velocity, m/s
    a_0 = 343.                      # Ambient sonic speed, m/s

    X_nom = [P_0,P_e,A_0,A_e,U_0,U_e,a_0]

    f = fcn(X_nom)
    g = grad(X_nom)
Example #26
from ad import gh  # the gradient and hessian function generator


def objective(x):
    return (x[0] - 10.0)**2 + (x[1] + 5.0)**2


grad, hess = gh(objective)
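The generated callables can be evaluated directly or handed to a SciPy optimizer; the lines below are an illustrative continuation, not part of the original snippet.

from scipy.optimize import minimize

print(grad([0.0, 0.0]))   # [-20.0, 10.0]
print(hess([0.0, 0.0]))   # [[2.0, 0.0], [0.0, 2.0]]

# The exact gradient can be passed straight to a gradient-based optimizer.
res = minimize(objective, x0=[0.0, 0.0], method='BFGS', jac=grad)
print(res.x)              # close to [10.0, -5.0]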
Example #27
if len(sys.argv) > 2:
    my_base = int(sys.argv[2])
else:
    print("Default basis selected...")
    my_base = 0

# Choose basis
Phi, dPhi, Labels = get_basis(my_base, m)

#################################################
## Experiment Cases
#################################################
if my_case == 1:
    # Quadratic
    fcn = lambda x: x[0]**2 + x[1]**2 - 2.0 * x[2]**2
    grad, _ = gh(fcn)
elif my_case == 2:
    # Mixed Terms
    fcn = lambda x: x[0] + x[1] - 2.0 * x[2]**2
    grad, _ = gh(fcn)
else:
    # Ridge Function
    fcn = lambda x: 0.5 * (0.3 * x[0] + 0.3 * x[1] + 0.7 * x[2])**2
    grad, _ = gh(fcn)

#################################################
## Monte Carlo Method
#################################################
# Draw samples
X = 2.0 * (random([n, m]) - 0.5)  # Uniform on [-1,1]
# Build matrix
def grad_h(f,x):
    return [ad.gh(lambda y: f(y)[2][i])[0](x)
            for i in range(len(f(x)[2]))]
    for geno in range(0, 2+1):
        # locus 1 known, locus 2 unknown
        n_geno = np.sum((obs_geno_locus1 == geno) & np.isnan(obs_geno_locus2))
        log_lik += n_geno * log(sum(geno_freq[geno, :]))

        # locus 1 unknown, locus 2 known
        n_geno = np.sum(np.isnan(obs_geno_locus1) & (obs_geno_locus2 == geno))
        log_lik += n_geno * log(sum(geno_freq[:, geno]))

    return -log_lik


# In[ ]:

# create the Jacobian and Hessian of the negative log likelihood
grad_neg_log_lik, hess_neg_log_lik = ad.gh(neg_log_lik)


# In[ ]:

# We don't need equality constraints since we rescale the variables to get
# frequencies
#
# def eq_constraints(x):
#     """Only equality constraint is that the frequencies sum to 1"""
#     return np.array([np.sum(x) - 1.])

# jac_eq_constraints, hess_eq_constraints = ad.gh(eq_constraints)


# In[ ]:
Example #30
def get_basis(my_base,dim):
    """Returns a set of basis functions for AM pursuit
    Usage
        Phi, dPhi, name, Labels = get_basis(my_base, dim)
    Arguments
        my_base = integer selection
        dim     = dimension of input
    Returns
        Phi    = scalar basis functions
        dPhi   = gradients of basis functions
        name   = name of basis type
        Labels = string label for each basis function
    """
    # Define basis functions
    if my_base == 1:
        # Second Order
        Phi = [lambda x, i=i: x[i] for i in range(dim)] + \
              [lambda x, i=i: x[i]**2 for i in range(dim)] + \
              [lambda x, i=i: log(abs(x[i])) for i in range(dim)]
        # Labels
        Labels = ["L_"+str(i) for i in range(dim)] + \
                 ["Q_"+str(i) for i in range(dim)] + \
                 ["G_"+str(i) for i in range(dim)]
        # Gradients 
        dPhi = [gh(f)[0] for f in Phi]
        # Name
        name = "Second-Order"
    elif my_base == 2:
        # Third Order
        Phi = [lambda x, i=i: x[i] for i in range(dim)] + \
              [lambda x, i=i: x[i]**2 for i in range(dim)] + \
              [lambda x, i=i: x[i]**3 for i in range(dim)] + \
              [lambda x, i=i: log(abs(x[i])) for i in range(dim)] + \
              [lambda x, i=i: x[i]**(-1) for i in range(dim)]
        # Labels
        Labels = ["L_"+str(i) for i in range(dim)] + \
                 ["Q_"+str(i) for i in range(dim)] + \
                 ["C_"+str(i) for i in range(dim)] + \
                 ["G_"+str(i) for i in range(dim)] + \
                 ["I_"+str(i) for i in range(dim)]
        # Gradients 
        dPhi = [gh(f)[0] for f in Phi]
        # Name
        name = "Third-Order"
    elif my_base == 3:
        # 2nd Order Legendre Basis
        Phi = [lambda x, i=i: x[i] for i in range(dim)] + \
              [lambda x, i=i: 0.5*x[i]**2 for i in range(dim)] + \
              [lambda x, i=i: 0.5*(x[i]**3-x[i]) for i in range(dim)]
        # Labels
        Labels = ["P_0_"+str(i) for i in range(dim)] + \
                 ["P_1_"+str(i) for i in range(dim)] + \
                 ["P_2_"+str(i) for i in range(dim)]
        # Gradients 
        dPhi = [gh(f)[0] for f in Phi]
        # Name
        name = "Legendre Second-Order"
    elif my_base == 4:
        # 3rd Order Legendre Basis
        Phi = [lambda x, i=i: x[i] for i in range(dim)] + \
              [lambda x, i=i: 0.5*x[i]**2 for i in range(dim)] + \
              [lambda x, i=i: 0.5*(x[i]**3-x[i]) for i in range(dim)] + \
              [lambda x, i=i: 0.5*(5./4.*x[i]**4-3./2.*x[i]**2) for i in range(dim)]
        # Labels
        Labels = ["P_0_"+str(i) for i in range(dim)] + \
                 ["P_1_"+str(i) for i in range(dim)] + \
                 ["P_2_"+str(i) for i in range(dim)] + \
                 ["P_3_"+str(i) for i in range(dim)]
        # Gradients 
        dPhi = [gh(f)[0] for f in Phi]
        # Name
        name = "Legendre Third-Order"
    else:
        # Active Subspace
        Phi = [lambda x, i=i: x[i] for i in range(dim)]
        # Labels
        Labels = ["L_"+str(i) for i in range(dim)]
        # Gradients 
        dPhi = [gh(f)[0] for f in Phi]
        # Name
        name = "Linear"

    return Phi, dPhi, name, Labels
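An illustrative call (not in the original) showing the shape of the returned objects; it assumes gh and the log used above are imported from ad and ad.admath.

# Hypothetical check of the Second-Order basis in two dimensions.
Phi, dPhi, name, Labels = get_basis(1, 2)
x = [0.5, 2.0]
print(name, Labels)
print([p(x) for p in Phi])     # basis functions evaluated at x
print([dp(x) for dp in dPhi])  # their gradients via automatic differentiation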
def asf(f,ref,b_tol,x_start,z_ideal,z_nadir,rho):
    """
    Implementation of achievement scalarizing function.

    Parameters
    ----------
    f : function
        objective functions.
    ref : list
        reference point.
    b_tol : float
        tolerance for beta constraint.
    x_start : list
        starting point.
    z_ideal : list
        ideal vector.
    z_nadir : list
        nadir vector.
    rho : float
        augmentation parameter.

    Returns
    -------
    scipy.optimize.optimize.OptimizeResult
        result of optimization.
    
    Examples
    --------

    """
    
    # bounds and constraints
    b = [(0,1)]*len(x_start)
    betas = [beta(c) for c in range(len(x_start))]
    t = b_tol # tolerance for beta constraint
    
    c = (
         # sum of weights = 1
         {'type':'eq','fun':lambda x: 1-sum(x)}, 
         
         # sum of beta = 1
         # transforming a strict equality constraint into two inequality constraints, 
         # to relax the constraint.
         {'type':'ineq','fun': lambda x: 1+t-sum(np.array(x)*betas)}, 
         {'type':'ineq','fun': lambda x: sum(np.array(x)*betas)-1+t}
        )
    
    # normalizing the reference point
    ref_norm = [(refi-z_ideali)/(z_nadiri-z_ideali) 
                for (refi,z_ideali,z_nadiri) in zip(ref,z_ideal,z_nadir)]
    
    # scalarized function
    def obj(x):
        return np.max(np.array(f(x,z_ideal,z_nadir))-ref_norm)\
           +rho*np.sum(f(x,z_ideal,z_nadir))
    
    start = x_start
    res=minimize(
        #Objective function defined above
        obj, 
        start, method='SLSQP'
        #Jacobian using automatic differentiation
        ,jac=ad.gh(obj)[0]
        #bounds given above
        ,bounds = b
        ,constraints = c
        ,options = {'disp':True, 'ftol': 1e-20,
                'maxiter': 1000})
    return res