Example #1
    def optimize_dm(self):
        """
        Calculate a more precise value of the DM by interpolating between DM values to maximise the SNR

        Note:
            This function has not been fully tested.

        Returns:
            optimised DM, optimised SNR
        """
        if self.data is None:
            return None

        def dm2snr(dm):
            time_series = self.dedispersets(dm)
            return -self.get_snr(time_series)

        try:
            out = golden(
                dm2snr,
                full_output=True,
                brack=(-self.dm / 2, self.dm, 2 * self.dm),
                tol=1e-3,
            )
        except (ValueError, TypeError):
            out = golden(dm2snr, full_output=True, tol=1e-3)
        self.dm_opt = out[0]
        self.snr_opt = -out[1]
        return out[0], -out[1]
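A pattern worth noting above: scipy.optimize.golden minimizes, so the SNR is maximized by negating it. A minimal self-contained sketch of the same trick (the objective below is a hypothetical stand-in, not the pulsar code above):

import numpy as np
from scipy.optimize import golden

def snr_like(dm):
    # hypothetical smooth, single-peaked SNR curve with its maximum at dm = 3.0
    return np.exp(-(dm - 3.0) ** 2)

# a 3-point bracket (a, b, c) must satisfy f(b) < f(a) and f(b) < f(c)
dm_opt, neg_snr, funcalls = golden(lambda dm: -snr_like(dm),
                                   brack=(0.0, 3.0, 6.0), full_output=True)
print(dm_opt, -neg_snr)  # ~3.0, ~1.0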
Example #2
def main():
    with open('data.dat', 'r') as f:
        data = [tuple(map(float, line.strip().split())) for line in f]

    data = np.array(data)
    x, y = data.T

    roots = optimize.newton(f1, x0=[-1.0, 0.0, 1.0])
    minima = [
        optimize.golden(f2, brack=(-2.0, 0.0)),
        optimize.golden(f2, brack=(0.0, 1.0))
    ]
    result = fit(x, y, n=3)
    coeffs = result.x
    y_fit = model(coeffs, x)

    print('1. Roots of x^3 - x + 0.25 = 0:')
    print('\n'.join("   {:.4f}".format(x) for x in roots))
    print('\n3. Minima of (x^2 - 1)^2 + x:')
    print('\n'.join("   {:.4f}".format(x) for x in minima))
    print('\n5. Fit of data')
    print('   Params:      {}'.format(coeffs))
    print('   Iterations:  {}'.format(result.nit))

    warning = " (DEMO ONLY. PLEASE RUN `make` INSTEAD.)"
    plot(x, y, y_fit, 'plot_1.png', 'Data fit' + warning)
Example #3
    def test_golden(self):
        x = optimize.golden(self.fun)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, brack=(-3, -2))
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, full_output=True)
        assert_allclose(x[0], self.solution, atol=1e-6)

        x = optimize.golden(self.fun, brack=(-15, -1, 15))
        assert_allclose(x, self.solution, atol=1e-6)
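For reference, the variants exercised in this test differ only in how the result is reported; with full_output=True, scipy.optimize.golden returns (xmin, fval, funcalls). A quick sketch:

from scipy.optimize import golden

fun = lambda x: (x - 1.5) ** 2
xmin = golden(fun)                                    # scalar minimizer only
xmin, fval, funcalls = golden(fun, full_output=True)
print(xmin, fval, funcalls)                           # ~1.5, ~0.0, evaluation count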
Example #5
def BF5(x):
    it = 0
    flag = True
    B = np.identity(x.shape[0])
    while (flag):
        p = pp(B, df5(x))
        a = sopt.golden(f5d, args=(
            x,
            p,
        ))
        xo = x
        xn = x + a * p
        s = ss(xo, xn)
        y = yy5(xo, xn)
        B = BB(B, y, s)
        x = xn
        it = it + 1
        # print('it=',it,'\n')
        # print('p=',p,'\n')
        # print('a=',a,'\n')
        # print('x=',x,'\n')
        x1.append(x[0])
        x2.append(x[1])
        pv.append(np.linalg.norm(df5(x)))
        if (con5(x) < e or it >= mi):
            flag = False
            print(it, '\n')
            return x
Example #6
def _inverse_analytic_encircled_energy(fno,
                                       wavelength,
                                       energy=FIRST_AIRY_ENCIRCLED):
    def optfcn(x):
        return (_analytical_encircled_energy(fno, wavelength, x) - energy)**2

    return optimize.golden(optfcn)
Example #7
def line_search(x1,x2,n):
    x = Symbol('x'); y = Symbol('y')
    p1 = Symbol('p1'); p2 = Symbol('p2')
    alpha = Symbol('alpha') 
    x_k = Matrix([[x1], [x2]])
    for i in range(n):

        R =  Res(x_k[0],x_k[1])
        J =  Jacob(x_k[0],x_k[1])
        p =  linear_system(J,R)
        #a = Diff(x_k[0],x_k[1],p[0],p[1])
        #pprint (a)
        
        #y = solve(a,alpha)
        #print y
        #total = []
        #for root in y:
            #if "I" not in str(root):
                #total.append(root.evalf())
        #alpha = min(total)
        alpha = golden(lambda z: f(x_k[0],x_k[1],p[0],p[1],z))
        #alpha = basinhopping(lambda z: f(x_k[0],x_k[1],p[0],p[1],z),0)
        x = x_k + p*alpha
        f_ = R.T*R
        #print 'x_k[0],x_k[1],f[0],alpha'
        print(i, x_k[0], x_k[1], f_[0], alpha)
        x_k = x
    return       
Example #8
def get_response_content(fs):
    # read the alignment
    try:
        alignment = Fasta.Alignment(fs.fasta.splitlines())
    except Fasta.AlignmentError as e:
        raise HandlingError('fasta alignment error: ' + str(e))
    if alignment.get_sequence_count() != 2:
        raise HandlingError('expected a sequence pair')
    # read the rate matrix
    R = fs.matrix
    # read the ordered states
    ordered_states = Util.get_stripped_lines(fs.states.splitlines())
    if len(ordered_states) != len(R):
        msg_a = 'the number of ordered states must be the same '
        msg_b = 'as the number of rows in the rate matrix'
        raise HandlingError(msg_a + msg_b)
    if len(set(ordered_states)) != len(ordered_states):
        raise HandlingError('the ordered states must be unique')
    # create the rate matrix object using the ordered states
    rate_matrix_object = RateMatrix.RateMatrix(R.tolist(), ordered_states) 
    # create the objective function
    objective = Objective(alignment.sequences, rate_matrix_object)
    # Use golden section search to find the mle distance.
    # The bracket is just a suggestion.
    bracket = (0.51, 2.01)
    mle_distance = optimize.golden(objective, brack=bracket)
    # write the response
    out = StringIO()
    print('maximum likelihood distance:', mle_distance, file=out)
    #distances = (mle_distance, 0.2, 2.0, 20.0)
    #for distance in distances:
        #print >> out, 'f(%s): %s' % (distance, objective(distance))
    return out.getvalue()
Example #9
def steepest_descent():
    X_0 = np.array([10.0, -3.0, 0.0])
    num_itr = 128
    X = X_0
    alpha_0 = 1e-2 * np.ones(3)
    prev_G = test_grad(X)
    prev_alpha = alpha_0
    prev_J = test_obj(X)
    eps_a, eps_r, eps_g = 1e-3, 1e-3, 1e-3

    for i in range(num_itr):

        print("Itr", i, "X", X, "obj function", test_obj(X), "gradient", test_grad(X))

        J = test_obj(X)
        G = test_grad(X)
        S = -G/(np.linalg.norm(G))

        # if np.abs(J - prev_J) > eps_a + eps_r * np.abs(prev_J) or np.linalg.norm(G) > eps_g:
        #     print("Converged!")
        #     break
        def f1d(alpha):
            return test_obj(X + alpha*S)
        alpha = sopt.golden(f1d)
        print("Alpha: ", alpha)
        # alpha = prev_alpha * (prev_G.T @ prev_G)/(G.T @ G)
        X += alpha*S
        prev_alpha = alpha
        prev_G = G
        prev_J = J

    pass
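The example above uses golden as the 1-D line search inside steepest descent. A compact runnable sketch of the same loop on a stand-in quadratic (test_obj and test_grad here are assumptions, not the original test functions):

import numpy as np
import scipy.optimize as sopt

A = np.diag([1.0, 4.0, 9.0])
test_obj = lambda X: 0.5 * X @ A @ X     # stand-in objective
test_grad = lambda X: A @ X              # its gradient

X = np.array([10.0, -3.0, 0.5])
for _ in range(20):
    S = -test_grad(X)                                  # steepest-descent direction
    alpha = sopt.golden(lambda a: test_obj(X + a * S))  # 1-D line search
    X = X + alpha * S
print(X)  # approaches the minimizer at the origin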
Example #10
def imscale(data, levels, y1):
    global n, x0, x1, x2
    x0, x1, x2 = levels
    if y1 == 0.5:
        k = (x2 - 2 * x1 + x0) / float(x1 - x0)**2
    else:
        n = 1 / y1
        k = abs(golden(da))

    r1 = np.log10(k * (x2 - x0) + 1)

    v = np.ravel(data)
    v = get_clip(v, 0, None)

    d = k * (v - x0) + 1
    d = get_clip(d, 1e-30, None)

    z = np.log10(d) / r1
    z = np.clip(z, 0, 1)
    z.shape = data.shape

    z = z * 255
    z = z.astype('uint8')

    return z
Example #11
    def fit_optimize_gcv(self,
                         y,
                         x=None,
                         weights=None,
                         tol=1.0e-03,
                         brack=(-100, 20)):
        """
        Fit smoothing spline trying to optimize GCV.

        Try to find a bracketing interval for scipy.optimize.golden
        based on bracket.

        It is probably best to use target_df instead, as it is
        sometimes difficult to find a bracketing interval.

        INPUTS:
           y       -- response variable
           x       -- if None, uses self.x
           df      -- target degrees of freedom
           weights -- optional array of weights
           tol     -- (relative) tolerance for convergence
           brack   -- an initial guess at the bracketing interval

        OUTPUTS: None
           The smoothing spline is determined by self.coef,
           subsequent calls of __call__ will be the smoothing spline.

        """
        def _gcv(pen, y, x):
            self.fit(y, x=x, pen=np.exp(pen))
            a = self.gcv()
            return a

        a = golden(_gcv, args=(y, x), brack=brack, tol=tol)
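The key idea above is that the penalty is searched on a log scale (pen = exp(x)), so golden works on an unconstrained 1-D variable. A sketch with a stand-in GCV curve (the quadratic-in-log form below is an assumption for illustration):

import numpy as np
from scipy.optimize import golden

def gcv_of_logpen(logpen):
    return (logpen - 1.0) ** 2 + 0.1   # stand-in for a real GCV score

best_logpen = golden(gcv_of_logpen, brack=(-100, 20), tol=1e-3)
print(best_logpen, np.exp(best_logpen))  # ~1.0, and the corresponding penalty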
Example #12
def get_mle_rates(tree, alignment, rate_matrix):
    """
    @param tree: a tree with branch lengths
    @param alignment: a nucleotide alignment
    @param rate_matrix: a nucleotide rate matrix object
    @return: a list giving the maximum likelihood rate for each column
    """
    # define the objective function
    objective_function = Objective(tree, rate_matrix)
    # create the cache so each unique column is evaluated only once
    column_to_mle_rate = {}
    # look for maximum likelihood rates
    mle_rates = []
    for column in alignment.columns:
        column_tuple = tuple(column)
        if column_tuple in column_to_mle_rate:
            mle_rate = column_to_mle_rate[column_tuple]
        else:
            if len(set(column)) == 1:
                # If the column is homogeneous
                # then we know that the mle rate is zero.
                mle_rate = 0
            else:
                # redecorate the tree with nucleotide states at the tips
                name_to_state = dict(zip(alignment.headers, column))
                for tip in tree.gen_tips():
                    tip.state = name_to_state[tip.name]
                # Get the maximum likelihood rate for the column
                # using a golden section search.
                # The bracket is just a suggestion.
                bracket = (0.51, 2.01)
                mle_rate = optimize.golden(objective_function, brack=bracket)
            column_to_mle_rate[column_tuple] = mle_rate
        mle_rates.append(mle_rate)
    return mle_rates
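The caching trick above (optimize each unique column once, keyed by tuple) is independent of the likelihood machinery; in isolation it might look like this (cached_argmin is a hypothetical helper):

from scipy.optimize import golden

_cache = {}

def cached_argmin(key, objective, bracket=(0.51, 2.01)):
    # key must be hashable; the example above uses tuple(column)
    if key not in _cache:
        _cache[key] = golden(objective, brack=bracket)
    return _cache[key]

print(cached_argmin('col_a', lambda r: (r - 1.0) ** 2))  # ~1.0, computed once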
Example #13
def imscale2(data, levels, y1):
    # x0, x1, x2  YIELD  0, y1, 1,  RESPECTIVELY
    # y1 = noiselum
    global n, x0, x1, x2  # So that golden can use them
    #print 'data', data
    #print 'levels', levels
    # Normalize?  No.  Unless the data is all ~1e-40 or something...
    #data = data / levels[-1]
    #levels = array(levels) / levels[-1]
    x0, x1, x2 = levels  
    if y1 == 0.5:
        k = (x2 - 2 * x1 + x0) / float(x1 - x0) ** 2
    else:
        n = 1 / y1
        #print 'n x0 x1 x2', n, x0, x1, x2
        #k = golden(da)
        k = abs(golden(da))
        #print 'k', k
        #pause()
    r1 = log10( k * (x2 - x0) + 1)
    v = ravel(data)
    v = clip2(v, 0, None)
    d = k * (v - x0) + 1
    d = clip2(d, 1e-30, None)
    z = log10(d) / r1
    z = clip(z, 0, 1)
    z.shape = data.shape
    z = z * 255
    #z = z.astype(int)
    z = z.astype(uint8)
    return z
Example #14
def bfgs(x0, errors=None, xhistory=None):
    x = x0.copy()
    B = np.eye(2)
    C = np.eye(2)
    
    for k in range(100):

        s = -C @ df(x)

        def f1d(alpha):
            return f(x + alpha*s)
        alpha = sopt.golden(f1d)

        xnew = x + alpha * s
        
        y = df(xnew) - df(x)
        
        Bnew = B + (1/np.dot(y, s))*np.outer(y, y) - (1/np.dot(B@s, s))*np.outer(B@s, B@s)
        
        u = s - C @ y
        Cnew = C + (1/np.dot(s,y))*np.outer(u, s) + (1/np.dot(s,y))*np.outer(s, u) - (np.dot(y,u)/np.dot(s,y)**2)*np.outer(s,s)
        
        B = Bnew
        x = xnew
        C = Cnew

        errors.append(np.linalg.norm(x - xstar))
        xhistory.append(x)
        if errors[-1] < 1e-12:
            return x
    return x
Example #15
    def fit_optimize_gcv(self, y, x=None, weights=None, tol=1.0e-03,
                         brack=(-100,20)):
        """
        Fit smoothing spline trying to optimize GCV.

        Try to find a bracketing interval for scipy.optimize.golden
        based on bracket.

        It is probably best to use target_df instead, as it is
        sometimes difficult to find a bracketing interval.

        INPUTS:
           y       -- response variable
           x       -- if None, uses self.x
           df      -- target degrees of freedom
           weights -- optional array of weights
           tol     -- (relative) tolerance for convergence
           brack   -- an initial guess at the bracketing interval

        OUTPUTS: None
           The smoothing spline is determined by self.coef,
           subsequent calls of __call__ will be the smoothing spline.

        """

        def _gcv(pen, y, x):
            self.fit(y, x=x, pen=np.exp(pen))
            a = self.gcv()
            return a

        a = golden(_gcv, args=(y, x), brack=brack, tol=tol)
Example #16
def minimize_lambda(a, b, f_a_b, f_grad_a, f_grad_b):
    # argmin l: F(x + l * grad F(x))
    # argmin l: F(a + l * grad_a(x), b + l * grad_b(x))

    min_func = lambda l: f_a_b(a - l * f_grad_a(a, b), b - l * f_grad_b(a, b))
    # _, _, _, argmim, _ = dm.golden_search(lambda l: f_a_b(a - l * f_grad_a(a, b), b - l * f_grad_b(a, b)), -1, 1)
    # return argmim
    return optimize.golden(min_func, brack=(-1, 1))
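A possible usage sketch for minimize_lambda above, on f(a, b) = a**2 + b**2, where the optimal step along the negative gradient can be checked by hand (it is 0.5, since one exact gradient step reaches the origin):

f_a_b    = lambda a, b: a**2 + b**2
f_grad_a = lambda a, b: 2 * a
f_grad_b = lambda a, b: 2 * b
print(minimize_lambda(2.0, -1.0, f_a_b, f_grad_a, f_grad_b))  # ~0.5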
Example #17
def sensitivityAnalysis(parameter,values,is_time_vary):
    '''
    Perform a sensitivity analysis by varying a chosen parameter over given values
    and re-estimating the model at each.  Only works for perpetual youth version.
    Saves numeric results in a file named SensitivityPARAMETER.txt.
    
    Parameters
    ----------
    parameter : string
        Name of an attribute/parameter of cstwMPCagent on which to perform a
        sensitivity analysis.  The attribute should be a single float.
    values : [np.array]
        Array of values that the parameter should take on in the analysis.
    is_time_vary : boolean
        Indicator for whether the parameter of analysis is time_varying (i.e. 
        is an element of cstwMPCagent.time_vary).  While the sensitivity analysis
        should only be used for the perpetual youth model, some parameters are
        still considered "time varying" in the consumption-saving model and 
        are encapsulated in a (length=1) list.
        
    Returns
    -------
    none
    '''
    fit_list = []
    DiscFac_list = []
    nabla_list = []
    kappa_list = []
    for value in values:
        print('Now estimating model with ' + parameter + ' = ' + str(value))
        Params.diff_save = 1000000.0
        old_value_storage = []
        for this_type in est_type_list:
            old_value_storage.append(getattr(this_type,parameter))
            if is_time_vary:
                setattr(this_type,parameter,[value])
            else:
                setattr(this_type,parameter,value)
            this_type.update()
        output = golden(betaDistObjective,brack=bracket,tol=10**(-4),full_output=True)
        nabla = output[0]
        fit = output[1]
        DiscFac = Params.DiscFac_save
        kappa = calcKappaMean(DiscFac,nabla)
        DiscFac_list.append(DiscFac)
        nabla_list.append(nabla)
        fit_list.append(fit)
        kappa_list.append(kappa)
    with open('./Results/Sensitivity' + parameter + '.txt','w') as f:
        my_writer = csv.writer(f, delimiter='\t',)
        for j in range(len(DiscFac_list)):
            my_writer.writerow([values[j], kappa_list[j], DiscFac_list[j], nabla_list[j], fit_list[j]])
    j = 0
    for this_type in est_type_list:
        setattr(this_type,parameter,old_value_storage[j])
        this_type.update()
        j += 1   
Example #18
def sensitivityAnalysis(parameter,values,is_time_vary):
    '''
    Perform a sensitivity analysis by varying a chosen parameter over given values
    and re-estimating the model at each.  Only works for perpetual youth version.
    Saves numeric results in a file named SensitivityPARAMETER.txt.

    Parameters
    ----------
    parameter : string
        Name of an attribute/parameter of cstwMPCagent on which to perform a
        sensitivity analysis.  The attribute should be a single float.
    values : [np.array]
        Array of values that the parameter should take on in the analysis.
    is_time_vary : boolean
        Indicator for whether the parameter of analysis is time_varying (i.e.
        is an element of cstwMPCagent.time_vary).  While the sensitivity analysis
        should only be used for the perpetual youth model, some parameters are
        still considered "time varying" in the consumption-saving model and
        are encapsulated in a (length=1) list.

    Returns
    -------
    none
    '''
    fit_list = []
    DiscFac_list = []
    nabla_list = []
    kappa_list = []
    for value in values:
        print('Now estimating model with ' + parameter + ' = ' + str(value))
        Params.diff_save = 1000000.0
        old_value_storage = []
        for this_type in est_type_list:
            old_value_storage.append(getattr(this_type,parameter))
            if is_time_vary:
                setattr(this_type,parameter,[value])
            else:
                setattr(this_type,parameter,value)
            this_type.update()
        output = golden(betaDistObjective,brack=bracket,tol=10**(-4),full_output=True)
        nabla = output[0]
        fit = output[1]
        DiscFac = Params.DiscFac_save
        kappa = calcKappaMean(DiscFac,nabla)
        DiscFac_list.append(DiscFac)
        nabla_list.append(nabla)
        fit_list.append(fit)
        kappa_list.append(kappa)
    with open('./Results/Sensitivity' + parameter + '.txt','w') as f:
        my_writer = csv.writer(f, delimiter='\t',)
        for j in range(len(DiscFac_list)):
            my_writer.writerow([values[j], kappa_list[j], DiscFac_list[j], nabla_list[j], fit_list[j]])
    j = 0
    for this_type in est_type_list:
        setattr(this_type,parameter,old_value_storage[j])
        this_type.update()
        j += 1
Example #19
    def fit(self, y, x=None, weights=None, tol=1.0e-03,
            bracket=(0,1.0e-03)):
    
        def _gcv(pen, y, x):
            smoothing_spline.fit(y, x=x, pen=N.exp(pen), weights=weights)
            a = self.gcv()
            return a

        a = golden(_gcv, args=(y,x), brack=(-100,20), tol=tol)
Example #20
 def optimize(self):
     '''
     Main function to optimize theta using theta likelihood function, according to Ewens
     model.
     '''
     theta_like   = lambda x: -self._ewens_theta_likelihood (x)
     self._parameters['theta'] = golden (theta_like, brack=[.01/self.community.J, self.community.J])
     self._parameters['m']     = self.theta / self.community.J / 2
     self._parameters['I']     = self.m * (self.community.J - 1) / (1 - self.m)
     self._lnL                = self.likelihood (self.theta)
Example #21
def tps_rpm_zrot(x_nd, y_md, n_iter = 5, reg_init = .1, reg_final = .001, rad_init = .2, rad_final = .001, plotting = False, verbose=True):
    """
    Do tps_rpm algorithm for each z angle rotation
    Then don't reestimate affine part in tps optimization
    """
    n,d = x_nd.shape
    regs = loglinspace(reg_init, reg_final, n_iter)
    rads = loglinspace(rad_init, rad_final, n_iter)
    zrots = np.linspace(-np.pi/3, np.pi/3, 7)

    displacement = np.median(y_md,axis=0) - np.median(x_nd, axis=0) 
    
    costs = []
    
    if plotting:
        plotter = FuncPlotter()
        
    zrot2func = {}
    def fit0(zrot):
        f = ThinPlateSplineFixedRot(np.array([[cos(zrot), sin(zrot), 0], [-sin(zrot), cos(zrot), 0], [0, 0, 1]]))        
        f.a_Dd[3, :3] = displacement
        
        for i in range(n_iter):
    
            if f.d==2 and plotting and i%plotting==0:
                import matplotlib.pyplot as plt            
                plt.clf()
    
            xwarped_nd = f.transform_points(x_nd)
            corr_nm = calc_correspondence_matrix(xwarped_nd, y_md, r=rads[i], p=.2)
            
            wt_n = corr_nm.sum(axis=1)
            targ_nd = np.dot(corr_nm/wt_n[:,None], y_md)
            f.fit(x_nd, targ_nd, regs[i], wt_n = wt_n, verbose=verbose)
    
            if plotting and i%plotting==0:
                plot_orig_and_warped_clouds(f.transform_points, x_nd, y_md)

        print "zrot: %.3e, cost: %.3e,  meancorr: %.3e"%(zrot, f.cost, corr_nm.mean())
        cost = abs(zrot)/2 + f.cost
        if plotting: plotter.addpt(zrot, cost)
        zrot2func[zrot] = f
        return cost
    
    
    costs = [fit0(zrot) for zrot in zrots]
    i_best = np.argmin(costs)

    import scipy.optimize as so
    zspacing = zrots[1] - zrots[0]  
    print (zrots[i_best] - zspacing, zrots[i_best], zrots[i_best] + zspacing)
    zrot_best = so.golden(fit0, brack = (zrots[i_best] - zspacing, zrots[i_best], zrots[i_best] + zspacing), tol = .15)
    f_best = zrot2func[zrot_best]
    return f_best
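The search strategy above, a coarse grid followed by golden refinement bracketed at the best grid point, is useful on its own. A 1-D sketch with a stand-in cost:

import numpy as np
import scipy.optimize as so

cost = lambda z: (z - 0.4) ** 2 + 0.05 * abs(z)   # stand-in cost
zs = np.linspace(-np.pi / 3, np.pi / 3, 7)
i_best = int(np.argmin([cost(z) for z in zs]))
dz = zs[1] - zs[0]
z_best = so.golden(cost, brack=(zs[i_best] - dz, zs[i_best], zs[i_best] + dz),
                   tol=0.15)
print(z_best)  # near 0.4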
Example #22
def gauss_method(iter_count, a, b, f, f_grad_a, f_grad_b):
    current_iter = 0
    points = [(a, b)]
    ls = [-1]
    while True:
        cur_a, cur_b = points[-1]
        if len(points) % 2 == 0:
            l = optimize.golden(lambda l1: f(l1, cur_b), brack=(-1, 1))
            next_a = l
            next_b = cur_b
        else:
            l = optimize.golden(lambda l1: f(cur_a, l1), brack=(-1, 1))
            next_a = cur_a
            next_b = l
        ls.append(l)
        points.append((next_a, next_b))
        current_iter += 1
        if abs(f(cur_a, cur_b) - f(next_a, next_b)) < EPS or current_iter > iter_count:
            break
    return points, ls, current_iter
Example #23
	def optimizeWidth(self,aperture):
		self.propagateToGrating(aperture.wavelength)
		def f(width):
			aperture.width = width
			setAperturePoints(aperture,21)
			setCosMode(aperture)
			propagateTo(aperture)
			a = fractionCoupledInto(aperture)
			print('fraction =', a)
			return -a
		return optimize.golden(f,brack = (100e-9,10e-6), tol=1e-6)		
Example #24
    def best_value_indices(self, indices, key='loss', assign_at_end=False,
                           give_history=True):
        """Given indices, use Golden Ratio search to compute the best model_tensor[indices]."""

        # remember the original value
        orig_value = self.get_value(indices=indices)

        # function to compute the loss
        loss_closure = self.get_closure_loss(indices=indices, key=key)

        orig_loss = loss_closure(orig_value)['loss']

        # history of metrics
        history = []

        def closure_with_history(x, give_history=give_history):
            """Given a value, compute the loss, store the value (optionally) and return by key."""
            result = loss_closure(x)

            if give_history:
                history.append(result['metrics'])

            return result['loss']

        params = {x: y for x, y in self.golden_params.items()}
        if 'smartbracket' in self.golden_params:
            del params['smartbracket']
            params['brack'] = tuple([orig_value + t for t in self.golden_params['smartbracket']])

        # running optimization
        best_value, best_loss, iterations = golden(closure_with_history,
                                                   full_output=True, **params)

        # restoring the value if requested
        if not assign_at_end:
            self.set_value(indices=indices, val=orig_value)
        else:
            if best_loss < orig_loss:
                self.set_value(indices=indices, val=best_value)
            else:
                if not self.silent:
                    logging.warning(f"No improvement {indices} {key} {assign_at_end}")
                self.set_value(indices=indices, val=orig_value)
                best_value = orig_value
                best_loss = orig_loss

        return {
            'assigned': assign_at_end,
            'history': history,
            'orig_value_param': orig_value,
            'best_value': best_value,
            'best_loss': best_loss,
            'iterations': iterations
        }
Example #25
def target_to_kappa(d, alpha, beta, x):
    target, val = d
    N = x.shape[0]
    def obj(kappa):
        exp_edges = np.sum(edge_probabilities(alpha, beta, kappa, x))
        if target == 'degree':
            exp_degree = exp_edges / (1.0 * N)
            return abs(exp_degree - val)
        elif target == 'density':
            exp_density = exp_edges / (1.0 * N ** 2)
            return abs(exp_density - val)
    return opt.golden(obj)
Example #26
def bfgs(obj, grad, hessian, X_0, eps_a=1e-12, eps_r=1e-16, eps_g=1e-8, num_itr=500):

    X = X_0
    B_inv_prev = np.linalg.pinv(hessian(X_0))
    # H = hessian(rosen)
    # B_inv_prev = H(X)s
    # print(B_inv_prev)
    # B_prev = None
    G = grad(X)
    alpha_min = 1e-8
    for i in range(num_itr):

        print("Itr", i, "X", X, "obj function", obj(X), "gradient", G)

        if np.linalg.norm(G) < eps_g:
            print("converged")
            break

        p = -(B_inv_prev @ G)
        alpha = sopt.golden(lambda t: obj(X + t*p), maxiter=1000)
        # alpha = sopt.line_search(obj, grad, X, p, maxiter=1000000)
        # alpha = newtons_method(grad, hessian, X, p, 10)
        
        # alpha = max(alpha, alpha_min)
        # alpha = gss(obj, X, p)
        # print(alpha)
        # alpha, _, _ = strongwolfe(obj, grad, p, X, obj(X), grad(X))
        s = alpha * p
        X_next = X + s
        lhs = np.abs(obj(X) - obj(X_next))
        rhs = eps_r * obj(X)
        # print('conv check: ', lhs, rhs)
        # if lhs < rhs:
        #     print("converged")
        #     break
        # if np.linalg.norm(G) < 1e-5:
        #     print("converged")
        #     break

        # print("Itr", i, "X_next", X_next, "alpha", alpha, "p", p)

        G_next = grad(X_next)
        y = G_next - G
        sy = s.T @ y
        # print(sy)
        second = ((sy + y.T @ B_inv_prev @ y)/(sy*sy))*(s @ s.T)
        third = ((B_inv_prev @ y @ s.T) + (s @ (y.T @ B_inv_prev)))/sy
        B_inv_prev = B_inv_prev + second - third

        X = X_next
        G = G_next

    return X
Example #27
def steepest_descent(x, beta, gamma, theta=0.001):
    while True:
        dx = dPsi(x, beta, gamma)

        def aux(p):
            return (Psi(x - p * dx, beta, gamma))

        p_opt = sopt.golden(aux)
        if (Psi(x, beta, gamma) - Psi(x - p_opt * dx, beta, gamma) < theta):
            break
        x = x - p_opt * dx
    return x
Example #28
def steepest_descent(poits: list, epsx: float, s: np.ndarray = np.nan, alpha_opt: float = np.nan,
                     counter=1) -> np.ndarray:
    def f1d(alpha):
        return fun(x + alpha*s)
    x = poits[-1]
    s = -dfun(x)
    alpha_opt = sopt.golden(f1d)
    next_guess = x + alpha_opt * s
    poits.append(next_guess)

    if(abs(next_guess - poits[-2])[0] < epsx and abs(next_guess - poits[-2])[1] < epsx):
        return poits
    return steepest_descent(poits, epsx, s, alpha_opt, counter+1)
Example #29
    def ee_radius(self, energy=FIRST_AIRY_ENCIRCLED):
        """Radius associated with a certain amount of enclosed energy."""
        k, v = list(self._ee.keys()), list(self._ee.values())
        if energy in v:
            idx = v.index(energy)
            return k[idx]

        def optfcn(x):
            return (self.encircled_energy(x) - energy)**2

        # golden seems to perform best in presence of shallow local minima as in
        # the encircled energy
        return optimize.golden(optfcn)
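To illustrate the comment about shallow local minima: a small ripple on a convex bowl still leaves golden near the global dip, because the section search never relies on derivatives. A stand-in function:

import numpy as np
from scipy.optimize import golden

rippled = lambda x: (x - 2.0) ** 2 + 0.01 * np.sin(40 * x)
print(golden(rippled, brack=(0.0, 2.0, 4.0)))  # close to 2.0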
Example #30
def target_to_kappa(d, alpha, beta, x):
    target, val = d
    N = x.shape[0]

    def obj(kappa):
        exp_edges = np.sum(edge_probabilities(alpha, beta, kappa, x))
        if target == 'degree':
            exp_degree = exp_edges / (1.0 * N)
            return abs(exp_degree - val)
        elif target == 'density':
            exp_density = exp_edges / (1.0 * N**2)
            return abs(exp_density - val)

    return opt.golden(obj)
Example #31
    def fit(self, using='likelihood', bins=None, iprint=0, opt='golden'):
        '''
        
    >>  Fit the kernel length scale parameter
        [ epsilon ] 
    
    
    >>  Possible keyword arguments and default values:
        
        using : string
            Fit by either maximising the log likelihood function
            i.e. using = 'likelihood'
            OR
            by minimising the cross-validation error
            i.e. using = 'cross-validate'
            
        bins : integer
            Only if:
            using = 'cross-validate'
            The number of random cross-vaildation sample bins used to calculate
            the cross-validation error
        
        '''

        if using == 'cross-validate':
            # minimise the cross validation error
            f_obj = lambda x: self.cross_validate(
                abs(self.epsilon + x), bins, iprint=iprint)
        else:
            # maximise the log likelihood function
            f_obj = lambda x: -self.log_likelihood(abs(self.epsilon + x),
                                                   iprint=iprint)

        from scipy.optimize import fmin_slsqp, golden

        if opt == 'golden':
            xopt = golden(f_obj)
        else:
            xopt = fmin_slsqp(f_obj, 0, iprint=iprint)

        self.kwdict['epsilon'] = abs(self.epsilon + xopt)

        # set the RBF functions
        self._set_funct()
        #
        # calculate the weights
        self._calc_weights()
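Note the parameterization above: golden searches an unconstrained offset x while abs(self.epsilon + x) keeps the actual parameter positive. In miniature (the objective is a stand-in):

import numpy as np
from scipy.optimize import golden

epsilon = 0.5
neg_log_like = lambda eps: (np.log(eps) - np.log(2.0)) ** 2   # stand-in
xopt = golden(lambda x: neg_log_like(abs(epsilon + x)))
print(abs(epsilon + xopt))  # ~2.0 for this stand-in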
Example #32
def get_response_content(fs):
    """
    @param fs: a FieldStorage object containing the cgi arguments
    @return: a (response_headers, response_text) pair
    """
    # read the alignment
    try:
        alignment = Fasta.Alignment(StringIO(fs.fasta))
    except Fasta.AlignmentError as e:
        raise HandlingError('fasta alignment error: ' + str(e))
    if alignment.get_sequence_count() < 2:
        raise HandlingError('expected at least two sequences')
    # read the rate matrix
    R = fs.matrix
    # read the ordered states
    ordered_states = Util.get_stripped_lines(StringIO(fs.states))
    if len(ordered_states) != len(R):
        msg_a = 'the number of ordered states must be the same '
        msg_b = 'as the number of rows in the rate matrix'
        raise HandlingError(msg_a + msg_b)
    if len(set(ordered_states)) != len(ordered_states):
        raise HandlingError('the ordered states must be unique')
    # create the rate matrix object using the ordered states
    rate_matrix_object = RateMatrix.RateMatrix(R.tolist(), ordered_states)
    # create the distance matrix
    n = alignment.get_sequence_count()
    row_major_distance_matrix = [[0] * n for i in range(n)]
    for i, sequence_a in enumerate(alignment.sequences):
        for j, sequence_b in enumerate(alignment.sequences):
            if i < j:
                # create the objective function using the sequence pair
                objective = Objective((sequence_a, sequence_b),
                                      rate_matrix_object)
                # Use golden section search to find the mle distance.
                # The bracket is just a suggestion.
                bracket = (0.51, 2.01)
                mle_distance = optimize.golden(objective, brack=bracket)
                # fill two elements of the matrix
                row_major_distance_matrix[i][j] = mle_distance
                row_major_distance_matrix[j][i] = mle_distance
    # write the response
    out = StringIO()
    print('maximum likelihood distance matrix:', file=out)
    print(MatrixUtil.m_to_string(row_major_distance_matrix), file=out)
    return out.getvalue()
Example #33
def get_response_content(fs):
    """
    @param fs: a FieldStorage object containing the cgi arguments
    @return: a (response_headers, response_text) pair
    """
    # read the alignment
    try:
        alignment = Fasta.Alignment(StringIO(fs.fasta))
    except Fasta.AlignmentError as e:
        raise HandlingError('fasta alignment error: ' + str(e))
    if alignment.get_sequence_count() < 2:
        raise HandlingError('expected at least two sequences')
    # read the rate matrix
    R = fs.matrix
    # read the ordered states
    ordered_states = Util.get_stripped_lines(StringIO(fs.states))
    if len(ordered_states) != len(R):
        msg_a = 'the number of ordered states must be the same '
        msg_b = 'as the number of rows in the rate matrix'
        raise HandlingError(msg_a + msg_b)
    if len(set(ordered_states)) != len(ordered_states):
        raise HandlingError('the ordered states must be unique')
    # create the rate matrix object using the ordered states
    rate_matrix_object = RateMatrix.RateMatrix(R.tolist(), ordered_states) 
    # create the distance matrix
    n = alignment.get_sequence_count()
    row_major_distance_matrix = [[0]*n for i in range(n)]
    for i, sequence_a in enumerate(alignment.sequences):
        for j, sequence_b in enumerate(alignment.sequences):
            if i < j:
                # create the objective function using the sequence pair
                objective = Objective(
                        (sequence_a, sequence_b), rate_matrix_object)
                # Use golden section search to find the mle distance.
                # The bracket is just a suggestion.
                bracket = (0.51, 2.01)
                mle_distance = optimize.golden(objective, brack=bracket)
                # fill two elements of the matrix
                row_major_distance_matrix[i][j] = mle_distance
                row_major_distance_matrix[j][i] = mle_distance
    # write the response
    out = StringIO()
    print('maximum likelihood distance matrix:', file=out)
    print(MatrixUtil.m_to_string(row_major_distance_matrix), file=out)
    return out.getvalue()
Example #34
def sd(x0, errors=None, xhistory=None):
    x = x0.copy()
    
    for k in range(1000):

        s = -df(x)

        def f1d(alpha):
            return f(x + alpha*s)
        alpha = sopt.golden(f1d)

        x = x + alpha * s

        errors.append(np.linalg.norm(x - xstar))
        xhistory.append(x)
        if errors[-1] < 1e-12:
            return x
    return x
Example #35
	def findPeakTransmissionAngleAt(self,wavelength,aperture):
		self.propagateToGrating(wavelength)
		print('Ed', np.abs(self.grating.E))
		N = su.fwhm(np.abs(self.grating.E))
		deltaLambdaEff = wavelength/(self.order*N)/self.neff(wavelength)
		a = self.grating.pitch
		m = self.order
		lambdaEff = wavelength/self.neff(wavelength)
		inputAngle = self.input.angle
		startAngle = np.arcsin(m/a*(lambdaEff-deltaLambdaEff/2)+np.sin(inputAngle))
		endAngle   = np.arcsin(m/a*(lambdaEff+deltaLambdaEff/2)+np.sin(inputAngle))
		def f(angle):
			aperture.setAngle(angle+np.pi)
			self.setApertureCenterOnRowlandCircle(aperture)
			aperture.makePoints()
			self.propagateTo(aperture)
			return -self.fractionCoupledInto(aperture)
		return optimize.golden(f,brack = (startAngle,endAngle), tol=1e-6,full_output=True)
Example #36
def main():
    # Test functions for all of the problems.
    plt.ion()
    g = lambda x: np.exp(x) - 4 * x
    a = 0
    b = 3
    domain = np.linspace(a, b, 200)
    # Plot prob1
    #plt.plot(domain, g(domain))
    print("Problem 1: ")
    print(golden_section(g, a, b, maxiters=100))
    print(opt.golden(g, brack=(0, 3), tol=1e-5))

    dfn = lambda x: 2 * x + 5 * np.cos(5 * x)
    d2fn = lambda x: 2 - 25 * np.sin(5 * x)
    print("\nProblem 2: ")
    print(newton1d(dfn, d2fn, 0, tol=1e-10, maxiters=500))
    print(opt.newton(dfn, x0=0, fprime=d2fn, tol=1e-10, maxiter=500))

    fs = lambda x: x**2 + np.sin(x) + np.sin(10 * x)
    dfs = lambda x: 2 * x + np.cos(x) + 10 * np.cos(10 * x)
    dom_s = np.linspace(-6, 0, 200)
    # Plot prob3
    plt.plot(dom_s, fs(dom_s))
    plt.grid()
    print("\nProblem 3: ")
    s = secant1d(dfs, 0, -1, tol=1e-10, maxiters=500)[0]
    n = opt.newton(dfs, x0=0, tol=1e-10, maxiter=500)
    print(s)
    print(n)
    print(fs(s), fs(n))

    fb = lambda x: x[0]**2 + x[1]**2 + x[2]**2
    Dfb = lambda x: np.array([2 * x[0], 2 * x[1], 2 * x[2]])
    x = anp.array([150., .03, 40.])
    p = anp.array([-.5, -100., -4.5])
    phi = lambda alpha: fb(x + alpha * p)
    dphi = grad(phi)
    print("\nProblem 4: ")
    alpha, _ = opt.linesearch.scalar_search_armijo(phi, phi(0.), dphi(0.))
    print(alpha)
    print(backtracking(fb, Dfb, x, p))
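golden_section in Problem 1 above is the hand-written routine being checked against opt.golden; it is not shown here, but a minimal version of the classic algorithm might look like this (the return convention is an assumption):

import numpy as np

def golden_section(f, a, b, tol=1e-5, maxiters=100):
    phi = (1 + np.sqrt(5)) / 2
    for _ in range(maxiters):
        c = b - (b - a) / phi            # interior points at the golden ratio
        d = a + (b - a) / phi
        if f(c) < f(d):
            b = d                        # minimum lies in [a, d]
        else:
            a = c                        # minimum lies in [c, b]
        if abs(b - a) < tol:
            break
    return (a + b) / 2

print(golden_section(lambda x: (x - 1.0) ** 2, 0, 3))  # ~1.0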
Example #37
def imscale2(data, levels, y1):
    # x0, x1, x2  YIELD  0, y1, 1,  RESPECTIVELY
    global n, x0, x1, x2  # So that golden can use them
    x0, x1, x2 = levels
    if y1 == 0.5:
        k = (x2 - 2 * x1 + x0) / float(x1 - x0)**2
    else:
        n = 1 / y1
        k = abs(golden(da))
    r1 = log10(k * (x2 - x0) + 1)
    v = ravel(data)
    v = clip2(v, 0, None)
    d = k * (v - x0) + 1
    d = clip2(d, 1e-30, None)
    z = log10(d) / r1
    z = clip(z, 0, 1)
    z.shape = data.shape
    z = z * 255
    z = z.astype(uint8)
    return z
Example #38
def SD5(x):
    it = 0
    flag = True
    while (flag):
        p = -df5(x)
        a = sopt.golden(f5d, args=(x, ))
        x = x + a * p
        it = it + 1
        # print('it=',it,'\n')
        # print('p=',p,'\n')
        # print('a=',a,'\n')
        # print('x=',x,'\n')
        x1.append(x[0])
        x2.append(x[1])
        av.append(a)
        pv.append(np.linalg.norm(df5(x)))
        if (con5(x) < e or it >= mi):
            flag = False
            print(it, '\n')
            return x
Example #39
def imscale2(data, levels, y1):
    # x0, x1, x2  YIELD  0, y1, 1,  RESPECTIVELY
    global n, x0, x1, x2  # So that golden can use them
    x0, x1, x2 = levels  
    if y1 == 0.5:
        k = (x2 - 2 * x1 + x0) / float(x1 - x0) ** 2
    else:
        n = 1 / y1
        k = abs(golden(da))
    r1 = log10( k * (x2 - x0) + 1)
    v = ravel(data)
    v = clip2(v, 0, None)
    d = k * (v - x0) + 1
    d = clip2(d, 1e-30, None)
    z = log10(d) / r1
    z = clip(z, 0, 1)
    z.shape = data.shape
    z = z * 255
    z = z.astype(uint8)
    return z
Example #40
def limiting_z(apparent_mag, absolute_mag, k_corr=None):
    # solve for redshift of source given its apparent & absolute mags
    # use k-correction for an f_lambda standard: k = -2.5 log10(1./(1+z))
    # see Hogg 99 eqn. 27

    if k_corr is None:
        k_corr = lambda z: -2.5 * N.log10(1. / (1. + z))

    def f(z):
        if z > 0:
            # abs to use minimization routines rather than root finding
            return N.abs(absolute_mag +
                         cp.magnitudes.distance_modulus(z, **cp.fidcosmo) + k_corr(z) -
                         apparent_mag)
        else:
            # don't let it pass negative values
            return N.inf

    #res = brute(f, ((1e-8,10),), finish=fmin, full_output=True)
    res = golden(f)
    return res
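The abs() trick noted in the comment above turns a root-finding problem f(z) = 0 into minimizing |f(z)|, so a minimizer like golden can stand in for a root finder. In isolation:

from scipy.optimize import golden

residual = lambda z: abs(z ** 2 - 5.0)      # root of z**2 - 5 at z = sqrt(5)
print(golden(residual, brack=(1.0, 2.0)))   # ~2.236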
Example #41
    def autofocus(self, position):
        '''
        Autofocus on cell at the clicked position
        '''
        pixel_per_um = getattr(self.camera, 'pixel_per_um', None)
        if pixel_per_um is None:
            pixel_per_um = self.calibrated_unit.stage.pixel_per_um()[0]
        size = self.config.autofocus_size * pixel_per_um
        width, height = self.camera.width, self.camera.height

        x, y, z = position
        bracket = (z - 100., z + 100.)  # 200 um around the current focus position

        def image_variance(z):
            self.microscope.absolute_move(z)
            sleep(self.config.autofocus_sleep)  # more?
            image = self.camera.snap()
            frame = image[int(y + height / 2 - size / 2):int(y + height / 2 +
                                                             size / 2),
                          int(x + width / 2 -
                              size / 2):int(x + width / 2 + size /
                                            2)]  # is there a third dimension?
            variance = frame.var()
            return -variance

        z = golden(image_variance, brack=bracket, tol=0.0001)
        #z = minimize_scalar(image_variance, bounds=bracket, tol=0.01, method='bounded')
        #zlist = arange(z-100., z+100.,20)
        #variances = -array([image_variance(z0) for z0 in zlist])
        #i = variances.argmax()
        #z = zlist[i]
        self.microscope.absolute_move(z)
        sleep(self.config.autofocus_sleep)

        relative_z = (z -
                      self.microscope.floor_Z) * self.microscope.up_direction

        self.debug('Focused at position {} above floor'.format(relative_z))
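The autofocus above reduces to maximizing a sharpness score (image variance) over stage position z; negating it makes it a golden minimization. A sketch with a hypothetical focus curve:

from scipy.optimize import golden

neg_sharpness = lambda z: -1.0 / (1.0 + (z - 50.0) ** 2)  # stand-in: peak at z = 50 um
z_best = golden(neg_sharpness, brack=(0.0, 120.0), tol=1e-4)
print(z_best)  # ~50.0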
Example #42
    def test_golden(self):
        x = optimize.golden(self.fun)
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, brack=(-3, -2))
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, full_output=True)
        assert_allclose(x[0], self.solution, atol=1e-6)

        x = optimize.golden(self.fun, brack=(-15, -1, 15))
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.golden(self.fun, tol=0)
        assert_allclose(x, self.solution)

        maxiter_test_cases = [0, 1, 5]
        for maxiter in maxiter_test_cases:
            x0 = optimize.golden(self.fun, maxiter=0, full_output=True)
            x = optimize.golden(self.fun, maxiter=maxiter, full_output=True)
            nfev0, nfev = x0[2], x[2]
            assert_equal(nfev - nfev0, maxiter)
Example #44
def get_mle_rates(tree, alignment, rate_matrix):
    """
    @param tree: a tree with branch lengths
    @param alignment: a nucleotide alignment
    @param rate_matrix: a nucleotide rate matrix object
    @return: a list giving the maximum likelihood rate for each column
    """
    # define the objective function
    objective_function = Objective(tree, rate_matrix)
    # create the cache so each unique column is evaluated only once
    column_to_mle_rate = {}
    # look for maximum likelihood rates
    mle_rates = []
    for column in alignment.columns:
        column_tuple = tuple(column)
        if column_tuple in column_to_mle_rate:
            mle_rate = column_to_mle_rate[column_tuple]
        else:
            if len(set(column)) == 1:
                # If the column is homogeneous
                # then we know that the mle rate is zero.
                mle_rate = 0
            else:
                # redecorate the tree with nucleotide states at the tips
                name_to_state = dict(zip(alignment.headers, column))
                for tip in tree.gen_tips():
                    tip.state = name_to_state[tip.name]
                # Get the maximum likelihood rate for the column
                # using a golden section search.
                # The bracket is just a suggestion.
                bracket = (0.51, 2.01)
                mle_rate = optimize.golden(
                        objective_function, brack=bracket)
            column_to_mle_rate[column_tuple] = mle_rate
        mle_rates.append(mle_rate)
    return mle_rates
Example #45
init_path = np.array([(1.0, 1.0), (1.3, 4.5), (2.4, 3.5), (4.3, 5.1),
                      (5.2, 5.5), (5.8, 4.9), (6.8, 6.0), (7.9, 6.5),
                      (8.2, 7.5), (8.7, 9.3)],
                     dtype=np.float64)
obstacles = 10 * np.random.rand(k, 2)
r = obstacles
x = init_path
c2 = 1

print_list = [10, 25, 50, 100, 200, 400]

pt.figure(figsize=(8, 8))
pt.plot(x[:, 0], x[:, 1], label="Initial")

for iter in range(400):
    c1 = 1000 / (100 + iter)

    def f(a):
        arg = x - a * dobj(x, r, c1, c2)
        return (obj(arg, r, c1, c2))

    a = sopt.golden(f)
    x = x - a * dobj(x, r, c1, c2)
    if iter + 1 in print_list:
        label = 'iter = %s' % (iter + 1)
        pt.plot(x[:, 0], x[:, 1], label=label)

pt.plot(obstacles[:, 0], obstacles[:, 1], "o", markersize=5, label="Obstacles")
pt.legend(loc="best")
pt.show()
Example #46
     param_range = [0.95,0.995]
     spread_range = [0.006,0.008]
 else:
     print('Parameter range for ' + Params.param_name + ' has not been defined!')
 
 if Params.do_param_dist:
     # Run the param-dist estimation
     paramDistObjective = lambda spread : findLorenzDistanceAtTargetKY(
                                                     Economy = EstimationEconomy,
                                                     param_name = Params.param_name,
                                                     param_count = Params.pref_type_count,
                                                     center_range = param_range,
                                                     spread = spread,
                                                     dist_type = Params.dist_type)
     t_start = clock()
     spread_estimate = golden(paramDistObjective,brack=spread_range,tol=1e-4)
     center_estimate = EstimationEconomy.center_save
     t_end = clock()
 else:
     # Run the param-point estimation only
     paramPointObjective = lambda center : getKYratioDifference(Economy = EstimationEconomy,
                                          param_name = Params.param_name,
                                          param_count = Params.pref_type_count,
                                          center = center,
                                          spread = 0.0,
                                          dist_type = Params.dist_type)
     t_start = clock()
     center_estimate = brentq(paramPointObjective,param_range[0],param_range[1],xtol=1e-6)
     spread_estimate = 0.0
     t_end = clock()
     
Example #47
    return -1.0 * (x[0]**alpha)*(x[1]**(1-alpha))

x0 = array([.4,.4])
optX = opt.fmin_bfgs(reparam_utility, x0, args=(p,alpha))
reparam_utility(optX, p, alpha, printX=True)

def optim_target5(x, hyperparams):
    c1,c2,c3 = hyperparams
    return c1*x**2 + c2*x + c3

hyperp = array([1.0, -2.0, 3])
opt.fminbound(optim_target5, -10, 10, args=(hyperp,))
opt.fminbound(optim_target5, -10, 0, args=(hyperp,))

hyperp = array([1.0, -2.0, 3])
opt.golden(optim_target5, args=(hyperp,))
opt.golden(optim_target5, args=(hyperp,), brack=[-10.0,10.0])

opt.brent(optim_target5, args=(hyperp,))

def nlls_objective(beta, y, X):
    b0 = beta[0]
    b1 = beta[1]
    b2 = beta[2]
    return y - b0 - b1 * (X**b2)

X = 10 *rand(1000)
e = randn(1000)
y = 10 + 2 * X**(1.5) + e
beta0 = array([10.0,2.0,1.5])
opt.leastsq(nlls_objective, beta0, args = (y, X))
Example #48
     if my_diff < Params.diff_save:
         Params.beta_save = beta_new
     return my_diff
 
 
 
 # =================================================================
 # ========= Estimating the model ==================================
 #==================================================================
 
 if Params.run_estimation:
     # Estimate the model and time it
     t_start = time()
     if Params.do_beta_dist:
         bracket = (0,0.015) # large nablas break IH version
         nabla = golden(betaDistObjective,brack=bracket,tol=10**(-4))        
         beta = Params.beta_save
         spec_name = spec_add + 'betaDist' + wealth_measure
     else:
         nabla = 0
         if Params.do_tractable:
             top = 0.991
         else:
             top = 1.0
         beta = brentq(betaPointObjective,0.90,top,xtol=10**(-8))
         spec_name = spec_add + 'betaPoint' + wealth_measure
     t_end = time()
     print('Estimate is beta=' + str(beta) + ', nabla=' + str(nabla) + ', took ' + str(t_end-t_start) + ' seconds.')
     #spec_name=None
     makeCSTWresults(beta,nabla,spec_name)
 
Example #49
     if my_diff < Params.diff_save:
         Params.DiscFac_save = DiscFac_new
     return my_diff
 
 
 
 # =================================================================
 # ========= Estimating the model ==================================
 #==================================================================
 
 if Params.run_estimation:
     # Estimate the model and time it
     t_start = time()
     if Params.do_beta_dist:
         bracket = (0,0.015) # large nablas break IH version
         nabla = golden(betaDistObjective,brack=bracket,tol=10**(-4))        
         DiscFac = Params.DiscFac_save
         spec_name = spec_add + 'betaDist' + wealth_measure
     else:
         nabla = 0
         if Params.do_tractable:
             bot = 0.9
             top = 0.98
         else:
             bot = 0.9
             top = 1.0
         DiscFac = brentq(betaPointObjective,bot,top,xtol=10**(-8))
         spec_name = spec_add + 'betaPoint' + wealth_measure
     t_end = time()
     print('Estimate is DiscFac=' + str(DiscFac) + ', nabla=' + str(nabla) + ', took ' + str(t_end-t_start) + ' seconds.')
     #spec_name=None
Example #50
iterates = [x0]
gradients = [df(x0)]
directions = [-df(x0)]


# In[183]:

# Evaluate this cell many times in-place

x = iterates[-1]
s = directions[-1]

def f1d(alpha):
    return f(x + alpha*s)

alpha_opt = sopt.golden(f1d)
next_x = x + alpha_opt*s

g = df(next_x)
last_g = gradients[-1]
gradients.append(g)

beta = np.dot(g, g)/np.dot(last_g, last_g)
directions.append(-g + beta*directions[-1])

print(f(next_x))

iterates.append(next_x)

# plot function and iterates
pt.axis("equal")
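The cell above is one step of nonlinear conjugate gradients with a Fletcher-Reeves beta and golden as the line search. Looped, with a stand-in quadratic for f/df, the pattern is:

import numpy as np
import scipy.optimize as sopt

A = np.array([[3.0, 1.0], [1.0, 2.0]])
f = lambda x: 0.5 * x @ A @ x
df = lambda x: A @ x

x = np.array([2.0, 2.0])
g = df(x)
s = -g
for _ in range(10):
    alpha = sopt.golden(lambda a: f(x + a * s))   # exact-ish line search
    x = x + alpha * s
    g_new = df(x)
    if g_new @ g_new < 1e-16:                     # gradient vanished; done
        break
    beta = (g_new @ g_new) / (g @ g)              # Fletcher-Reeves
    s = -g_new + beta * s
    g = g_new
print(f(x))  # essentially zero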
Example #51
# ([0.99999999999999978, 2.0], 0.0)

# =======================
# problem 3
# =======================
def f1d(alpha):
    return f(x0 + alpha*s)

epsilon=.0001
n=0
N=20
x0=[0,0]
while (abs(f(x0))>epsilon and n<N):
    n=n+1
    s = -df(x0)
    t_crit = sopt.golden(f1d) # Return the minimum of a function
    x1 = x0 + t_crit * s
    x0 = x1
    print(x0, f(x0))

# --------------- x ----------------, f(x)
# (array([ 1.10638298,  1.93617021]), 0.021276595744676996)
# (array([ 0.99881793,  1.99763597]), 2.5149427976600691e-05)

# =======================
# problem 4
# =======================
def f1d0(alpha):
    return f([x0[0]+alpha,x0[1]])

def f1d1(alpha):