Example #1
def opt_ps(data, obj_fun):
    obj_0 = lambda x, data: obj_fun([x,0], data)
    phase0 = minimize(obj_0, (0,), method='Nelder-Mead', args=(data,)).x[0]
    
    obj_1 = lambda x, data: obj_fun([phase0,x], data)
    phase1 = minimize(obj_1, (0,), method='Nelder-Mead', args=(data,)).x[0]
    return phase0 * 18000, phase1 * 18000
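A hypothetical usage sketch (toy_obj and the sample phases below are invented for illustration; opt_ps returns each fitted phase scaled by 18000):

import numpy as np
from scipy.optimize import minimize

def toy_obj(phases, data):
    # quadratic bowl centred on the phases stored in data; np.sum keeps the
    # return value scalar even when minimize passes 1-element arrays
    return np.sum((np.ravel(phases[0]) - data[0]) ** 2) + np.sum((np.ravel(phases[1]) - data[1]) ** 2)

print(opt_ps(np.array([0.25, -0.5]), toy_obj))  # approximately (4500.0, -9000.0)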
Example #2
def blending_weight_optimize(predictions, labels, function_name):
    def loss_func(weights):
        final_prediction = 0
        for weight, prediction in zip(weights, predictions):
            final_prediction += weight * prediction
        return evaluation_functions.evaluate_function(labels, final_prediction, 'rmsle')

    if function_name == "cobyla":
        starting_values = [1.0] * len(predictions)
        # cons = ({'type':'eq','fun':lambda w: 1-sum(w)})
        res = minimize(loss_func, starting_values, method='COBYLA')
    elif function_name == "slsqp":
        starting_values = [1.0] * len(predictions)
        cons = ({'type': 'eq', 'fun': lambda w: 1 - sum(w)})
        bounds = [(0, 1)] * len(predictions)
        res = minimize(loss_func, starting_values, method='SLSQP',
                       bounds=bounds, constraints=cons)
    elif function_name == "tnc":
        starting_values = [1.0] * len(predictions)
        res = optimize.minimize(
            loss_func, starting_values, method='TNC', tol=1e-10)
    print(res['x'])
    return res['fun'], res['x']
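For context, a minimal self-contained variant of the same SLSQP blending idea, with synthetic data and plain RMSE standing in for the evaluate_function RMSLE call above:

import numpy as np
from scipy.optimize import minimize

labels = np.array([1.0, 2.0, 3.0, 4.0])
predictions = [np.array([1.1, 1.9, 3.2, 3.8]),
               np.array([0.9, 2.2, 2.8, 4.1])]

def loss_func(weights):
    blended = sum(w * p for w, p in zip(weights, predictions))
    return np.sqrt(np.mean((labels - blended) ** 2))  # RMSE as a stand-in metric

cons = ({'type': 'eq', 'fun': lambda w: 1 - sum(w)},)
bounds = [(0, 1)] * len(predictions)
res = minimize(loss_func, [0.5, 0.5], method='SLSQP', bounds=bounds, constraints=cons)
print(res['fun'], res['x'])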
Example #3
    def fitSAXS(self, q_obs, Iq_obs, sd_obs, p0=[0.1,1e-4], beamprofile=None):
        # create a local function to return Chi^2
        def chisq(par,obs,calc,sd):
            s = par[0]
            b = par[1]
            Nobs = obs.size
            scalc = s*calc + b # computed profile
            chi2  = (obs - scalc)**2 / sd**2
            return chi2.sum()/Nobs
        
        if not self.saxscalc:
            # compute profiles if not done yet
            if beamprofile is not None:
                self.calc_profiles(q=q_obs, beamprofile=beamprofile)
            else:
                self.calc_profiles(q=q_obs)

        # fit scale and background against the computed profile
        # (the original branched on self.saxscalc but left calc undefined
        # in one branch)
        calc = self.Iq
        opt = minimize(chisq, p0, args=(Iq_obs, calc, sd_obs), method='L-BFGS-B')

        print("Chi-sq value {:7.3f}".format(opt.fun))

        self.saxs_scale = opt.x[0]
        self.saxs_bg    = opt.x[1]

        return opt.fun
Example #4
    def run(self):
        self.pybert.status = "Co-optimizing..."
        max_iter  = self.pybert.max_iter
        vals  = []
        if self.pybert.pretap_tune_enable:
            vals.append(self.pybert.pretap_tune)
        if self.pybert.posttap_tune_enable:
            vals.append(self.pybert.posttap_tune)
        if self.pybert.posttap2_tune_enable:
            vals.append(self.pybert.posttap2_tune)
        if self.pybert.posttap3_tune_enable:
            vals.append(self.pybert.posttap3_tune)
        vals.append(self.pybert.peak_mag_tune)
        vals.append(self.pybert.peak_freq_tune)

        cons = (
          { 'type': 'ineq',
              'fun' : lambda x: 1 - sum(abs(x[:-2]))
          },
          { 'type': 'ineq',
            'fun' : lambda x: gMaxCTLEPeak - x[-2]
          },
          { 'type': 'ineq',
            'fun' : lambda x: gMaxCTLEFreq - x[-1]
          },
        )

        res = minimize(do_coopt, vals, args=(self.pybert,), constraints=cons,
                       options={'disp': gDebugOptimize, 'maxiter': max_iter})
        self.pybert.status = "Ready."
Example #5
    def train(self, data):
        f = self.create_eval(data, self.C)
        g = self.create_gradient(data, self.C)
        method = 'L-BFGS-B'
        options = {
            'disp': False,
            'maxiter': np.inf,
            'maxfun': np.inf
        }
        w0 = np.zeros(data.p+1)
        x = data.x
        a = np.squeeze(data.a)
        b = np.squeeze(data.b)
        args = (x,a,b,self.C,self.max_value)
        results = optimize.minimize(f,w0,method=method,jac=g,options=options,args=args)
        compare_results = False
        if compare_results:
            results2 = optimize.minimize(f, w0, method=method, options=options, args=args)
            err = results.x - results2.x
            print('Rel Error - w: ' + str(norm(err[1:]) / norm(results2.x[1:])))
            print('Rel Error - bias: ' + str(norm(err[0]) / norm(results2.x[0])))
            print('Rel Error - f(w*): ' + str(norm(results.fun - results2.fun) / norm(results2.fun)))
            results = results2

        self.w = results.x[1:]
        self.b = results.x[0]
Example #6
def solve():

    res_cons = optimize.minimize(loss, x0, jac=jac, constraints=cons, method="SLSQP", options=opt)

    res_uncons = optimize.minimize(loss, x0, jac=jac, method="SLSQP", options=opt)

    print "\nConstrained:"
    print res_cons

    print "\nUnconstrained:"
    print res_uncons

    x1, x2 = res_cons["x"]
    f = res_cons["fun"]

    x1_unc, x2_unc = res_uncons["x"]
    f_unc = res_uncons["fun"]

    # plotting
    xgrid = np.mgrid[-2:4:0.1, 1.5:5.5:0.1]
    xvec = xgrid.reshape(2, -1).T
    F = np.vstack([loss(xi) for xi in xvec]).reshape(xgrid.shape[1:])

    ax = plt.axes(projection="3d")
    ax.plot_surface(xgrid[0], xgrid[1], F, rstride=1, cstride=1, cmap=plt.cm.jet, shade=True, alpha=0.9, linewidth=0)
    ax.plot3D([x1], [x2], [f], "og", mec="w", label="Constrained minimum")
    ax.plot3D([x1_unc], [x2_unc], [f_unc], "oy", mec="w", label="Unconstrained minimum")
    ax.legend(fancybox=True, numpoints=1)
    ax.set_xlabel("x1")
    ax.set_ylabel("x2")
    ax.set_zlabel("F")
Example #7
 def test_solver_concordance(self):
     # Assert that dogleg uses fewer iterations than ncg on the Rosenbrock
     # test function, although this does not necessarily mean
     # that dogleg is faster or better than ncg even for this function
     # and especially not for other test functions.
     f = rosen
     g = rosen_der
     h = rosen_hess
     for x0 in (self.easy_guess, self.hard_guess):
         r_dogleg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                             method='dogleg', options={'return_all': True})
         r_trust_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                                method='trust-ncg',
                                options={'return_all': True})
         r_trust_krylov = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                                method='trust-krylov',
                                options={'return_all': True})
         r_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                          method='newton-cg', options={'return_all': True})
         r_iterative = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                                method='trust-exact',
                                options={'return_all': True})
         assert_allclose(self.x_opt, r_dogleg['x'])
         assert_allclose(self.x_opt, r_trust_ncg['x'])
         assert_allclose(self.x_opt, r_trust_krylov['x'])
         assert_allclose(self.x_opt, r_ncg['x'])
         assert_allclose(self.x_opt, r_iterative['x'])
         assert_(len(r_dogleg['allvecs']) < len(r_ncg['allvecs']))
Example #8
    def test_minimize_tol_parameter(self):
        # Check that the minimize() tol= argument does something
        def func(z):
            x, y = z
            return x**2*y**2 + x**4 + 1

        def dfunc(z):
            x, y = z
            return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])

        for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
                       'newton-cg', 'anneal', 'l-bfgs-b', 'tnc',
                       'cobyla', 'slsqp']:
            if method in ('nelder-mead', 'powell', 'anneal', 'cobyla'):
                jac = None
            else:
                jac = dfunc

            with warnings.catch_warnings():
                # suppress deprecation warning for 'anneal'
                warnings.filterwarnings('ignore', category=DeprecationWarning)
                sol1 = optimize.minimize(func, [1,1], jac=jac, tol=1e-10,
                                         method=method)
                sol2 = optimize.minimize(func, [1,1], jac=jac, tol=1.0,
                                         method=method)
                assert_(func(sol1.x) < func(sol2.x),
                        "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x)))
Example #9
def fit_poly(x1, y1, xref, yref, num_free_param , p0x = None, p0y=None, minim=True, weightx=None, weighty =None, leg=False):
    '''
    Assumes input is 2 matched starlists (x1, y1, x2, y2)
    free_param is number of free parameters to be used in the fit
    returns coefficients for best fit polynomial in both x and y
    '''

    if p0x is None:
        p0x = np.zeros(num_free_param)
    if p0y is None:
        p0y = np.zeros(num_free_param)


    if not minim:
        print('weight of first point ', weightx[0], weighty[0])
        c_x, cov_x = curve_fit(poly, np.array([x1, y1]), xref, p0=p0x, sigma=weightx, absolute_sigma=True)
        print('weight of first point ', weightx[0], weighty[0])
        c_y, cov_y = curve_fit(poly, np.array([x1, y1]), yref, p0=p0y, sigma=weighty, absolute_sigma=True)
    else:
        resx = minimize(poly_min, p0x, args=(x1, y1, xref, weightx, leg), method='CG')
        resy = minimize(poly_min, p0y, args=(x1, y1, yref, weighty, leg), method='CG')
        c_x = resx.x
        c_y = resy.x
    print(c_y, c_x)
    return c_x, c_y  
Example #10
    def fit(self, pxx, tryis=100):
        print("Models:", [self.f(px) for px in pxx])

        def weights_loss_func(weights):
            return self.f(self._apply_weights(weights, pxx))

        starting_values = np.ones(len(pxx)) / (len(pxx))
        bounds = tuple((0, 1) for w in starting_values)
        cons = ({'type': 'eq', 'fun': lambda w: 1 - sum(w)})

        res = minimize(weights_loss_func, starting_values,
                       method='SLSQP', bounds=bounds, constraints=cons)

        self.best_score = res['fun']
        self.best_weights = res['x']

        for i in range(tryis):
            starting_values = np.random.uniform(0, 1, size=len(pxx))
            res = minimize(weights_loss_func, starting_values,
                           method='SLSQP', bounds=bounds, constraints=cons)

            if res['fun'] < self.best_score and res['success']:
                self.best_score = res['fun']
                self.best_weights = res['x']

        print('Ensemble: {best_score} = {weights}'.format(
            best_score=self.best_score, weights=self.best_weights))
        return self.best_score
Example #11
    def reconstruct(cls, t, x, y, z):
        """Reconstruct angles for many detections

        :param t#: arrival times in the detectors in ns.
        :param x#,y#,z#: position of the detectors in m.
        :return: theta as given by Montanus2014 eq 21,
                 phi as given by Montanus2014 eq 22.

        """
        if not logic_checks(t, x, y, z):
            return nan, nan

        dt = make_relative(t)[1:]
        dx = make_relative(x)[1:]
        dy = make_relative(y)[1:]
        dz = make_relative(z)[1:]

        cons = {'type': 'eq', 'fun': cls.constraint_normal_vector}

        fit = minimize(cls.best_fit, x0=(0.1, 0.1, .989, 0.),
                       args=(dt, dx, dy, dz), method="SLSQP",
                       bounds=((-1, 1), (-1, 1), (-1, 1), (None, None)),
                       constraints=cons,
                       options={'ftol': 1e-9, 'eps': 1e-7, 'maxiter': 50})
        if fit.success:
            phi1 = arctan2(fit.x[1], fit.x[0])
            theta1 = arccos(fit.x[2])
        else:
            phi1 = nan
            theta1 = nan

        fit = minimize(cls.best_fit, x0=(-0.1, -0.1, -.989, 0.),
                       args=(dt, dx, dy, dz), method="SLSQP",
                       bounds=((-1, 1), (-1, 1), (-1, 1), (None, None)),
                       constraints=cons,
                       options={'ftol': 1e-9, 'eps': 1e-7, 'maxiter': 50})
        if fit.success:
            phi2 = arctan2(fit.x[1], fit.x[0])
            theta2 = arccos(fit.x[2])
        else:
            phi2 = nan
            theta2 = nan

        # In case one of the thetas is smaller than pi/2 (shower from above)
        # and the other is larger than pi/2 (shower from below),
        # the first one is considered correct.
        # If both come from above (or both from below), both thetas are rejected.
        # This check is preceded by a check that the fit did not deliver nans.

        if theta1 <= pi / 2. and (isnan(theta2) or theta2 > pi / 2.):
            theta = theta1
            phi = phi1
        elif (isnan(theta1) or theta1 > pi / 2.) and theta2 <= pi / 2.:
            theta = theta2
            phi = phi2
        else:
            theta = nan
            phi = nan

        return theta, phi
Example #12
def design_for_dv(foil_simulator, dv_goal, rpm, r, dr, u_0, B):
    C_L, C_D, phi = precalc(foil_simulator, dv_goal, 0, 0, (rpm/60) * 2 * pi, r, dr, u_0, B)
    print(C_L, C_D, degrees(phi))
    x0 = [phi, dv_goal, 0.002] # theta, dv, a_prime
    constraints = [
        {'type': 'ineq', 'fun': lambda x: x[0] - (phi-radians(8))},
        {'type': 'ineq', 'fun': lambda x: (phi+radians(10)) - x[0]},
        {'type': 'ineq', 'fun': lambda x: x[1] - dv_goal/2},
        {'type': 'ineq', 'fun': lambda x: 2*dv_goal - x[1]},
        {'type': 'ineq', 'fun': lambda x: x[2]},
        {'type': 'ineq', 'fun': lambda x: 0.2 - x[2]}]
    # (COBYLA, BFGS, and Nelder-Mead variants were tried here as well)
    res = minimize(min_dv, x0, args=(dv_goal, rpm, r, dr, u_0, B, foil_simulator), tol=1e-10,
                   method='SLSQP', constraints=constraints, options={'disp': True, 'maxiter': 1000})
    if (res.fun > 0.1):
        x0 = [phi, dv_goal, 0.02] # theta, dv, a_prime
        ## Restart optimization around previous best
        res = minimize(min_dv, x0, args=(dv_goal, rpm, r, dr, u_0, B, foil_simulator), tol=1e-8,
                       method='COBYLA', constraints=constraints, options={'disp': True, 'maxiter': 1000})
        
    logger.info("dv: {}, goal: {} a_prime={}".format(res.x[1], dv_goal, res.x[2]))
    return res.x, res.fun
Example #13
def prob1():
    """Use the minimize() function in the scipy.optimize package to find the
    minimum of the Rosenbrock function (scipy.optimize.rosen) using the
    following methods:
        Nelder-Mead
        CG
        BFGS
    Use x0 = np.array([4., -2.5]) for the initial guess for each test.
    
    For each method, print whether it converged, and if so, print how many 
        iterations it took.
    """
    # Set up the initial guess.
    x0 = np.array([4.0,-2.5])

    # Test each method.
    info = {}
    info["Nelder-Mead"] = opt.minimize(opt.rosen, x0, method='Nelder-Mead')
    info["CG"] = opt.minimize(opt.rosen, x0, method='CG')
    info["BFGS"] = opt.minimize(opt.rosen, x0, method='BFGS')

    # Report the info.
    for method in info:
        print("Method:\t{}\nConverged:\t{} "
                                    .format(method, info[method]['success']))
        if info[method]['success']:
            print "Number of Iterations:", info[method]['nit'], '\n'
Example #14
	def run_opt(self, hyp):
		if isinstance(self.kernel, kernels.Composite):
			min_hyp = minimize(self.opt_hyp, hyp, method='l-bfgs-b', bounds=((0.01,10.0),(0.5,10.0),(0.01,10.0),(0.5,10.0)), options={'disp':False}) 
		else:
			min_hyp = minimize(self.opt_hyp, hyp, method='l-bfgs-b', bounds=((0.01,10.0),(0.5,7.0)), options={'disp':False}) 
		#log_likelihoods = [] # save for plotting / get function values from optimizer?
		return min_hyp.x, min_hyp.fun, self.jitter
Example #15
    def _minimise_parameters_of_given_album(self, album):
        """
        function minimises the parameters of the given album - one for galpar and one for imgpar 
        """
        # minimise galaxy parameters
        galpar0 = album.get_all_images()[0].galaxy.get_parameters_vector()
        gaus_num = len(galpar0) // 10
        if self.iter_num != 0:
            # args must be a tuple, otherwise scipy unpacks the album itself
            result = op.minimize(self._estimate_likelihood_and_jacobian, galpar0, args=(album,), method='Newton-CG', jac=True)
        else:
            result = op.minimize(album, galpar0, method='BFGS')
            print("normal")
        galpar = result['x']
        self.iter_num += 1
        likelihood_in_album = album(galpar)
        self.f_likelihood.write("Gauss num:%s, iter num:%s, after album likelihood:%s\n" % (gaus_num, self.iter_num, likelihood_in_album))
        self.f_likelihood.flush()
        self._plot_album(album, "%s/gauss_%s_iter_%s_after_album.pdf" % (self.image_dir, str(gaus_num).zfill(3), str(self.iter_num).zfill(3)))

        # minimise image parameters
        for image in album:
            imgpar0 = image.get_parameters_vector()
            #result = op.minimize(self._estimate_likelihood_and_jacobian, imgpar0, args=(image), method='NEWTON-CG', jac=True)
            result = op.minimize(image, imgpar0, method='BFGS')
            imgpar = result['x']
            likelihood_in_image = image(imgpar)
        likelihood_in_images = album(galpar)
        self.iter_num += 1
        self.f_likelihood.write("Gauss num:%s, iter num:%s, after image likelihood:%s\n" % (gaus_num, self.iter_num, likelihood_in_images))
        self.f_likelihood.flush()
        self._plot_album(album, "%s/gauss_%s_iter_%s_after_images.pdf" % (self.image_dir, str(gaus_num).zfill(3), str(self.iter_num).zfill(3)))

        return likelihood_in_images, album 
Example #16
def mkmin(covinv, coords, dimcov):
    """Minimization of chi^2 section of fitter.
    Return minimized result.
    """
    if METHOD not in ('L-BFGS-B',):
        RESULT_MIN = minimize(chi_sq, START_PARAMS, (covinv, coords),
                              method=METHOD)
    else:
        RESULT_MIN = minimize(chi_sq, START_PARAMS, (covinv, coords),
                              method=METHOD, bounds=BINDS,
                              options={'disp': True})
        print("number of iterations = ", RESULT_MIN.nit)
    print("minimized params = ", RESULT_MIN.x)
    print("successfully minimized = ", RESULT_MIN.success)
    print("status of optimizer = ", RESULT_MIN.status)
    print("message of optimizer = ", RESULT_MIN.message)
    print("chi^2 minimized = ", RESULT_MIN.fun)
    if RESULT_MIN.fun < 0:
        print("***ERROR***")
        print("Chi^2 minimizer failed. Chi^2 found to be less than zero.")
    print("chi^2 reduced = ", RESULT_MIN.fun / (dimcov - len(START_PARAMS)))
    print("degrees of freedom = ", dimcov - len(START_PARAMS))
    return RESULT_MIN
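mkmin depends on the module-level names METHOD, START_PARAMS, BINDS, and chi_sq; the stand-ins below (all hypothetical) make the snippet runnable:

import numpy as np
from scipy.optimize import minimize

METHOD = 'L-BFGS-B'
START_PARAMS = np.array([1.0, 0.5])
BINDS = ((None, None), (None, None))

def chi_sq(params, covinv, coords):
    # toy chi^2: straight-line model a + b*t against the measured points
    t, y = coords
    resid = y - (params[0] + params[1] * t)
    return resid @ covinv @ resid

mkmin(np.eye(3), (np.arange(3.0), np.array([1.0, 2.1, 2.9])), dimcov=3)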
Example #17
    def optimize(self):
        """
        Optimizes the marginal log likelihood and returns the best found
        hyperparameter configuration theta.

        Returns
        -------
        theta : np.ndarray(H)
            Hyperparameter vector that maximizes the marginal log likelihood
        """
        # Start optimization from the previous hyperparameter configuration
        p0 = self.gp.kernel.vector
        p0 = np.append(p0, np.log(self.noise))

        if self.use_gradients:
            # minimize returns an OptimizeResult, not a tuple
            results = optimize.minimize(self.nll, p0,
                                        method="BFGS",
                                        jac=self.grad_nll)
            theta = results.x
        else:
            try:
                results = optimize.minimize(self.nll, p0, method='L-BFGS-B')
                theta = results.x
            except ValueError:
                logging.error("Could not find a valid hyperparameter configuration! Use initial configuration")
                theta = p0

        return theta
Example #18
def optimize_gap_single(h,direct=True):
  """Return the gap, just one time"""
  hkgen = h.get_hk_gen() # get generator
  dim = h.dimensionality # dimensionality
  if direct: # return the direct gap
    def fg(k): # minimize the gap
      es = lg.eigvalsh(hkgen(k)) # eigenvalues
      return np.min(es[es>0.])-np.max(es[es<0.]) # return gap
    x0 = np.random.random(dim) # random point
    bounds = [(0,1.) for i in range(dim)] # bounds
    result = opt.minimize(fg,x0,bounds=bounds,method="SLSQP")
    x = result.x # position of the minimum gap
    return (fg(x),x)
  else: # indirect gap
    def fg(k): # minimize the gap
      k1 = np.array([k[2*i] for i in range(dim)])
      k2 = np.array([k[2*i+1] for i in range(dim)])
      es1 = lg.eigvalsh(hkgen(k1)) # eigenvalues
      es2 = lg.eigvalsh(hkgen(k2)) # eigenvalues
      return np.min(es1[es1>0.])-np.max(es2[es2<0.]) # return gap
    x0 = np.random.random(dim*2) # random point
    bounds = [(0,1.) for i in range(2*dim)] # bounds
    result = opt.minimize(fg,x0,bounds=bounds,method="SLSQP")
    x = result.x # position of the minimum gap
    return (fg(x),x)
Example #19
def optimize_portfolio(number_of_sims):
	#Defining bounds for optimization
	#Bounds to be [0,1] for any given weight 
	bounds = tuple((0, 1) for x in range(noa))
	constraints = ({'type': 'eq', 'fun': lambda x: (np.sum(x) - 1)})

	#Use evenly distributed weights as the initial starting point 
	init_weights = noa * [1.0 / noa]
	
	#optimize portfolios
	opt_sharpe = spo.minimize(min_func_sharpe, init_weights, method = 'SLSQP', bounds = bounds, constraints = constraints)
	opt_variance = spo.minimize(min_func_variance, init_weights, method = 'SLSQP', bounds = bounds, constraints = constraints)

	#Building Efficient Frontier
	target_rets = np.linspace(0.0,(statistics(opt_sharpe['x'])[0]),50)
	target_volits = []
	for target_ret in target_rets:

		#Defining optimization constraints as a dictionary 
		#constraints to be that the sum of weights must equal 1 and that the expectedReturn of the portfolio meets the target
		cons = ({'type': 'eq', 'fun': lambda x: (np.sum(x) - 1)},
				   	   {'type': 'eq', 'fun': lambda x: statistics(x)[0] - target_ret})

		res = spo.minimize(min_func_variance, init_weights, method = 'SLSQP', bounds = bounds, constraints = cons)

		#Creating points for efficient frontier
		target_volits.append(res['fun'])

	#result_sharpe now holds the result of weights for the portfolio that has the highest Sharpe ratio FOR the specified target
	target_volits = np.array(target_volits)

	plot_data(number_of_sims, target_rets, target_volits, opt_variance, opt_sharpe)
	return opt_variance['x'].round(4), opt_sharpe['x'].round(4)
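optimize_portfolio expects noa, statistics, min_func_sharpe, min_func_variance, and plot_data from the enclosing module; hypothetical stand-ins in the usual style of such portfolio examples (plot_data omitted):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
rets = pd.DataFrame(rng.normal(0.0005, 0.01, size=(250, 4)))  # fake daily returns
noa = rets.shape[1]  # number of assets

def statistics(weights):
    weights = np.array(weights)
    pret = np.sum(rets.mean() * weights) * 252                     # annualized return
    pvol = np.sqrt(weights @ (rets.cov().values * 252) @ weights)  # annualized volatility
    return np.array([pret, pvol, pret / pvol])                     # [return, vol, Sharpe]

def min_func_sharpe(weights):
    return -statistics(weights)[2]  # maximize Sharpe by minimizing its negative

def min_func_variance(weights):
    return statistics(weights)[1] ** 2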
Example #20
 def fit_freq2(self,src_freq,kpts,a0=80.,b0=10.,a1=8.,b1=1.,eps0=1.0,crys=True,method='Powell'):
     """
     To use this function, one must have set all 2nd n.n already.
     See fit_freq above.
     a1,b1: float
         initial guess of second n.n. force constants
     """
     from scipy.optimize import minimize
     np.set_printoptions(precision=3)
     np.set_printoptions(suppress=True)
     self.set_kpts(kpts,crys=crys)
     self.src_freq = np.sort(src_freq)
     assert len(self.src_freq) == self.nkpt
     if self.ecalc is not None:
         self.m_ewald = self.ecalc.get_dyn(self.mass,kpts,crys=crys)
         res = minimize(self.__fit_ewald2,x0=np.array([a0,b0,a1,b1,eps0]),method=method)
         a,b,a1,b1,eps = abs(res.x)
         print("Fitting routine returns: state (%s)" % res.success)
         print("alpha = %10.6f; beta = %10.6f; alpha1 = %10.6f; beta1 = %10.6f; eps = %10.6f" % (a,b,a1,b1,eps))
         self.set_bulk_fc2([a,a1],[b,b1]); self.__set_dyn(); self.eps = eps
         print("Fitted frequencies:")
         print(np.sort(self.get_ph_disp()))
     else:
         res = minimize(self.__fit_no_ewald2,x0=np.array([a0,b0,a1,b1]),method=method)
         a,b,a1,b1 = abs(res.x)
         print("Fitting routine returns: state (%s)" % res.success)
         print("alpha = %10.6f; beta = %10.6f; alpha1 = %10.6f; beta1 = %10.6f" % (a,b,a1,b1))
         self.set_bulk_fc2([a,a1],[b,b1])
         print("Fitted frequencies:")
         print(np.sort(self.get_ph_disp()))
     if not res.success: print(res.message)
     self.__log_fit(res,filename="log_fit2.txt")
Example #21
 def SolveEq(self):
     self.Odm=self.Ocb-self.Obh2/(self.h**2)
     self.Ob=self.Ocb-self.Odm
     if (self.lam==0):
         self.fractoday, self.Or=self.xfrac, 0.0
     if (self.xfrac==1.0):
         res = minimize(lambda x: self.FuncMin_([1.0, x[0]]), [0.001], tol=1e-5)
         self.Or = res.x[0]
         self.fractoday = 1.0
     else:
         res = minimize(self.FuncMin_, [self.xfrac, 0.001], tol=1e-5)
         self.fractoday, self.Or = res.x
     ## stupid interp1d doesn't take inverses
     self.Ocb_std=self.Ob+self.Odm*(1-self.fractoday)
     self.Odm_dec=self.Odm*self.fractoday
     yinit= array([self.Odm_dec, self.Or])
     sol =odeint(self.RHS_,yinit,self.logar)
     
     self.sol=sol
     self.rx=interp1d(self.ilogar,sol[::-1,0])
     self.rr=interp1d(self.ilogar,sol[::-1,1])
     ## take early time solution
     self.Ocbh2_early=(self.Ocb_std+sol[-1,0])*self.h**2        
Example #22
    def _preoptimize_model(self):
        """ Preoptimizes the model by estimating a Gaussian state space models
        
        Returns
        ----------
        - Gaussian model latent variable object
        """
        gaussian_model = LLT(self.data, integ=self.integ, target=self.target)
        gaussian_model.fit()
        self.latent_variables.z_list[0].start = gaussian_model.latent_variables.get_z_values()[1]
        self.latent_variables.z_list[1].start = gaussian_model.latent_variables.get_z_values()[2]

        if self.model_name2 == 't':

            def temp_function(params):
                return -np.sum(ss.t.logpdf(x=self.data, df=np.exp(params[0]), 
                    loc=np.ones(self.data.shape[0])*params[1], scale=np.exp(params[2])))

            p = optimize.minimize(temp_function,np.array([2.0, 0.0, -1.0]),method='L-BFGS-B')
            self.latent_variables.z_list[2].start = p.x[2]
            self.latent_variables.z_list[3].start = p.x[0]

        elif self.model_name2 == 'Skewt':

            def temp_function(params):
                return -np.sum(fam.Skewt.logpdf_internal(x=self.data,df=np.exp(params[0]),
                    loc=np.ones(self.data.shape[0])*params[1], scale=np.exp(params[2]),gamma=np.exp(params[3])))

            p = optimize.minimize(temp_function,np.array([2.0, 0.0, -1.0, 0.0]),method='L-BFGS-B')
            self.latent_variables.z_list[2].start = p.x[3]
            self.latent_variables.z_list[3].start = p.x[2]
            self.latent_variables.z_list[4].start = p.x[0]

        return gaussian_model.latent_variables
Example #23
    def test_scipy_style(self):
        def func(x, sign=1.0):
            """ Objective function """
            return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2)

        def func_deriv(x, sign=1.0):
            """ Derivative of objective function """
            dfdx0 = sign*(-2*x[0] + 2*x[1] + 2)
            dfdx1 = sign*(2*x[0] - 4*x[1])
            return np.array([ dfdx0, dfdx1 ])

        cons = (
            {'type': 'eq',
             'fun' : lambda x: np.array([x[0]**3 - x[1]]),
             'jac' : lambda x: np.array([3.0*(x[0]**2.0), -1.0])},
            {'type': 'ineq',
             'fun' : lambda x: np.array([x[1] - 1]),
             'jac' : lambda x: np.array([0.0, 1.0])})

        from scipy.optimize import minimize
        res = minimize(func, [-1.0,1.0], args=(-1.0,),
               method='SLSQP', options={'disp': True})

        res = minimize(func, [-1.0,1.0], args=(-1.0,), jac=func_deriv,
               constraints=cons, method='SLSQP', options={'disp': True})
Example #24
def fit_slice_y_distortion(gmos_mos_fits, initial_slice_edges_guess,
                           method='Nelder-Mead', slice_model_chip=2,
                           slice_model_slice=slice(None), degree=5):
    """
    Fit slice

    Parameters
    ----------

    gmos_mos_fits: ~geminiutil.gmos.gmos_alchemy.GMOSMOSRawFITS
    slice_edges_initial_guess: tuple of ~np.ndarray
    method: str

    """
    if gmos_mos_fits.prepared is None:
        raise ValueError('given fits {0} is not prepared'.format(gmos_mos_fits))

    (initial_slice_lower_edges,
     initial_slice_upper_edges) = initial_slice_edges_guess


    raw_data = gmos_mos_fits.prepared.fits.fits_data[slice_model_chip].data
    data_1D_slice = np.median(raw_data[slice_model_slice], axis=1)

    distorted_model = DistortedSliceModel(data_1D_slice,
                                          initial_slice_lower_edges,
                                          initial_slice_upper_edges,
                                          degree=degree)
    initial_p_coeff = distorted_model.p_coef.copy()
    optimize.minimize(distorted_model.fit, initial_p_coeff, method=method)

    return distorted_model.polynomial(initial_slice_lower_edges), \
           distorted_model.polynomial(initial_slice_upper_edges)
Example #25
 def minimize_negative_acquisition(self, gpreg):
     # minimization of negative acquisition function
     vals, par = [], []
     x0 = list(self.rand.uniform(np.array(self.bounds).T[0], np.array(self.bounds).T[1],
                                 size=(self.n_iters_aqui - 1, self.dim)
                                 )) + [self.x[int(np.argmax(self.y))]]
     for x in x0:
         if self.acquisition_func == "EI":
             if self.bashop:
                 opti = basinhopping(expected_improvement, x0=x,
                                     minimizer_kwargs={"method": "L-BFGS-B",
                                                       "bounds": self.bounds,
                                                       "args": (self.aquis_par[-1],
                                                                np.max(self.y),
                                                                gpreg, self.dim,)})
             else:
                 opti = minimize(expected_improvement, x0=x, method="L-BFGS-B",
                                 args=(self.aquis_par[-1], np.max(self.y),
                                       gpreg, self.dim,),
                                 bounds=self.bounds)
         else:
             if self.bashop:
                 opti = basinhopping(upper_confidence_bound, x0=x,
                                     minimizer_kwargs={"method": "L-BFGS-B",
                                                       "bounds": self.bounds,
                                                       "args": (self.aquis_par[-1],
                                                                gpreg, self.dim,)})
             else:
                 opti = minimize(upper_confidence_bound, x0=x, method="L-BFGS-B",
                                 args=(self.aquis_par[-1], gpreg, self.dim,),
                                 bounds=self.bounds)
         par.append(opti.x)
         vals.append(opti.fun)
     return np.array(vals), np.array(par)
Example #26
def opt_col(confident_pixels, test_img_lum):
	size = 3
	n = (size - 1) // 2
	Y = len(test_img_lum[0])-2*n
	weights = find_weights(test_img_lum)
	cons_a = define_cons(confident_pixels, Y,  True)
	cons_b = define_cons(confident_pixels, Y, False)

	init_guess_a = np.zeros(np.shape(test_img_lum))
	init_guess_b = np.zeros(np.shape(test_img_lum))
	for pixel in confident_pixels:
		x, y, a, b = pixel[0], pixel[1], pixel[2], pixel[3]
		init_guess_a[x][y] = a
		init_guess_b[x][y] = b
	init_guess_a = init_guess_a[n:-n, n:-n]
	init_guess_a = np.reshape(init_guess_a, -1)
	init_guess_b = init_guess_b[n:-n, n:-n]
	init_guess_b = np.reshape(init_guess_b, -1)

	opt_a = minimize(objective, init_guess_a, args=(weights,), constraints=cons_a, method='SLSQP')
	channel_a = np.reshape(opt_a.x, (np.shape(test_img_lum)[0] - 2*n, np.shape(test_img_lum)[1] - 2*n))
	opt_b = minimize(objective, init_guess_b, args=(weights,), constraints=cons_b, method='SLSQP')
	channel_b = np.reshape(opt_b.x, (np.shape(test_img_lum)[0] - 2*n, np.shape(test_img_lum)[1] - 2*n))

	colored_image = np.zeros((np.shape(test_img_lum)[0]-2*n, np.shape(test_img_lum)[1]-2*n, 3))
	colored_image[:, :, 0] = test_img_lum[n:-n, n:-n]
	colored_image[:, :, 1] = channel_a
	colored_image[:, :, 2] = channel_b
	return colored_image
Example #27
    def test_back_prop_with_diff_grad_checks(self, iter=200):
        eps = math.sqrt(np.finfo(float).eps)
        init_val = self.packTheta(self.W1, self.b1, self.W2, self.b2)

        err = optimize.check_grad(self.cost, self.cost_prime, init_val, self.X)
        print ("Error after 0 iterations: %f, Error per Param: %f" % (err, err/init_val.size))
        res = optimize.minimize(fun=self.cost, x0=init_val, args=(self.X,), jac=self.cost_prime, method='L-BFGS-B', options={'maxiter':iter})
        self.W1, self.b1, self.W2, self.b2 = self.unpackTheta(res.x)

        err = optimize.check_grad(self.cost, self.cost_prime, init_val, self.X)
        print ("Error after 200 iterations: %f, Error per Param: %f" % (err, err/init_val.size))
        init_val = res.x
        res = optimize.minimize(fun=self.cost, x0=init_val, args=(self.X,), jac=self.cost_prime, method='L-BFGS-B', options={'maxiter':iter})
        self.W1, self.b1, self.W2, self.b2 = self.unpackTheta(res.x)

        err = optimize.check_grad(self.cost, self.cost_prime, init_val, self.X)
        print ("Error after 400 iterations: %f, Error per Param: %f" % (err, err/init_val.size))
        init_val = res.x
        res = optimize.minimize(fun=self.cost, x0=init_val, args=(self.X,), jac=self.cost_prime, method='L-BFGS-B', options={'maxiter':iter})
        self.W1, self.b1, self.W2, self.b2 = self.unpackTheta(res.x)

        err = optimize.check_grad(self.cost, self.cost_prime, init_val, self.X)
        print ("Error after 600 iterations: %f, Error per Param: %f" % (err, err/init_val.size))
        init_val = res.x
        res = optimize.minimize(fun=self.cost, x0=init_val, args=(self.X,), jac=self.cost_prime, method='L-BFGS-B', options={'maxiter':iter})
        self.W1, self.b1, self.W2, self.b2 = self.unpackTheta(res.x)

        err = optimize.check_grad(self.cost, self.cost_prime, init_val, self.X)
        print ("Error after 800 iterations: %f, Error per Param: %f" % (err, err/init_val.size))
Example #28
def try_minimize(func, guess, args=(), method=None, quiet=False, timeout=5,
                 unpack=False, max_show=10, **kwds):
    '''Minimization of scalar function of one or more variables.
    See the docstring of `scipy.optimize.minimize`.

    Example
    -------
    from scipy.optimize import rosen
    res = try_minimize(rosen, [0.5, 0.5])
    '''
    from scipy.optimize import minimize
    from numpy import array2string
    from time import perf_counter as clock  # time.clock was removed in Python 3.8
    from .funcs import timeout as timer

    guess = np.asarray(guess)
    if unpack:
        func_original = func
        func = lambda x: func_original(*x)

    if method is None:
        methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG',
                   'L-BFGS-B', 'TNC', 'COBYLA', 'SLSQP', 'dogleg', 'trust-ncg']
    elif np.isscalar(method):
        methods = [method]
    else:
        methods = method

    results = []
    for method in methods:
        try:
            time = clock()
            if timeout > 0:
                with timer(timeout, ValueError):
                    res = minimize(func, guess, args=args, method=method, **kwds)
            else:
                res = minimize(func, guess, args=args, method=method, **kwds)
            res.time = clock() - time
            res.method = method
            results.append(res)
        except (ValueError, MemoryError, TypeError) as err:
            if not quiet:
                print("{:>12s}: {}".format(method, err))
            continue

    results.sort(key=lambda res: res.fun)
    if not quiet:
        print("---------------------------------------------")
        param_len = min(guess.size, max_show) * 10 + 1
        print("{:>12s}  {:^5s}  {:^10s}  {:^{}s}  {:^s}".format(
            "method", "OK", "fun", "x", param_len, "time"))
        for res in results:
            formatter = {'all': (lambda x: "%9.3g" % x)}
            x = array2string(res.x[:max_show], formatter=formatter, separator=',')
            out = (res.method, str(res.success), float(res.fun), x, res.time)
            print("{:>12s}: {:5s}  {:10.4g}  {}  {:.1e}".format(*out))
    if results:
        return results[0]
    else:
        raise ValueError('Failed.')
Example #29
    def optimize(self, niter=3):
        self.system.freeze_parameter("central:*")
        self.system.freeze_parameter("bodies*t0")
        p0 = self.system.get_vector()
        r = minimize(self._nll, p0, jac=self._grad_nll, method="L-BFGS-B")
        if r.success:
            self.system.set_vector(r.x)
        else:
            self.system.set_vector(p0)
        self.system.bodies[0].b = np.abs(self.system.bodies[0].b)
        self.system.thaw_parameter("central:*")
        self.system.thaw_parameter("bodies*t0")

        if not niter > 1:
            return

        for gp, lc in zip(self.gps, self.fit_lcs):
            mu = self.system.light_curve(lc.time, texp=lc.texp, maxdepth=2)
            r = (lc.flux - mu) * 1e3
            p0 = gp.get_vector()
            r = minimize(gp.nll, p0, jac=gp.grad_nll, args=(r, ))
            if r.success:
                gp.set_vector(r.x)
            else:
                gp.set_vector(p0)
        self.optimize(niter=niter - 1)
Example #31
    def fit_decomposition_method(self, training_set_inputs, training_set_outputs, test_set_inputs, test_set_outputs, max_number_of_training_iterations = 1000, verbose = False):        
        print()
        print("Optimizing the neural network...")
        # Reinitializing the number of function and gradient evaluations 
        self.NbrFuncEval = 0
        self.NbrGradEval = 0
        self.NormGradAtOptimalPoint = 0
        # Setting the number of inputs per neuron in the hidden layer
        self.number_of_inputs_per_neuron = training_set_inputs.shape[1]
        # Getting the training set size
        P = len(training_set_inputs)
        # Getting the number of neurons in the hidden layer
        N = self.hidden_layer_sizes
        # Getting the value of the regularization parameter rho to use on the global error computation
        rho = self.rho
        #getting the value of the spread in the activation function sigma
        sigma = self.sigma
        #getting the value of the solver
        solver = self.solver
        # Input data
        X = training_set_inputs
        # Output data
        Y = training_set_outputs
        # Preparing the initial guesses for the parameters to be minimized
        self.__random_start()
        # Computing the initial output on training data
        self.result_initial_output_train = self.predict(training_set_inputs)
        start_time = time.time()
        # Starting the two-block decomposition method optimization of the MLP
        # Layer1 to layer2 weights
        v = np.asfarray(self.synaptic_weights_output_layer).flatten()
        # Inputs to layer1 weights
        w = np.asfarray(self.synaptic_weights_hidden_layer)
        # Layer1 noises
        b = list(np.asfarray(self.noises).flatten())
        result_outputs_test = self.predict(test_set_inputs)
        test_error = self.get_error(result_outputs_test, test_set_outputs)
        if (verbose):
            print ()
            print("Initial v: ", v)
            print()
            print("Initial w: ", w)
            print()
            print("Initial b: ", b)
            print()
            print("Initial test error: ", test_error)    
        i = 1
        flag = 1
        nfe = 0
        nje = 0
        err_count = 0
        best_error_and_iteration = [float("inf"), 0]
        while (flag and (i <= 100)):
            if (verbose):
                print()
                print("Iteration ", i)
            
            # Step1: minimization with respect to v
            optimized1 = minimize(self.__error_extreme, v, args=(w, b, X, Y, N, P, sigma, rho), method = solver, options=dict({'maxiter':max_number_of_training_iterations}))
            nfe += optimized1.nfev
            nje += optimized1.njev
            new_v = optimized1.x
            omega = np.asarray(b + list(np.asfarray(w).flatten()))
            
            # Step2: minimization with respect to c
            optimized2 = minimize(self.__error_extreme2, omega, args=(new_v, X, Y, N, P, sigma, rho), method = solver, options=dict({'maxiter':max_number_of_training_iterations}))
            nfe += optimized2.nfev
            nje += optimized2.njev
            result = optimized2.x
            
            # Update the model's weights, but before doing so save the old ones
            # so they can be restored if the error increases
            new_w = result[self.hidden_layer_sizes:].reshape(self.number_of_inputs_per_neuron, self.hidden_layer_sizes)
            new_b = result[:self.hidden_layer_sizes]
            old_w = self.synaptic_weights_hidden_layer
            old_v = self.synaptic_weights_output_layer
            old_b = self.noises
            self.synaptic_weights_hidden_layer = new_w
            self.synaptic_weights_output_layer = new_v
            self.noises = new_b
            
            # Compute the new error and compare with the old one; the new weights are kept only if the error improves
            result_outputs_test = self.predict(test_set_inputs)
            new_test_error = self.get_error(result_outputs_test, test_set_outputs)
            
            if (verbose):
                print ()
                print("        new v: ", new_v)
                print()
                print("        new w: ", new_w)
                print()
                print("        new w: ", new_b)
                print()
                print("        new test error: ", new_test_error)
            
            # If the parameters stop changing, or the error has failed to improve several times in a row, stop training (early stopping).
            if ((np.array_equal(new_v, v) and np.array_equal(new_w, w) and np.array_equal(new_b, b)) or err_count == 3):
                flag = 0
            
            # Increment this counter if the error increases or stays constant
            if (new_test_error >= test_error):
                err_count += 1
            # Reset this counter to 0 if the error decreases
            else:
                err_count = 0
            
            # Decide if the actual situation is better than the previous or not
            if (new_test_error < best_error_and_iteration[0]):
                best_error_and_iteration[0] = new_test_error
                best_error_and_iteration[1] = i
            # If it is not, put back the old weights
            else:
                self.synaptic_weights_hidden_layer = old_w
                self.synaptic_weights_output_layer = old_v
                self.noises = old_b
            
            v = new_v
            w = new_w
            b = list(new_b.flatten())
            test_error = new_test_error
            i += 1

        if (verbose):
            print ()
            print ()
            print ("Best computed error: ", best_error_and_iteration[0])
            print ()
            print ("Best computed error iteration: ", best_error_and_iteration[1])
            print ()
            
        self.training_time = time.time() - start_time
        self.NbrFuncEval = nfe
        self.NbrGradEval = nje
        self.NormGradAtOptimalPoint = linalg.norm(optimized2.jac)
        self.NbrOuterIter = i-1
Example #32
initial_w2 = initializeWeights(n_hidden, n_class)

# unroll 2 weight matrices into single column vector
initialWeights = np.concatenate((initial_w1.flatten(), initial_w2.flatten()), 0)

# set the regularization hyper-parameter
lambdaval = 16
args = (n_input, n_hidden, n_class, train_data, train_label, lambdaval)

# Train Neural Network using fmin_cg or minimize from the scipy.optimize module. Check documentation for a working example
opts = {'maxiter': 50}  # Preferred value.

start_time = time.time()
print('start time', start_time)

nn_params = minimize(nnObjFunction, initialWeights, jac=True, args=args, method='CG', options=opts)

print('training time',time.time() - start_time)
# In Case you want to use fmin_cg, you may have to split the nnObjectFunction to two functions nnObjFunctionVal
# and nnObjGradient. Check documentation for this function before you proceed.
# nn_params, cost = fmin_cg(nnObjFunctionVal, initialWeights, nnObjGradient,args = args, maxiter = 50)
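A quick sketch of that fmin_cg variant, using SciPy's built-in Rosenbrock function and gradient in place of the hypothetical nnObjFunctionVal/nnObjGradient split:

import numpy as np
from scipy.optimize import fmin_cg, rosen, rosen_der

# fmin_cg takes the objective and its gradient as separate callables
xopt = fmin_cg(rosen, np.array([4.0, -2.5]), fprime=rosen_der, maxiter=50)
print(xopt)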


# Reshape nnParams from 1D vector into w1 and w2 matrices
w1 = nn_params.x[0:n_hidden * (n_input + 1)].reshape((n_hidden, (n_input + 1)))
w2 = nn_params.x[(n_hidden * (n_input + 1)):].reshape((n_class, (n_hidden + 1)))

print("after minimize")

# Test the computed parameters
predicted_label = nnPredict(w1, w2, train_data)

Example #33
# %% [markdown]
# Here we call the `optimize` routine to find a vector $\mathbf{p}$ that minimizes $E(\mathbf{p})$. The arguments are:
#
# - our objective function `objective`.
# - an initial trial vector `params0` from which the optimization starts
# - the method to use, in this case the *simplex* (*Nelder-Mead*) algorithm (see the [documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html#scipy.optimize.minimize) for the full list of available methods)
# - finally, a dictionary of method-specific options, here a maximum number of iterations and the boolean `disp`, which prints a summary of the run on completion

# %%
params0 = np.array([0.0, 0.0, 0.0, 0.5])
res = opt.minimize(objective,
                   params0,
                   method='nelder-mead',
                   options={
                       'disp': True,
                       'maxiter': 1000
                   })

# %% [markdown]
# The final result of the optimization is the vector `x`, an attribute of the object stored in `res`:

# %%
res.x

# %% [markdown]
# We can also inspect the value of the objective function at the initial parameters, at the final ones, and at those used to generate the sample:

# %%
print('Initial value = ', objective(params0))
Example #34
def findJetBreak(jetType,
                 Y,
                 Z,
                 regime,
                 NU,
                 printMode=None,
                 returnAll=False,
                 ax=None):

    tN0 = pp.calcTN0(Y)
    tb = 10 * tN0

    thV = Y[0]
    thC = Y[2]
    thW = Y[3]

    chip = 2 * math.sin(0.5 * (thC + thV))

    tj_guess = 0.24 * tN0 * math.pow(chip, 8.0 / 3.0)

    if jetType == -1:
        if thV <= thC:
            ta = 1.0e-3 * tj_guess
        else:
            tC = 2 * pp.calcTC(jetType, Y, regime)
            ta = 2 * tC
    else:
        if thV <= thC and thV <= thW:
            ta = 1.0e-3 * tj_guess
        elif thV <= thW:
            ta = 1.0e-3 * tj_guess
        else:
            tW = pp.calcTW(jetType, Y, regime)
            ta = 2.0 * tW

    if ta == 0.0:
        print(ta, tb, thV, thC, thW)

    t = np.geomspace(ta, tb, 200)

    theta = np.zeros(t.shape)
    phi = np.zeros(t.shape)
    """
    _, _, u, _ = grb.jet.shockVals(theta, phi, t, jetType, *Y, **Z)
    while u.max() < 1.0:
        ta = ta / 10.0
        print("Retrying with ta = {0:.3e} s".format(ta))
        t = np.geomspace(ta, tb, 200)
        _, _, u, _ = grb.jet.shockVals(theta, phi, t, jetType, *Y, **Z)

    rel = (u >= 0.5) & (u < 1.0e5)
    t = t[rel]
    u = u[rel]
    theta = theta[rel]
    phi = phi[rel]
    """

    ta = 5.0e-2 * tj_guess
    tb = 2.0e1 * tj_guess
    t = np.geomspace(ta, tb, 200)
    theta = np.zeros(t.shape)
    phi = np.zeros(t.shape)
    _, _, u, _ = grb.jet.shockVals(theta, phi, t, jetType, *Y, **Z)

    nu = np.empty(t.shape)
    nu[:] = NU
    nufac = 1.01
    Fnu = grb.fluxDensity(t, nu, jetType, 0, *Y, **Z)
    Fnua = grb.fluxDensity(t, nu / nufac, jetType, 0, *Y, **Z)
    Fnub = grb.fluxDensity(t, nu * nufac, jetType, 0, *Y, **Z)
    beta = np.log(Fnub / Fnua) / np.log(nufac * nufac)

    right = (np.fabs(beta - betaRegime(regime, Y)) < 0.02)\
        & (t > 3*ta) & (t < tb/3)
    t_fit = t[right]
    # nu_fit = nu[right]
    # u_fit = u[right]
    # theta_fit = theta[right]
    # phi_fit = phi[right]
    Fnu_fit = Fnu[right]

    t_guess = 0.24 * tN0 * np.power(2 * np.sin(0.5 * (thC + thV)), 8.0 / 3.0)
    i0s = [
        1,
        len(t_fit) // 4,
        len(t_fit) // 2, 3 * len(t_fit) // 4,
        len(t_fit) - 2
    ]
    i0 = np.searchsorted(t_fit, t_guess)

    if i0 > 1 and i0 < len(t_fit) - 2:
        i0s.append(i0)

    bmin = -10
    bmax = 10

    bounds = [(math.log10(t_fit.min()) - 1, math.log10(t_fit.max()) + 1),
              (math.log10(Fnu_fit.min()) - 1, math.log10(Fnu_fit.max()) + 1),
              (bmin, bmax), (bmin, bmax), (-1.5, 1.5)]

    chi2min = np.inf
    x_best = None

    for i0 in i0s:
        if printMode == 'all':
            print("i0 = {0:d}".format(i0))
        lt0 = math.log10(t_fit[i0])
        lF0 = math.log10(Fnu_fit[i0])
        ls = 1.0

        b0 = math.log(Fnu_fit[i0] / Fnu_fit[0]) / math.log(
            t_fit[i0] / t_fit[0])
        b1 = math.log(Fnu_fit[-1] / Fnu_fit[i0]) / math.log(
            t_fit[-1] / t_fit[i0])

        if b0 < bmin:
            b0 = bmin + 0.1
        if b0 > bmax:
            b0 = bmax - 0.1
        if b1 < bmin:
            b1 = bmin + 0.1
        if b1 > bmax:
            b1 = bmax - 0.1

        x0 = [lt0, lF0, b0, b1, ls]
        func = lf_spl2

        if printMode == 'all':
            print("chi2(x0) = {0:.3e}".format(chi2(x0, t_fit, Fnu_fit, func)))

        res = opt.minimize(chi2,
                           x0, (t_fit, Fnu_fit, func),
                           bounds=bounds,
                           method='TNC',
                           options={'maxiter': 8000})
        if printMode == 'all':
            print("chi2(x1) = {0:.3e}".format(chi2(res.x, t_fit, Fnu_fit,
                                                   func)))
            print(res)
        elif printMode == 'summary':
            print("Success: " + str(res.success) +
                  " chi2={0:.2e}".format(res.fun))

        if res.fun < chi2min:
            chi2min = res.fun
            x_best = res.x

    x = x_best

    if x[3] - x[2] > -0.74:
        print("   is that even a jetbreak? b0={0:.2f} b1={1:.2f} thV={2:.2f}".
              format(x[2], x[3], thV))

    if ax is not None:
        ax.plot(t, Fnu)
        ax.plot(t_fit, func(t_fit, *x0), lw=1, color='k', ls='--')
        ax.plot(t_fit, func(t_fit, *x), lw=1, color='k')

        # ax2 = ax.twinx()
        # ax2.plot(t, u, color='tab:orange')
        # ax2.set_yscale('log')

        ax.set_xscale('log')
        ax.set_yscale('log')

    if returnAll:
        return x

    return math.pow(10.0, x[0])
os.system("mkdir -p mcmc/figures/CFHT/likehood")
os.system("mkdir -p mcmc/data")
sig8s = np.linspace(0.1, 0.9, 100)

if computes or (not os.path.isfile('mcmc/data/likehood_sig8.npy')):
    likes = np.array(
        [lnlike([sig8], y, yerrinv, verbose=True) for sig8 in sig8s])
    np.save('mcmc/data/likehood_sig8.npy', likes)
else:
    likes = np.load('mcmc/data/likehood_sig8.npy')

nll = lambda *args: -lnlike(*args)

# Best to use log-parameters I think, given the allowed ranges of p and q
result = op.minimize(nll, [sig8_st],
                     args=(y, yerrinv, True),
                     method='Nelder-Mead',
                     tol=1e-6)
sig8_ml = result["x"][0]
value = lnlike([sig8_ml], y, yerrinv, verbose=True)

print("Best fit is sig8={0}".format(sig8_ml))

plt.figure().set_size_inches((8, 8), forward=False)
plt.plot(sig8s, likes)
plt.plot([sig8_ml], [value], '-ro', label="{0}".format(sig8_ml))
plt.xlabel('$\\sigma_8$')
plt.ylabel('log_like')
plt.legend()
plt.savefig('mcmc/figures/CFHT/likehood/sig8.png')
plt.show()
Example #36
    def __init__(self, zscores, null_lb=-1, null_ub=1, estimate_mean=True,
                 estimate_scale=True, estimate_null_proportion=False):

        # Extract the null z-scores
        ii = np.flatnonzero((zscores >= null_lb) & (zscores <= null_ub))
        if len(ii) == 0:
            raise RuntimeError("No Z-scores fall between null_lb and null_ub")
        zscores0 = zscores[ii]

        # Number of Z-scores, and null Z-scores
        n_zs, n_zs0 = len(zscores), len(zscores0)

        # Unpack and transform the parameters to the natural scale, hold
        # parameters fixed as specified.
        def xform(params):

            mean = 0.
            sd = 1.
            prob = 1.

            ii = 0
            if estimate_mean:
                mean = params[ii]
                ii += 1
            if estimate_scale:
                sd = np.exp(params[ii])
                ii += 1
            if estimate_null_proportion:
                prob = 1 / (1 + np.exp(-params[ii]))

            return mean, sd, prob


        from scipy.stats.distributions import norm


        def fun(params):
            """
            Negative log-likelihood of z-scores.

            The function has three arguments, packed into a vector:

            mean : location parameter
            logscale : log of the scale parameter
            logitprop : logit of the proportion of true nulls

            The implementation follows section 4 from Efron 2008.
            """

            d, s, p = xform(params)

            # Mass within the central region
            central_mass = (norm.cdf((null_ub - d) / s) -
                            norm.cdf((null_lb - d) / s))

            # Probability that a Z-score is null and is in the central region
            cp = p * central_mass

            # Binomial term
            rval = n_zs0 * np.log(cp) + (n_zs - n_zs0) * np.log(1 - cp)

            # Truncated Gaussian term for null Z-scores
            zv = (zscores0 - d) / s
            rval += np.sum(-zv**2 / 2) - n_zs0 * np.log(s)
            rval -= n_zs0 * np.log(central_mass)

            return -rval


        # Estimate the parameters
        from scipy.optimize import minimize
        # starting values are mean = 0, scale = 1, p0 ~ 1
        mz = minimize(fun, np.r_[0., 0, 3], method="Nelder-Mead")
        mean, sd, prob = xform(mz['x'])

        self.mean = mean
        self.sd = sd
        self.null_proportion = prob
Example #37
    sigma_y_space = np.linspace(np.log(0.03), np.log(3), ngrid)
    P = np.stack(np.meshgrid(l_space, sigma_y_space), axis=0)
    configs = [(1.0, 0.2), (10, 0.8)]

    fig, ax = plt.subplots()
    plot_gp_pred(x, y, xstar, k, sigma_f, *configs[0], ax)
    pml.savefig("gpr_config0.pdf")

    fig, ax = plt.subplots()
    plot_gp_pred(x, y, xstar, k, sigma_f, *configs[1], ax)
    pml.savefig("gpr_config1.pdf")

    ngrid = 41
    w01 = np.array([np.log(1), np.log(0.1)])
    w02 = np.array([np.log(10), np.log(0.8)])
    s0 = minimize(lambda p: -log_likelihood(x, y, sigma_f, *p), w01)
    s1 = minimize(lambda p: -log_likelihood(x, y, sigma_f, *p), w02)
    levels = -np.array([8.3, 8.5, 8.9, 9.3, 9.8, 11.5, 15])[::-1]
    l_space = np.linspace(np.log(0.5), np.log(80), ngrid)
    sigma_y_space = np.linspace(np.log(0.03), np.log(3), ngrid)

    fig, ax = plt.subplots()
    plot_marginal_likelihood_surface(x,
                                     y,
                                     sigma_f,
                                     l_space,
                                     sigma_y_space,
                                     ax,
                                     levels=levels)
    plt.scatter(*np.exp(s0.x), marker="+", s=100, c="tab:blue")
    plt.scatter(*np.exp(s1.x), marker="+", s=100, c="tab:blue")
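For reference, the log-space hyperparameter fit that log_likelihood presumably performs can be sketched end to end. The RBF kernel and all names below are assumptions for illustration, not definitions from the example above.

import numpy as np
from scipy.optimize import minimize

def neg_log_marglik(log_params, x, y, sigma_f=1.0):
    # Negative GP log marginal likelihood for an RBF kernel,
    # parameterized by log(lengthscale) and log(noise)
    l, sigma_y = np.exp(log_params)
    d2 = (x[:, None] - x[None, :]) ** 2
    K = sigma_f**2 * np.exp(-0.5 * d2 / l**2) + sigma_y**2 * np.eye(len(x))
    L = np.linalg.cholesky(K)
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))
    return 0.5 * y @ alpha + np.log(np.diag(L)).sum() + 0.5 * len(x) * np.log(2 * np.pi)

rng = np.random.default_rng(1)
x = np.linspace(0, 5, 30)
y = np.sin(x) + 0.1 * rng.standard_normal(30)
res = minimize(neg_log_marglik, x0=np.log([1.0, 0.1]), args=(x, y))
print(np.exp(res.x))  # fitted (lengthscale, noise)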
Example #38
0
def model_fit(h, y_err, n0, amplitudes, times, xs):
    '''Find model parameters for a particular wave defined by amplitudes
    Returns:
        Wave speed and error
        Wave length and error
        Wave offset phi
        Reduced chi_sqr for model fit
        '''
    # Use theoretical values as initial values (h_err and ITERS are assumed
    # to be module-level globals)
    c_t, c_t_err = theory_c(n0, y_err, h, h_err)
    l_t, l_t_err = theory_l(n0, y_err, h, h_err)

    # Calc average phi to center the wave on origin
    t_av = times[2]
    x_av = np.mean(xs)
    phi_av = (x_av - c_t * t_av) / l_t

    c, l, phi = c_t, l_t, -phi_av
    # Start by finding a phi that corrects the wave translation
    phi_min_func = lambda phi: chi_sqr(times, xs, amplitudes, y_err, phi, c, l,
                                       n0, h)
    res = minimize(phi_min_func, phi, method='Nelder-Mead')
    phi = res.x[0]
    # Use the corrected model to find c & l.  x=[c,l]
    min_func = lambda x: chi_sqr(times, xs, amplitudes, y_err, x[2], x[0],
                                 x[1], n0, h)
    res = minimize(min_func, [c, l, phi], method='Nelder-Mead')
    c, l, phi = res.x
    chi_sqr_red = res.fun

    # Calc errors on params
    target_chi = chi_sqr_red + 100 / amplitudes.size  # Larger errors on params
    c_dash, l_dash = c, l

    # Functions to target chi = target_chi
    t_func_c = lambda c_prime: abs(target_chi - chi_sqr(
        times, xs, amplitudes, y_err, phi, c_prime, l_dash, n0, h))
    t_func_l = lambda l_prime: abs(target_chi - chi_sqr(
        times, xs, amplitudes, y_err, phi, c_dash, l_prime, n0, h))

    # Functions to minimize chi
    m_func_c = lambda c_prime: chi_sqr(times, xs, amplitudes, y_err, phi,
                                       c_prime, l_dash, n0, h)
    m_func_l = lambda l_prime: chi_sqr(times, xs, amplitudes, y_err, phi,
                                       c_dash, l_prime, n0, h)

    # Find error in c
    c_dash, l_dash = c, l
    c_err = 0
    for it in range(ITERS):
        # find target chi varying c
        c_dash = minimize(t_func_c, c_dash, method='Nelder-Mead').x[0]
        # Calc current c error
        c_c_err = abs(c - c_dash)
        if c_c_err > c_err:
            c_err = c_c_err
        # minimize varying l
        l_dash = minimize(m_func_l, l_dash, method='Nelder-Mead').x[0]
    # End on a target chi
    c_dash = minimize(t_func_c, c_dash, method='Nelder-Mead').x[0]
    c_c_err = abs(c - c_dash)
    if c_c_err > c_err:
        c_err = c_c_err

    # Find error in l
    c_dash, l_dash = c, l
    l_err = 0
    for it in range(ITERS):
        # find target chi varying l
        l_dash = minimize(t_func_l, l_dash, method='Nelder-Mead').x[0]
        # calc current l error
        c_l_err = abs(l_dash - l)
        if c_l_err > l_err:
            l_err = c_l_err
        # minimize varying c
        c_dash = minimize(m_func_c, c_dash, method='Nelder-Mead').x[0]
    # End on a target chi
    l_dash = minimize(t_func_l, l_dash, method='Nelder-Mead').x[0]
    c_l_err = abs(l_dash - l)
    if c_l_err > l_err:
        l_err = c_l_err

    return c, c_err, l, l_err, phi, chi_sqr_red
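The error scan above works by repeatedly driving chi_sqr back up to a target value with Nelder-Mead. The same idea in miniature, on a toy parabolic chi-square (all names illustrative):

import numpy as np
from scipy.optimize import minimize

chi2 = lambda p: ((p[0] - 2.0) / 0.5) ** 2
p_best = minimize(chi2, [0.0], method='Nelder-Mead').x[0]
target = chi2([p_best]) + 1.0  # delta-chi2 of 1 for a 1-sigma interval
t_func = lambda p: abs(target - chi2(p))
p_dash = minimize(t_func, [p_best + 0.1], method='Nelder-Mead').x[0]
print(p_best, abs(p_dash - p_best))  # ~2.0 and ~0.5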
Example #39
0
    def nelder_mead(self,maxiter=1000,step_tol=.0001,obj_tol=.0001,\
                    log_transform=None,verbose=True,start='default'):
        """
        Optimize model log posterior using Nelder-Mead simplex algorithm.

        :param int maxiter: maximum iterations
        :param float obj_tol: convergence tolerance - objective function value
        :param float step_tol: convergence tolerance - simplex position 
        :param list/NoneType log_transform: list of names of variables for which to apply log transform
        :param bool verbose: print optimization info
        :param string start: how to choose start location (default: current model parameter values, random: random value in parameter space)
        :return: tuple containing: x_opt (optimal parameter values), lp_hist (log posterior history), p_native
        """
    
        # don't want verbose model for optimizer but want to change back after
        was_verbose = False
        if self.model.verbose: 
            self.model.verbose=False
            was_verbose=True
        
        # get parameter indices for transform
        if log_transform:
            self.idx_to_transform.clear()
            i = 0
            for prm in self.model.params.mcmcList:
                for ind in range(int(np.prod(prm.val_shape))):
                    if prm.name in log_transform:
                        self.idx_to_transform.append(i)
                    i+=1
        
        # get parameter bounds for random start
        lb = []
        ub = []
        if start=='random':
            i = 0
            for prm in self.model.params.mcmcList:
                if prm.name == 'logPost': continue
                for ind in range(int(np.prod(prm.val_shape))):
                    arr_ind = np.unravel_index(ind, prm.val_shape, order='F')
                    if i in self.idx_to_transform:
                        lb.append(np.log(prm.prior.bounds[0][arr_ind]) if prm.prior.bounds[0][arr_ind] != 0 else -1)
                        if prm.name=='betaU' or prm.name=='betaV':
                            ub.append(3)
                        else:
                            ub.append(np.log(prm.prior.bounds[1][arr_ind]) if prm.prior.bounds[1][arr_ind] != np.inf else np.log(100000))
                    elif prm.name=='lamUz':
                        lb.append(0)
                        ub.append(5)
                    else:
                        lb.append(prm.prior.bounds[0][arr_ind])
                        ub.append(prm.prior.bounds[1][arr_ind] if prm.prior.bounds[1][arr_ind] != np.inf else 100000)
                    i+=1

        # set initial values
        i = 0
        names = []
        x0 = []
        for prm in self.model.params.mcmcList:
            if prm.name == 'logPost': continue
            for ind in range(int(np.prod(prm.val_shape))):
                arr_ind = np.unravel_index(ind, prm.val_shape, order='F')
                if start=='default':
                    if i in self.idx_to_transform:
                        x0.append(self.log_transform(prm.val[arr_ind]))
                    else:
                        x0.append(prm.val[arr_ind])
                elif start=='random':
                    x0.append(np.random.uniform(lb[i],ub[i]))
                else:
                    raise ValueError('start must be either default, or random')
                names.append(prm.name)
                i+=1
        
        # callback function to store history at each iteration
        lp_hist = []
        param_hist = []
        def callback(x):
            fobj = self.optim_logPost(x)
            lp_hist.append(fobj)
            param_hist.append(x)

        # call optimizer
        x_opt = minimize(self.optim_logPost, x0, method='nelder-mead',callback=callback,
               options={'xatol': step_tol, 'fatol': obj_tol,'disp': True,'maxiter':maxiter, 'adaptive': True})
        
        # get opt params on original scale and return
        p_native = deepcopy(x_opt['x'])
        p_native[self.idx_to_transform] = self.inv_log_transform(p_native[self.idx_to_transform])
        if verbose: 
            print('logPost value:',x_opt['fun'])
            print(pd.DataFrame(data={'param': names, 'init value': x0, 'opt value native': p_native}).to_string(index=False))
        self.model.verbose = was_verbose
        return x_opt,lp_hist,p_native
Example #40
0
    def __call__(
        self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Applies the :class:`.InverseGAN` defence upon the sample input.

        :param x: Sample input.
        :param y: Labels of the sample `x`. This function does not affect them in any way.
        :return: Defended input.
        """
        batch_size = x.shape[0]
        iteration_count = 0

        if self.inverse_gan is not None:
            logger.info("Encoding x_adv into starting z encoding")
            initial_z_encoding = self.inverse_gan.predict(x)
        else:
            logger.info("Choosing a random starting z encoding")
            initial_z_encoding = np.random.rand(batch_size, self.gan.encoding_length)

        def func_gen_gradients(z_i):
            z_i_reshaped = np.reshape(z_i, [batch_size, self.gan.encoding_length])
            grad = self.estimate_gradient(z_i_reshaped, x)
            grad = np.float64(
                grad
            )  # scipy fortran code seems to expect float64 not 32 https://github.com/scipy/scipy/issues/5832

            return grad.flatten()

        def func_loss(z_i):
            nonlocal iteration_count
            iteration_count += 1
            logging.info("Iteration: %d", iteration_count)
            z_i_reshaped = np.reshape(z_i, [batch_size, self.gan.encoding_length])
            loss = self.compute_loss(z_i_reshaped, x)

            return loss

        options_allowed_keys = [
            "disp",
            "maxcor",
            "ftol",
            "gtol",
            "eps",
            "maxfun",
            "maxiter",
            "iprint",
            "callback",
            "maxls",
        ]

        for key in kwargs:
            if key not in options_allowed_keys:
                raise KeyError(
                    "The argument `{}` in kwargs is not allowed as option for `scipy.optimize.minimize` using "
                    '`method="L-BFGS-B".`'.format(key)
                )

        options = kwargs.copy()
        optimized_z_encoding_flat = minimize(
            func_loss, initial_z_encoding, jac=func_gen_gradients, method="L-BFGS-B", options=options
        )
        optimized_z_encoding = np.reshape(optimized_z_encoding_flat.x, [batch_size, self.gan.encoding_length])
        y = self.gan.predict(optimized_z_encoding)

        return y
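Stripped of the GAN machinery, the call above just hands L-BFGS-B a scalar loss and a separate gradient callable via jac=. A minimal sketch of that pattern:

import numpy as np
from scipy.optimize import minimize

loss = lambda z: np.sum((z - 3.0) ** 2)
grad = lambda z: 2.0 * (z - 3.0)
res = minimize(loss, np.zeros(4), jac=grad, method="L-BFGS-B")
print(res.x)  # approximately [3, 3, 3, 3]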
Example #41
0
plt.clf()
Z = X, Y = np.mgrid[-1.5:1.5:100j, -1.1:1.1:100j]
# Complete in the additional dimensions with zeros
Z = np.reshape(Z, (2, -1)).copy()
Z.resize((100, Z.shape[-1]))
Z = np.apply_along_axis(f, 0, Z)
Z = np.reshape(Z, X.shape)
plt.imshow(Z.T,
           cmap=plt.cm.gray_r,
           extent=[-1.5, 1.5, -1.1, 1.1],
           origin='lower')
plt.contour(X, Y, Z, cmap=plt.cm.gnuplot)

# A reference but slow solution:
t0 = time.time()
x_ref = optimize.minimize(f, K[0], method="Powell").x
print('     Powell: time %.2fs' % (time.time() - t0))
f_ref = f(x_ref)

# Compare different approaches
t0 = time.time()
x_bfgs = optimize.minimize(f, K[0], method="BFGS").x
print('       BFGS: time %.2fs, x error %.2f, f error %.2f' %
      (time.time() - t0, np.sqrt(np.sum(
          (x_bfgs - x_ref)**2)), f(x_bfgs) - f_ref))

t0 = time.time()
x_l_bfgs = optimize.minimize(f, K[0], method="L-BFGS-B").x
print('     L-BFGS: time %.2fs, x error %.2f, f error %.2f' %
      (time.time() - t0, np.sqrt(np.sum(
          (x_l_bfgs - x_ref)**2)), f(x_l_bfgs) - f_ref))
Example #42
0
def _fit_minimize(f,
                  score,
                  start_params,
                  fargs,
                  kwargs,
                  disp=True,
                  maxiter=100,
                  callback=None,
                  retall=False,
                  full_output=True,
                  hess=None):
    """
    Fit using scipy minimize, where kwarg `min_method` defines the algorithm.

    Parameters
    ----------
    f : function
        Returns negative log likelihood given parameters.
    score : function
        Returns gradient of negative log likelihood with respect to params.
    start_params : array_like, optional
        Initial guess of the solution for the loglikelihood maximization.
        The default is an array of zeros.
    fargs : tuple
        Extra arguments passed to the objective function, i.e.
        objective(x,*args)
    kwargs : dict[str, Any]
        Extra keyword arguments passed to the objective function, i.e.
        objective(x,**kwargs)
    disp : bool
        Set to True to print convergence messages.
    maxiter : int
        The maximum number of iterations to perform.
    callback : callable callback(xk)
        Called after each iteration, as callback(xk), where xk is the
        current parameter vector.
    retall : bool
        Set to True to return list of solutions at each iteration.
        Available in Results object's mle_retvals attribute.
    full_output : bool
        Set to True to have all available output in the Results object's
        mle_retvals attribute. The output is dependent on the solver.
        See LikelihoodModelResults notes section for more information.
    hess : str, optional
        Method for computing the Hessian matrix, if applicable.

    Returns
    -------
    xopt : ndarray
        The solution to the objective function
    retvals : dict, None
        If `full_output` is True then this is a dictionary which holds
        information returned from the solver used. If it is False, this is
        None.
    """
    kwargs.setdefault('min_method', 'BFGS')

    # prepare options dict for minimize
    filter_opts = [
        'extra_fit_funcs', 'niter', 'min_method', 'tol', 'bounds',
        'constraints'
    ]
    options = {k: v for k, v in kwargs.items() if k not in filter_opts}
    options['disp'] = disp
    options['maxiter'] = maxiter

    # Use Hessian/Jacobian only if they're required by the method
    no_hess = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'COBYLA', 'SLSQP']
    no_jac = ['Nelder-Mead', 'Powell', 'COBYLA']
    if kwargs['min_method'] in no_hess:
        hess = None
    if kwargs['min_method'] in no_jac:
        score = None

    # Use bounds/constraints only if they're allowed by the method
    has_bounds = ['L-BFGS-B', 'TNC', 'SLSQP', 'trust-constr']
    has_constraints = ['COBYLA', 'SLSQP', 'trust-constr']

    if 'bounds' in kwargs.keys() and kwargs['min_method'] in has_bounds:
        bounds = kwargs['bounds']
    else:
        bounds = None

    if 'constraints' in kwargs.keys(
    ) and kwargs['min_method'] in has_constraints:
        constraints = kwargs['constraints']
    else:
        constraints = ()

    res = optimize.minimize(f,
                            start_params,
                            args=fargs,
                            method=kwargs['min_method'],
                            jac=score,
                            hess=hess,
                            bounds=bounds,
                            constraints=constraints,
                            callback=callback,
                            options=options)

    xopt = res.x
    retvals = None
    if full_output:
        nit = getattr(res, 'nit', np.nan)  # scipy 0.14 compat
        retvals = {
            'fopt': res.fun,
            'iterations': nit,
            'fcalls': res.nfev,
            'warnflag': res.status,
            'converged': res.success
        }
        if retall:
            retvals.update({'allvecs': res.values()})

    return xopt, retvals
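A hypothetical direct call to the wrapper might look like the sketch below: a toy negative log-likelihood and gradient, with min_method riding along in the kwargs dict as the docstring describes.

import numpy as np

f = lambda params, *fargs: np.sum((params - 1.0) ** 2)  # stands in for the negative log-likelihood
score = lambda params, *fargs: 2.0 * (params - 1.0)     # its gradient

xopt, retvals = _fit_minimize(f, score, start_params=np.zeros(3),
                              fargs=(), kwargs={'min_method': 'BFGS'},
                              disp=False)
print(xopt, retvals['converged'])  # ~[1, 1, 1], True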
Example #43
0
    return np.log(1 + np.exp(x))


def kl_mvn(Sigma1, Sigma2):  #D_KL(n(Sigma1)||n(Sigma2))
    iSigma2 = np.linalg.inv(Sigma2)
    return 0.5*(-np.log(np.linalg.det(iSigma2@Sigma1)) - Sigma1.shape[0] + \
                np.trace(iSigma2@Sigma1))


def objectiveqp(s, Sigmap):
    s_ = softplus(s)
    Sigmaq = np.diag(s_)
    return kl_mvn(Sigmaq, Sigmap)


optqp = minimize(lambda s: objectiveqp(s, Sigma), [0.1, 0.1])
Sigma2qp = np.diag(softplus(optqp.x))
Zplotqp = fplot(Xplot, Yplot, Sigma2qp)
plt.contour(Xplot,
            Yplot,
            Zplotqp,
            levels=5,
            colors='red',
            label='q(x,y)',
            alpha=0.75)
plt.xlabel('x')
plt.ylabel('y')
plt.savefig('../tex/figs/klil3a')
#%%
plt.figure()
plt.contour(Xplot, Yplot, Zplot, levels=5, colors='blue')
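As a sanity check on optqp: for this reverse KL with a diagonal q there is a closed form, namely variances equal to the reciprocal diagonal of p's precision matrix. Sigma below is an illustrative stand-in for the one defined earlier in the script.

import numpy as np

Sigma = np.array([[1.0, 0.6],
                  [0.6, 1.0]])  # placeholder for the script's Sigma
s_optimal = 1.0 / np.diag(np.linalg.inv(Sigma))
print(s_optimal)  # softplus(optqp.x) from the fit above should match this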
Example #44
0
def run_patch(patchname,
              splinedata=splinedata,
              psfpath=psfpath,
              maxactive=3,
              init_cov=None,
              start=None,
              nstart=25,
              nwarm=250,
              ntune=2500,
              niter=200,
              runtype="sample",
              ntime=10,
              verbose=True,
              rank=0,
              scatter_fluxes=False,
              tag=""):
    """
    This runs in a single CPU process.  It dispatches the 'patch data' to the
    device and then runs a pymc3 HMC sampler.  Each likelihood call within the
    HMC sampler copies the proposed parameter position to the device, runs the
    kernel, and then copies back the result, returning the summed ln-like and
    the gradients thereof to the sampler.

    :param patchname: 
        Full path to the patchdata hdf5 file.
    """

    print(patchname)

    #if tag is not None:
    #    tail = "_{}_result"

    resultname = tag + "-" + os.path.basename(patchname).replace(
        ".h5", "_result")
    resultname = pjoin(path_to_results, resultname)

    print("Rank {} writing to {}".format(rank, resultname))

    # --- Prepare Patch Data ---
    use_bands = slice(None)
    stamps, scene = patch_conversion(patchname,
                                     splinedata,
                                     psfpath,
                                     nradii=9,
                                     use_bands=use_bands)
    miniscene = set_inactive(scene, [stamps[0], stamps[-1]], nmax=maxactive)
    pra = np.median([s.ra for s in miniscene.sources])
    pdec = np.median([s.dec for s in miniscene.sources])
    zerocoords(stamps, miniscene, sky_zero=np.array([pra, pdec]))

    if scatter_fluxes:
        for s in miniscene.sources:
            s.flux += np.arange(len(s.filternames)) * 0.1

    patch = Patch(stamps=stamps, miniscene=miniscene, return_residual=True)
    p0 = miniscene.get_all_source_params().copy()

    # --- Copy patch data to device ---
    gpu_patch = patch.send_to_gpu()
    gpu_proposer = Proposer(patch)

    # --- Instantiate the ln-likelihood object ---
    # This object splits the lnlike_function into two, since that computes
    # both lnp and lnp_grad, and we need to wrap them in separate theano ops.
    model = GPUPosterior(gpu_proposer,
                         miniscene,
                         name=patchname,
                         verbose=verbose)

    # --- Subtract off the fixed sources ---
    # TODO

    if runtype == "sample":
        # -- Launch HMC ---
        # wrap the loglike and grad in theano tensor ops
        model.proposer.patch.return_residuals = False
        logl = LogLikeWithGrad(model)
        # Get upper and lower bounds for variables
        lower, upper = prior_bounds(miniscene)
        print(lower.dtype, upper.dtype)
        pnames = miniscene.parameter_names
        start = dict(zip(pnames, p0))

        # Define the windows used to tune the mass matrix.
        nwindow = nstart * 2**np.arange(
            np.floor(np.log2((ntune - nwarm) / nstart)))
        nwindow = np.append(nwindow, ntune - nwarm - np.sum(nwindow))
        nwindow = nwindow.astype(int)

        # The pm.sample() method below will draw an initial theta,
        # then call logl.perform and logl.grad multiple times
        # in a loop with different theta values.
        t = time()
        with pm.Model() as opmodel:

            # Set priors for each element of theta.
            z0 = [
                pm.Uniform(p, lower=l, upper=u)
                for p, l, u in zip(pnames, lower, upper)
            ]
            theta = tt.as_tensor_variable(z0)

            # Instantiate target density.
            pm.DensityDist('likelihood',
                           lambda v: logl(v),
                           observed={'v': theta})

            # Tune mass matrix.
            start = None
            burnin = None
            for steps in nwindow:
                step = get_step_for_trace(init_cov=init_cov, trace=burnin)
                burnin = pm.sample(start=start,
                                   tune=steps,
                                   draws=2,
                                   step=step,
                                   compute_convergence_checks=False,
                                   discard_tuned_samples=False)
                start = [t[-1] for t in burnin._straces.values()]
            step = get_step_for_trace(init_cov=init_cov, trace=burnin)
            tm = time() - t

            # Sample with tuned mass matrix.
            trace = pm.sample(draws=niter,
                              tune=nwarm,
                              step=step,
                              start=start,
                              progressbar=False,
                              cores=1,
                              discard_tuned_samples=True)
        ts = time() - t

        # yuck.
        chain = np.array([trace.get_values(n) for n in pnames]).T

        result = Result()
        result.ndim = len(p0)
        result.nactive = miniscene.nactive
        result.nbands = patch.n_bands
        result.nexp = patch.n_exp
        result.pinitial = p0.copy()
        result.chain = chain
        result.ncall = np.copy(model.ncall)
        result.wall_time = ts
        #result.scene = miniscene
        result.lower = lower
        result.upper = upper
        result.patchname = patchname
        result.sky_reference = (pra, pdec)
        result.parameter_names = pnames

        #last = chain[:, -1]
        #model.proposer.patch.return_residuals = True
        #result.residuals = model.residuals(last)

        save_results(result, resultname)

    elif runtype == "optimize":
        # --- Launch an optimization ---
        opts = {
            'ftol': 1e-6,
            'gtol': 1e-6,
            'factr': 10.,
            'disp': False,
            'iprint': 1,
            'maxcor': 20
        }
        theta0 = p0
        t = time()
        scires = minimize(model.nll,
                          theta0,
                          jac=True,
                          method='L-BFGS-B',  # the options above (factr, maxcor, iprint) are L-BFGS-B options
                          options=opts,
                          bounds=None)
        ts = time() - t
        chain = scires

    elif runtype == "timing":
        # --- Time a single call ---
        model.proposer.patch.return_residuals = False
        t = time()
        for i in range(ntime):
            model.evaluate(p0)
            print(model._lnp)
            print(model._lnp_grad)
        ts = time() - t
        chain = [model._lnp, model._lnp_grad]
        print("took {}s for a single call".format(ts / ntime))

    return chain, (rank, model.ncall, ts)
Example #45
0
# Defining the optimization problem and settings
cons = ({
    'type': 'ineq',
    'fun': lambda x: Vmax[0] - np.dot(x[:], len_elements.T)[0]
})  # Make sure that only 1D array comes out
bnds = tuple([(.1, 20)] * A.shape[1])
arguments = (GDof, elementNodes, nodeCoordinates, index_elem, E, prescribedDof,
             force, False)
options = {'ftol': 1e-5, 'disp': True, 'maxiter': 50}  # SLSQP takes ftol, not gtol

res = minimize(optim_fun,
               A,
               args=arguments,
               method='SLSQP',
               jac=None,
               hess=None,
               hessp=None,
               bounds=bnds,
               constraints=cons,
               tol=None,
               callback=None,
               options=options)
AreaOpt = res.x
Maxiter = res.nit

# =============================================================================
# boundary conditions and solution
# =============================================================================

stiffness = FEM.formStiffness2Dtruss(AreaOpt, GDof, elementNodes, nodeCoordinates, \
                                     index_elem, E)
Example #46
0
    def findSurrogateMinimum(cobra, surrogateModels, bestPredictor):
        xStarts = computeStartPoints(cobra)
        cons = []
        cons.append({'type': 'ineq', 'fun': gCOBRA})

        for factor in range(len(cobra['lower'])):
            lower = cobra['lower'][factor]
            l = {
                'type': 'ineq',
                'fun': lambda x, lb=lower, i=factor: x[i] - lb
            }
            cons.append(l)
        for factor in range(len(cobra['upper'])):
            upper = cobra['upper'][factor]
            u = {
                'type': 'ineq',
                'fun': lambda x, ub=upper, i=factor: ub - x[i]
            }
            cons.append(u)

        submins = []
        besti = 0
        bestFun = 0
        success = []

        for i in range(len(xStarts)):
            xStart = xStarts[i]
            # 'tol' is an argument of minimize() itself, not a COBYLA option
            opts = {'maxiter': cobra['seqFeval']}
            subMin = optimize.minimize(subSMSProb2,
                                       xStart,
                                       constraints=cons,
                                       options=opts,
                                       tol=cobra['seqTol'],
                                       method='COBYLA')
            submins.append(subMin)
            success.append(subMin['success'])
            if subMin['fun'] < bestFun and subMin['success']:
                bestFun = subMin['fun']
                besti = i

        if all(success):
            minRequiredEvaluations = (cobra['dimension'] +
                                      cobra['nConstraints'] +
                                      cobra['nObj']) * 20
            adjustedAmountEvaluations = int(cobra['seqFeval'] * 0.9)
            cobra['seqFeval'] = max(adjustedAmountEvaluations,
                                    minRequiredEvaluations)

            maxStartingPoints = (cobra['dimension'] + cobra['nConstraints'] +
                                 cobra['nObj']) * 10
            adjustedAmountPoints = int(cobra['computeStartingPoints'] * 1.1)
            cobra['computeStartingPoints'] = min(maxStartingPoints,
                                                 adjustedAmountPoints)
        else:
            maxRequiredEvaluations = (cobra['dimension'] +
                                      cobra['nConstraints'] +
                                      cobra['nObj']) * 1000
            adjustedAmountEvaluations = int(cobra['seqFeval'] * 1.1)
            cobra['seqFeval'] = min(adjustedAmountEvaluations,
                                    maxRequiredEvaluations)

            minRequiredPoints = 2 * (cobra['dimension'] +
                                     cobra['nConstraints'] + cobra['nObj'])
            adjustedAmountPoints = int(cobra['computeStartingPoints'] * 0.9)
            cobra['computeStartingPoints'] = max(adjustedAmountPoints,
                                                 minRequiredPoints)

        if not any(success):
            print('NO SUCCESS', cobra['computeStartingPoints'],
                  cobra['seqFeval'])
            smallest_constr = np.finfo(np.float64).max
            bestObj = np.finfo(np.float64).max
            besti = 0
            i = 0
            for subMin in submins:
                if subMin['maxcv'] < smallest_constr or (
                        subMin['maxcv'] <= smallest_constr
                        and subMin['fun'] < bestObj):
                    smallest_constr = subMin['maxcv']
                    bestObj = subMin['fun']
                    besti = i
                i += 1

        subMin = submins[besti]
        xNew = subMin['x']
        xNew = np.maximum(xNew, cobra['lower'])
        xNew = np.minimum(xNew, cobra['upper'])

        cobra['optimizerConvergence'] = np.append(
            cobra['optimizerConvergence'], subMin['status'])
        return xNew
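The loops above encode box bounds as one inequality constraint per bound, because COBYLA ignored the bounds argument in older SciPy releases. The trick in isolation (toy objective, illustrative names):

import numpy as np
from scipy.optimize import minimize

lower, upper = np.array([0.0, 0.0]), np.array([2.0, 2.0])
cons = [{'type': 'ineq', 'fun': lambda x, i=i, lb=lb: x[i] - lb}
        for i, lb in enumerate(lower)]
cons += [{'type': 'ineq', 'fun': lambda x, i=i, ub=ub: ub - x[i]}
         for i, ub in enumerate(upper)]
res = minimize(lambda x: (x[0] - 3) ** 2 + (x[1] - 3) ** 2,
               [1.0, 1.0], method='COBYLA', constraints=cons)
print(res.x)  # pushed up against the upper bounds, ~[2, 2]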
Example #47
0
def fitModel(data):
    args = (data,)  # args must be a tuple for scipy.optimize.minimize
    x0 = [1., .2, 0.1]
    res = minimize(hubbertModel_Fit, x0, args=args)
    return res
Example #48
0
de.drop(['Unnamed: 0', 'student', 'income'], axis=1, inplace=True)
de['default'].replace(to_replace=['No', 'Yes'], value=[0, 1], inplace=True)
df['default'].replace(to_replace=['No', 'Yes'], value=[0, 1], inplace=True)
df['student'].replace(to_replace=['No', 'Yes'], value=[0, 1], inplace=True)
observ = df.drop('default', axis=1).values
respon = df['default'].values.reshape(-1, 1)

# LogisticReg = sm.GLM(endog=de['default'], exog=sm.add_constant(de['balance']), family=sm.families.Binomial()).fit()


def log_likelihood(par):
    xb = par[0] + par[1] * de['balance']
    return -np.sum((de['default'] * xb) - np.log(1 + np.exp(xb)))


MLE = minimize(log_likelihood, x0=np.array([0, 0]), method='Nelder-Mead')
print(MLE)

# Bernoulli
values = np.array([1, 1, 1, 0, 0])


def bernloglik(par):
    return -np.sum(values * np.log(par) + (1 - values) * np.log(1 - par))


MLE_2 = minimize(bernloglik, x0=np.array([0]), method='Nelder-Mead')

parvals = np.linspace(0, 1, 20)
funcvals = list(map(lambda x: -bernloglik(x), parvals))
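A quick analytic cross-check on the Bernoulli fit: the maximum-likelihood estimate of a Bernoulli parameter is the sample mean, so the optimizer should land near 0.6 here.

print(MLE_2.x[0], values.mean())  # both approximately 0.6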
Example #49
0
    def __call__(self, input_or_adv, label=None, unpack=True):
        """Uses SLSQP to minimize the distance between the input and the
        adversarial under the constraint that the input is adversarial.

        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            The original, correctly classified input. If it is a
            numpy array, label must be passed as well. If it is
            an :class:`Adversarial` instance, label must not be passed.
        label : int
            The reference label of the original input. Must be passed
            if input is a numpy array, must not be passed if input is
            an :class:`Adversarial` instance.
        unpack : bool
            If true, returns the adversarial input, otherwise returns
            the Adversarial object.

        """

        a = input_or_adv
        del input_or_adv
        del label
        del unpack

        x = a.unperturbed
        dtype = a.unperturbed.dtype
        min_, max_ = a.bounds()

        # flatten the input (and remember the shape)
        shape = x.shape
        n = x.size
        x = x.flatten()

        x0 = nprng.uniform(min_, max_, size=x.shape)
        bounds = [(min_, max_)] * n
        options = {"maxiter": 500}

        def fun(x, *args):
            """Objective function with derivative"""
            distance = a.normalized_distance(x.reshape(shape))
            return distance.value, distance.gradient.reshape(-1)

        def eq_constraint(x, *args):
            """Equality constraint"""
            _, is_adv = a.forward_one(x.reshape(shape).astype(dtype))
            if is_adv:
                return 0.0
            else:
                return 1.0

        constraints = [{"type": "eq", "fun": eq_constraint}]

        result = so.minimize(
            fun,
            x0,
            method="SLSQP",
            jac=True,
            bounds=bounds,
            constraints=constraints,
            options=options,
        )

        a.forward_one(result.x.reshape(shape).astype(dtype))
Example #50
0
    def run_minimize(self):
        """
        Here the actual minimization of the cost_function is done via
        scipy.optimize.minimize.
        First, data from the glacier directory is read and optionally a
        DataLogger is created. The inversion settings used for this
        particular inversion are saved in this subdirectory. Bounds for the
        minimization are derived. Then the cost function is created and the
        minimization of this cost function started. In the end, the result is
        written to disk and optionally, further information is written to disk.

        The whole process is dominated by the set inversion settings

        Returns
        -------
        Result of minimization as scipy.optimize.minimize returns (res.x
        gives flattened ndarray with bed, needs to be reshaped)

        """

        # Copy first_guessed_bed to inversion directory
        if self.inv_settings['log_minimize_steps']:
            # TODO: really useful? -> respect reset argument in gdir?
            self.clear_dir(self.get_current_basedir())

        with rasterio.open(self.gdir.get_filepath('first_guessed_bed')) as src:
            profile = src.profile
            data = src.read(1)
        with rasterio.open(self.get_subdir_filepath('first_guessed_bed'),
                           'w', **profile) as dst:
            dst.write(data, 1)
        if os.path.exists(self.gdir.get_filepath('first_guessed_bed_noise')):
            shutil.copy(self.gdir.get_filepath('first_guessed_bed_noise'),
                        self.get_subdir_filepath('first_guessed_bed_noise'))

        write_pickle(self.inv_settings,
                     self.get_subdir_filepath('inversion_settings'))
        # Write out reg_parameters to check easier later on
        self.write_string_to_file(self.get_subdir_filepath('reg_parameters'),
                                  str(self.inv_settings['reg_parameters']))
        self.inv_settings = load_pickle(
            self.get_subdir_filepath('inversion_settings'))
        self._read_all_data()
        self.minimize_log = ''
        self.data_logger = None
        callback = None

        if self.inv_settings['log_minimize_steps']:
            dl = DataLogger(self)
            self.data_logger = dl
            callback = self.iteration_info_callback

        # ----------------------------------------------------------------------
        # Core: things are happening here:
        bounds = self.get_bounds()

        self.cost_func = create_cost_func(self.gdir, self.data_logger,
                                          self.surf_noise,
                                          self.bed_measurements)
        res = minimize(fun=self.cost_func,
                       x0=self.first_guessed_bed.astype(np.float64).flatten(),
                       method=self.inv_settings['solver'], jac=True,
                       bounds=bounds,
                       options=self.inv_settings['minimize_options'],
                       callback=callback)

        inverted_bed = res.x.reshape(self.first_guessed_bed.shape)
        # ----------------------------------------------------------------------

        profile['dtype'] = 'float64'
        with rasterio.open(self.get_subdir_filepath('inverted_bed'),
                           'w', **profile) as dst:
            dst.write(inverted_bed, 1)

        if self.inv_settings['log_minimize_steps']:
            self.write_string_to_file('log.txt', self.minimize_log)
            dir = self.get_current_basedir()
            dl.filter_data_from_optimization()  # Optional, if we want to
            data_logging.write_pickle(dl,
                                      self.get_subdir_filepath('data_logger'))
            dl.plot_all(dir)
            plt.close('all')

        return res
Example #51
0
    def train(self, disp=True):
        """
        Train SVR model

        Args:
           disp (bool): Display process or not. Default to True.

        Returns:
            None
        """
        if self.svrinfo.nepsi == 0 and self.svrinfo.nc == 0 and self.svrinfo.nlens == 0:
            if disp:
                print("Construct SVR without tuning parameters.")
            self.svrinfo.optimizer = None
            xparamopt = [
                self.svrinfo.theta, self.svrinfo.epsilon, self.svrinfo.c,
                self.svrinfo.wgk
            ]

            if self.svrinfo.errtype == 'L2':
                self.svrinfo.errloo, self.svrinfo.mu, self.svrinfo.alpha, self.svrinfo.epsilon, \
                    self.svrinfo.theta, self.svrinfo.c, self.svrinfo.wgk = l2svr(xparamopt, self.svrinfo, return_all=True)
            else:
                raise NotImplementedError(
                    'Other options are not yet available')

        else:
            xhyp0_norm = sobol_points(self.svrinfo.nrestart + 1,
                                      len(self.svrinfo.lbhyp))
            xhyp0 = realval(np.array(self.svrinfo.lbhyp),
                            np.array(self.svrinfo.ubhyp), xhyp0_norm[1:, :])
            optimbound = np.transpose(
                np.vstack((self.svrinfo.lbhyp, self.svrinfo.ubhyp)))

            bestxcand = np.zeros(np.shape(xhyp0))
            errloocand = np.zeros(shape=[self.svrinfo.nrestart])
            for ii in range(self.svrinfo.nrestart):
                xhyp0_ii = xhyp0[ii, :]

                if self.svrinfo.optimizer == 'lbfgsb':
                    res = minimize(l2svr,
                                   xhyp0_ii,
                                   method='L-BFGS-B',
                                   options={
                                       'eps': 1e-03,
                                       'disp': False
                                   },
                                   bounds=optimbound,
                                   args=(self.svrinfo, False))
                    bestxcand_ii = res.x
                    errloocand_ii = res.fun
                elif self.svrinfo.optimizer == 'diff_evo':
                    res = differential_evolution(l2svr,
                                                 optimbound,
                                                 args=(self.svrinfo, False))
                    bestxcand_ii = res.x
                    errloocand_ii = res.fun
                else:
                    raise NotImplementedError(
                        'Other optimizers are not yet implemented')

                bestxcand[ii, :] = bestxcand_ii
                errloocand[ii] = errloocand_ii

            I = np.argmin(errloocand)
            xparamopt = bestxcand[I, :]

            if disp:
                print("Train hyperparam finished.")
                print(f"Best hyperparameter is {xparamopt}")
                print(f"With Error LOO of {errloocand[I]}")

            self.svrinfo.errloo, self.svrinfo.mu, self.svrinfo.alpha, self.svrinfo.epsilon, \
                self.svrinfo.theta, self.svrinfo.c, self.svrinfo.wgk = l2svr(xparamopt, self.svrinfo, return_all=True)
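The restart loop above is a standard multi-start pattern: run a local optimizer from several start points and keep the best finisher. In miniature, with an invented multimodal objective:

import numpy as np
from scipy.optimize import minimize

f = lambda x: np.sin(3 * x[0]) + 0.1 * x[0] ** 2
starts = np.linspace(-3, 3, 5)
results = [minimize(f, [x0], method='L-BFGS-B', bounds=[(-3, 3)]) for x0 in starts]
best = min(results, key=lambda r: r.fun)
print(best.x, best.fun)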
Example #52
0
def estimate_gaze_from_landmarks(iris_landmarks,
                                 iris_centre,
                                 eyeball_centre,
                                 eyeball_radius,
                                 initial_gaze=None):
    """Given iris edge landmarks and other coordinates, estimate gaze direction.

    More correctly stated, estimate gaze from iris edge landmark coordinates, iris centre
    coordinates, eyeball centre coordinates, and eyeball radius in pixels.
    """
    e_x0, e_y0 = eyeball_centre
    i_x0, i_y0 = iris_centre

    if initial_gaze is not None:
        theta, phi = initial_gaze
        # theta = -theta
    else:
        theta = np.arcsin(np.clip((i_y0 - e_y0) / eyeball_radius, -1.0, 1.0))
        phi = np.arcsin(
            np.clip((i_x0 - e_x0) / (eyeball_radius * -np.cos(theta)), -1.0,
                    1.0))

    delta = 0.1 * np.pi
    if iris_landmarks[0, 0] < iris_landmarks[4, 0]:  # flipped
        alphas = np.flip(np.arange(0.0, 2.0 * np.pi, step=np.pi / 4.0), axis=0)
    else:
        alphas = np.arange(-np.pi, np.pi, step=np.pi / 4.0) + np.pi / 4.0
    sin_alphas = np.sin(alphas)
    cos_alphas = np.cos(alphas)

    def gaze_fit_loss_func(inputs):
        theta, phi, delta, phase = inputs
        sin_phase = np.sin(phase)
        cos_phase = np.cos(phase)
        # sin_alphas_shifted = np.sin(alphas + phase)
        sin_alphas_shifted = sin_alphas * cos_phase + cos_alphas * sin_phase
        # cos_alphas_shifted = np.cos(alphas + phase)
        cos_alphas_shifted = cos_alphas * cos_phase - sin_alphas * sin_phase

        sin_theta = np.sin(theta)
        cos_theta = np.cos(theta)
        sin_phi = np.sin(phi)
        cos_phi = np.cos(phi)
        sin_delta_sin = np.sin(delta * sin_alphas_shifted)
        sin_delta_cos = np.sin(delta * cos_alphas_shifted)
        cos_delta_sin = np.cos(delta * sin_alphas_shifted)
        cos_delta_cos = np.cos(delta * cos_alphas_shifted)
        # x = -np.cos(theta + delta * sin_alphas_shifted)
        x1 = -cos_theta * cos_delta_sin + sin_theta * sin_delta_sin
        # x *= np.sin(phi + delta * cos_alphas_shifted)
        x2 = sin_phi * cos_delta_cos + cos_phi * sin_delta_cos
        x = x1 * x2
        # y = np.sin(theta + delta * sin_alphas_shifted)
        y1 = sin_theta * cos_delta_sin
        y2 = cos_theta * sin_delta_sin
        y = y1 + y2

        ix = e_x0 + eyeball_radius * x
        iy = e_y0 + eyeball_radius * y
        dx = ix - iris_landmarks[:, 0]
        dy = iy - iris_landmarks[:, 1]
        out = np.mean(dx**2 + dy**2)

        # In addition, match estimated and actual iris centre
        iris_dx = e_x0 + eyeball_radius * -cos_theta * sin_phi - i_x0
        iris_dy = e_y0 + eyeball_radius * sin_theta - i_y0
        out += iris_dx**2 + iris_dy**2

        # sin_alphas_shifted = sin_alphas * cos_phase + cos_alphas * sin_phase
        # cos_alphas_shifted = cos_alphas * cos_phase - sin_alphas * sin_phase
        dsin_alphas_shifted_dphase = -sin_alphas * sin_phase + cos_alphas * cos_phase
        dcos_alphas_shifted_dphase = -cos_alphas * sin_phase - sin_alphas * cos_phase

        # sin_delta_sin = np.sin(delta * sin_alphas_shifted)
        # sin_delta_cos = np.sin(delta * cos_alphas_shifted)
        # cos_delta_sin = np.cos(delta * sin_alphas_shifted)
        # cos_delta_cos = np.cos(delta * cos_alphas_shifted)
        dsin_delta_sin_ddelta = cos_delta_sin * sin_alphas_shifted
        dsin_delta_cos_ddelta = cos_delta_cos * cos_alphas_shifted
        dcos_delta_sin_ddelta = -sin_delta_sin * sin_alphas_shifted
        dcos_delta_cos_ddelta = -sin_delta_cos * cos_alphas_shifted
        dsin_delta_sin_dphase = cos_delta_sin * delta * dsin_alphas_shifted_dphase
        dsin_delta_cos_dphase = cos_delta_cos * delta * dcos_alphas_shifted_dphase
        dcos_delta_sin_dphase = -sin_delta_sin * delta * dsin_alphas_shifted_dphase
        dcos_delta_cos_dphase = -sin_delta_cos * delta * dcos_alphas_shifted_dphase

        # x1 = -cos_theta * cos_delta_sin + sin_theta * sin_delta_sin
        # x2 = sin_phi * cos_delta_cos + cos_phi * sin_delta_cos
        dx1_dtheta = sin_theta * cos_delta_sin + cos_theta * sin_delta_sin
        dx2_dtheta = 0.0
        dx1_dphi = 0.0
        dx2_dphi = cos_phi * cos_delta_cos - sin_phi * sin_delta_cos
        dx1_ddelta = -cos_theta * dcos_delta_sin_ddelta + sin_theta * dsin_delta_sin_ddelta
        dx2_ddelta = sin_phi * dcos_delta_cos_ddelta + cos_phi * dsin_delta_cos_ddelta
        dx1_dphase = -cos_theta * dcos_delta_sin_dphase + sin_theta * dsin_delta_sin_dphase
        dx2_dphase = sin_phi * dcos_delta_cos_dphase + cos_phi * dsin_delta_cos_dphase

        # y1 = sin_theta * cos_delta_sin
        # y2 = cos_theta * sin_delta_sin
        dy1_dtheta = cos_theta * cos_delta_sin
        dy2_dtheta = -sin_theta * sin_delta_sin
        dy1_dphi = 0.0
        dy2_dphi = 0.0
        dy1_ddelta = sin_theta * dcos_delta_sin_ddelta
        dy2_ddelta = cos_theta * dsin_delta_sin_ddelta
        dy1_dphase = sin_theta * dcos_delta_sin_dphase
        dy2_dphase = cos_theta * dsin_delta_sin_dphase

        # x = x1 * x2
        # y = y1 + y2
        dx_dtheta = dx1_dtheta * x2 + x1 * dx2_dtheta
        dx_dphi = dx1_dphi * x2 + x1 * dx2_dphi
        dx_ddelta = dx1_ddelta * x2 + x1 * dx2_ddelta
        dx_dphase = dx1_dphase * x2 + x1 * dx2_dphase
        dy_dtheta = dy1_dtheta + dy2_dtheta
        dy_dphi = dy1_dphi + dy2_dphi
        dy_ddelta = dy1_ddelta + dy2_ddelta
        dy_dphase = dy1_dphase + dy2_dphase

        # ix = w_2 + eyeball_radius * x
        # iy = h_2 + eyeball_radius * y
        dix_dtheta = eyeball_radius * dx_dtheta
        dix_dphi = eyeball_radius * dx_dphi
        dix_ddelta = eyeball_radius * dx_ddelta
        dix_dphase = eyeball_radius * dx_dphase
        diy_dtheta = eyeball_radius * dy_dtheta
        diy_dphi = eyeball_radius * dy_dphi
        diy_ddelta = eyeball_radius * dy_ddelta
        diy_dphase = eyeball_radius * dy_dphase

        # dx = ix - iris_landmarks[:, 0]
        # dy = iy - iris_landmarks[:, 1]
        ddx_dtheta = dix_dtheta
        ddx_dphi = dix_dphi
        ddx_ddelta = dix_ddelta
        ddx_dphase = dix_dphase
        ddy_dtheta = diy_dtheta
        ddy_dphi = diy_dphi
        ddy_ddelta = diy_ddelta
        ddy_dphase = diy_dphase

        # out = dx ** 2 + dy ** 2
        dout_dtheta = np.mean(2 * (dx * ddx_dtheta + dy * ddy_dtheta))
        dout_dphi = np.mean(2 * (dx * ddx_dphi + dy * ddy_dphi))
        dout_ddelta = np.mean(2 * (dx * ddx_ddelta + dy * ddy_ddelta))
        dout_dphase = np.mean(2 * (dx * ddx_dphase + dy * ddy_dphase))

        # iris_dx = e_x0 + eyeball_radius * -cos_theta * sin_phi - i_x0
        # iris_dy = e_y0 + eyeball_radius * sin_theta - i_y0
        # out += iris_dx ** 2 + iris_dy ** 2
        dout_dtheta += 2 * eyeball_radius * (sin_theta * sin_phi * iris_dx +
                                             cos_theta * iris_dy)
        dout_dphi += 2 * eyeball_radius * (-cos_theta * cos_phi * iris_dx)

        return out, np.array(
            [dout_dtheta, dout_dphi, dout_ddelta, dout_dphase])

    phase = 0.02
    result = minimize(
        gaze_fit_loss_func,
        x0=[theta, phi, delta, phase],
        bounds=(
            (-0.4 * np.pi, 0.4 * np.pi),
            (-0.4 * np.pi, 0.4 * np.pi),
            (0.01 * np.pi, 0.5 * np.pi),
            (-np.pi, np.pi),
        ),
        jac=True,
        tol=1e-6,
        method='TNC',
        options={
            # 'disp': True,
            'gtol': 1e-6,
            'maxiter': 100,
        })
    if result.success:
        theta, phi, delta, phase = result.x

    return np.array([-theta, phi])
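gaze_fit_loss_func returns (out, gradient) as a single tuple, which is what jac=True tells minimize to expect. The convention in miniature:

import numpy as np
from scipy.optimize import minimize

def f_and_grad(x):
    # Return the objective value and its gradient together
    return np.sum(x ** 2), 2.0 * x

res = minimize(f_and_grad, x0=np.array([1.0, -2.0]), jac=True, method='TNC',
               bounds=[(-5, 5), (-5, 5)])
print(res.x)  # approximately [0, 0]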
Example #53
0
zahyou = np.array([[-6,3]])
f2 = lam(zahyou, P[14].reshape(1,12), Es)

def g(args):
    """
    最適化関数
    :param args: 行列を行ベクトルに崩したもの
    :return: f2 に args を叩き込んだもの
    """
    arr1 = args[0:6].reshape(3,2)  # E' variables ( E'[0] = this[0:dim+1,0] * (E:E0) )
    arr2 = args[6:8].reshape(2,1)  # R  variables ( ignore )
    return f2(arr1,arr2)

init = np.array([1,0,0,1,0,0,0,0])
res = opt.minimize(g, init, method='L-BFGS-B')
print(res)

print("original x: "+str(Xs[14]))
print("original y: "+str(Ys[14]))
temp1 = Es[0].copy() ; temp2 = Es[1].copy()
p_iii = P[14].reshape(1,12) - (zahyou[0,0]*Es[0,:].reshape(1,12) + zahyou[0,1]*Es[1,:].reshape(1,12))
p_ii = p_iii / np.linalg.norm(p_iii)

Es[0] = res.x[0] * temp1 + res.x[2] * temp2 + res.x[4] * p_ii
Es[1] = res.x[1] * temp1 + res.x[3] * temp2 + res.x[5] * p_ii

update_points()
print("updated x: "+str(Xs[14]))
print("updated y: "+str(Ys[14]))
Example #54
0
            y[i] = 0
    p = h.argmax(axis=1)
    accuracy = np.sum(p == y) / m
    return accuracy


#def main():
filename = "ex4data1.mat"
x, y = getData(filename)
input_layer_size = 400
hidden_layer_size = 25
num_labels = 10
lam = 1
Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size)
Theta2 = randInitializeWeights(hidden_layer_size, num_labels)
nn_params = unrollingParams(Theta1, Theta2)
#J, grad = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, x, y, lam)
#print(J, grad.shape)

result = op.minimize(fun=nnCostFunction,
                     x0=nn_params,
                     args=(input_layer_size, hidden_layer_size, num_labels, x,
                           y, lam),
                     method='TNC',
                     jac=gradient)

Theta1, Theta2 = rollingParams(result.x, input_layer_size, hidden_layer_size,
                               num_labels)
accuracy = predictAccuracy(Theta1, Theta2, x, y)
print("Training Set Accuracy: {:%}".format(accuracy))
Example #55
0
def compute_response_one_mecha(x, type_opti, Green_RW):

    keys_mechanism = ['stf', 'zsource', 'f0', 'M0', 'M', 'phi', 'mt']

    mecha = x
    station = mecha['station_tab'][0]
    rtab = np.array([station['xs'] / 1000.])
    phitab = np.array([0.])
    type, unknown, mode_max, dimension_seismic = 'RW', 'v', -1, 3
    err = mecha['FPUC']
    errdepth = mecha['ERDEP'] * 1000.

    ## Setup perturbed mechanisms range
    mw = mecha['MAG']
    if (mw < 4.):
        mw = (2. / 3.) * mecha['MAG'] + 1.15

    ## Setup a baseline mechanism
    mechanism = {}
    for key in keys_mechanism:
        mechanism[key] = mecha[key]

    mt = mechanism['mt']
    if type_opti not in ['min', 'max']:

        strike = mecha['STRIKE']
        if type_opti == 'left_strike_slip':
            dip, rake = 90., 0.
        elif type_opti == 'right_strike_slip':
            dip, rake = 90., 180.
        elif type_opti == 'normal':
            dip, rake = 45., -90.
        elif type_opti == 'reverse':
            dip, rake = 45., 90
        else:
            sys.exit('Fault type not recognized: ' + type_opti)

    else:
        Green_RW.update_mechanism(mechanism)
        bounds = Bounds([
            0. - mecha['STRIKE'], 0. - mecha['DIP'], -180.0 - mecha['RAKE']
        ], [360. - mecha['STRIKE'], 90. - mecha['DIP'], 180.0 - mecha['RAKE']])

        ## Solve minimization problem
        x0 = np.array([0., 0., 0.])  # Initial condition

        def constraint(x, err):
            return err - np.sum(np.abs(x))

        res = minimize(Green_RW.response_perturbed_solution,
                       x0=x0,
                       method="COBYLA",
                       constraints=({
                           "fun": constraint,
                           "type": "ineq",
                           'args': (err, )
                       }),
                       args=(rtab, phitab, type, unknown, mode_max,
                             dimension_seismic, type_opti),
                       bounds=bounds)

        ## Compute a mechanism input to change the error simulation
        mechanism = Green_RW.get_mechanism()
        strike0, dip0, rake0 = mt.both_strike_dip_rake()[0]
        strike = strike0 + res['x'][0]
        dip = dip0 + res['x'][1]
        rake = rake0 + res['x'][2]

    m0 = mt.scalar_moment()
    mt = mtm.MomentTensor(strike=strike, dip=dip, rake=rake, scalar_moment=m0)
    mechanism['M'] = mt.m6_up_south_east()
    mechanism['M'] /= 1.e15
    mechanism['mt'] = mt

    ## Change depth
    add = errdepth if type_opti == 'min' else -1 * errdepth
    mechanism['zsource'] += add

    ## Update current dataframe row
    for key in ['zsource', 'M', 'mt']:
        x[key] = mechanism[key]

    x['perturbation'] = True

    return x
Example #56
0
# print(np.dot(X[0], np.transpose(initial_theta)))
# Compute and display initial cost and gradient
cost = costFunction(initial_theta, X, y)
print('Cost at initial theta (zeros): %f' % cost)

grad = gradientFunction(initial_theta, X, y)
print('Gradient at initial theta (zeros): ' + str(grad))

input("Program paused. Press Enter to continue...")

# ============= Part 3: Optimizing using scipy  =============
res = minimize(costFunction,
               initial_theta,
               method='TNC',
               jac=False,
               args=(X, y),
               options={
                   'gtol': 1e-3,
                   'disp': True,
                   'maxiter': 400
               })

theta = res.x
cost = res.fun

# Print theta to screen
print('Cost at theta found by scipy: %f' % cost)
print('theta:', ["%0.4f" % i for i in theta])

# Plot Boundary
plotDecisionBoundary(theta, X, y)
Example #57
0
raw_load_disp_data = np.genfromtxt(exp_filename, delimiter=',')
load = raw_load_disp_data[:, 0]
displacement = raw_load_disp_data[:, 1]

max_displacement = np.amax(displacement)
interpolated_disp = np.linspace(0, max_displacement, num=51)
interpolated_load = np.interp(interpolated_disp, displacement, load)
scaled_displacement = interpolated_disp / (inputs.indenter_radius)
scaled_load = interpolated_load / (inputs.indenter_radius**2)

exp_disp_load = np.array((scaled_displacement, scaled_load)).T

hist_file = open('./results/history.txt', 'wt')
hist_file.write('sum of squares of residuals, yield_stress, K, n\n')
hist_file.close()

algorithm = 'Nelder-Mead'

optimisation_result = optimize.minimize(calc_sum_of_squares,
                                        material_variables,
                                        args=(exp_disp_load, ),
                                        tol=0.005,
                                        method=algorithm)
optimised_material_properties = optimisation_result.x
best_S = optimisation_result.fun

np.savetxt('./results/optimised_material_properties.csv',
           np.array([optimised_material_properties]),
           delimiter=',')
Example #58
0
def _principal_component_analysis(x1, x2, bin_size=250):
    '''
    Performs a modified principal component analysis (PCA) 
    [Eckert et al. 2016] on two variables (`x1`, `x2`). The additional
    PCA is performed in 5 steps:
    1) Transform `x1` & `x2` into the principal component domain and shift
       the y-axis so that all values are positive and non-zero
    2) Fit the `x1` data in the transformed reference frame with an 
       inverse Gaussian Distribution
    3) Bin the transformed data into groups of size bin and find the 
       mean of `x1`, the mean of `x2`, and the standard deviation of `x2`
    4) Perform a first-order linear regression to determine a continuous
       the function relating the mean of the `x1` bins to mean of the `x2` bins
    5) Find a second-order polynomial which best relates the means of 
       `x1` to the standard deviation of `x2` using constrained optimization
         
    Eckert-Gallup, A. C., Sallaberry, C. J., Dallman, A. R., & 
    Neary, V. S. (2016). Application of principal component 
    analysis (PCA) and improved joint probability distributions to 
    the inverse first-order reliability method (I-FORM) for predicting 
    extreme sea states. Ocean Engineering, 112, 307-319.

    Parameters
    ----------
    x1: numpy array 
        Component 1 data
    x2: numpy array 
        Component 2 data        
    bin_size : int
        Number of data points in each bin 
        
    Returns
    -------
    PCA: dict 
       Keys:
       -----       
       'principal_axes': sign corrected PCA axes 
       'shift'         : The shift applied to x2 
       'x1_fit'        : inverse Gaussian fit of x1 data
       'mu_fit'        : fit to _mu_fcn
       'sigma_fit'     : fit to _sig_fits
    '''
    assert isinstance(x1, np.ndarray), 'x1 must be of type np.ndarray'
    assert isinstance(x2, np.ndarray), 'x2 must be of type np.ndarray'
    assert isinstance(bin_size, int), 'bin_size must be of type int'
    # Step 0: Perform Standard PCA
    mean_location = 0
    x1_mean_centered = x1 - x1.mean(axis=0)
    x2_mean_centered = x2 - x2.mean(axis=0)
    n_samples_by_n_features = np.column_stack(
        (x1_mean_centered, x2_mean_centered))
    pca = skPCA(n_components=2)
    pca.fit(n_samples_by_n_features)
    principal_axes = pca.components_

    # STEP 1: Transform data into new reference frame
    # Apply correct/expected sign convention
    principal_axes = abs(principal_axes)
    principal_axes[1, 1] = -principal_axes[1, 1]

    # Rotate data into Principal direction
    x1_and_x2 = np.column_stack((x1, x2))
    x1_x2_components = np.dot(x1_and_x2, principal_axes)
    x1_components = x1_x2_components[:, 0]
    x2_components = x1_x2_components[:, 1]

    # Apply shift to Component 2 to make all values positive
    shift = abs(min(x2_components)) + 0.1
    x2_components = x2_components + shift

    # STEP 2: Fit Component 1 data using an inverse Gaussian distribution
    x1_sorted_index = x1_components.argsort()
    x1_sorted = x1_components[x1_sorted_index]
    x2_sorted = x2_components[x1_sorted_index]

    x1_fit_results = stats.invgauss.fit(x1_sorted, floc=mean_location)
    x1_fit = {
        'mu': x1_fit_results[0],
        'loc': x1_fit_results[1],
        'scale': x1_fit_results[2]
    }

    # Step 3: Bin Data & find order 1 linear relation between x1 & x2 means
    N = len(x1)
    minimum_4_bins = np.floor(N * 0.25)
    if bin_size > minimum_4_bins:
        bin_size = int(minimum_4_bins)
        msg = ('To allow for a minimum of 4 bins the bin size has been '
               f'set to {minimum_4_bins}')
        print(msg)

    N_multiples = N // bin_size
    max_N_multiples_index = N_multiples * bin_size

    x1_integer_multiples_of_bin_size = x1_sorted[0:max_N_multiples_index]
    x2_integer_multiples_of_bin_size = x2_sorted[0:max_N_multiples_index]

    x1_bins = np.split(x1_integer_multiples_of_bin_size, N_multiples)
    x2_bins = np.split(x2_integer_multiples_of_bin_size, N_multiples)

    x1_last_bin = x1_sorted[max_N_multiples_index:]
    x2_last_bin = x2_sorted[max_N_multiples_index:]

    # guard against an empty trailing bin when N is an exact multiple of
    # bin_size (the mean of an empty bin would be NaN)
    if x1_last_bin.size > 0:
        x1_bins.append(x1_last_bin)
        x2_bins.append(x2_last_bin)

    x1_means = np.array([])
    x2_means = np.array([])
    x2_sigmas = np.array([])

    for x1_bin, x2_bin in zip(x1_bins, x2_bins):
        x1_means = np.append(x1_means, x1_bin.mean())
        x2_means = np.append(x2_means, x2_bin.mean())
        x2_sigmas = np.append(x2_sigmas, x2_bin.std())

    mu_fit = stats.linregress(x1_means, x2_means)
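    # linregress returns slope, intercept, rvalue, pvalue, and stderr; the
    # slope and intercept give the order-1 relation between the bin means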

    # STEP 4: Find order 2 relation between x1_mean and x2 standard deviation
    sigma_polynomial_order = 2
    sig_0 = 0.1 * np.ones(sigma_polynomial_order + 1)

    def _objective_function(sig_p, x1_means, x2_sigmas):
        return mean_squared_error(np.polyval(sig_p, x1_means), x2_sigmas)

    # Constraint functions: np.polyval orders coefficients highest-first, so
    # sig_p[2] is the y-intercept and, for sig_p[0] > 0, the parabola's
    # minimum value is sig_p[2] - sig_p[1]**2 / (4*sig_p[0]); both must be
    # non-negative so the fitted sigma stays positive
    y_intercept_gt_0 = lambda sig_p: sig_p[2]
    sig_polynomial_min_gt_0 = lambda sig_p: (sig_p[2] - (sig_p[1]**2) /
                                             (4 * sig_p[0]))
    constraints = ({
        'type': 'ineq',
        'fun': y_intercept_gt_0
    }, {
        'type': 'ineq',
        'fun': sig_polynomial_min_gt_0
    })

    sigma_fit = optim.minimize(_objective_function,
                               x0=sig_0,
                               args=(x1_means, x2_sigmas),
                               method='SLSQP',
                               constraints=constraints)
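    # SLSQP supports the inequality constraints defined above; the optimized
    # polynomial coefficients are returned in sigma_fit.x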

    PCA = {
        'principal_axes': principal_axes,
        'shift': shift,
        'x1_fit': x1_fit,
        'mu_fit': mu_fit,
        'sigma_fit': sigma_fit
    }

    return PCA
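
# A minimal usage sketch on synthetic correlated data (not part of the
# original source). `principal_component_analysis` is a hypothetical name
# standing in for the routine above, whose def line is not shown here.
rng = np.random.default_rng(0)
x1_demo = rng.weibull(1.5, 1000) * 2.0
x2_demo = 0.8 * x1_demo + rng.normal(0.0, 0.3, 1000)
pca_demo = principal_component_analysis(x1_demo, x2_demo, bin_size=250)
print(pca_demo['principal_axes'], pca_demo['shift'])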
Example #59
0
import numpy as np

def mean_absolute_percentage_error(y_pred, y_true, sample_weights=None):
    # weighted MAPE; the head of this function is truncated in this excerpt,
    # so the signature is reconstructed and equal weights assumed by default
    if sample_weights is None:
        sample_weights = np.ones_like(y_true, dtype=float)
    return(100/sum(sample_weights)*np.dot(
            sample_weights, (np.abs((y_true - y_pred) / y_true))
    ))
    
loss_function = mean_absolute_percentage_error

from scipy.optimize import minimize

def objective_function(beta, X, Y):
    error = loss_function(np.matmul(X,beta), Y)
    return(error)

# You must provide a starting point at which to initialize
# the parameter search space
beta_init = np.array([1]*X.shape[1])
result = minimize(objective_function, beta_init, args=(X,Y),
                  method='BFGS', options={'maxiter': 500})

# The optimal values for the input parameters are stored
# in result.x
beta_hat = result.x
print(beta_hat)

loss_function(np.matmul(X,beta_hat), Y)
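
# MAPE is non-differentiable wherever y_pred == y_true, so gradient-based
# BFGS can stall on its numeric gradients; a derivative-free method such as
# Nelder-Mead is a reasonable alternative (a sketch, not in the original):
result_nm = minimize(objective_function, beta_init, args=(X, Y),
                     method='Nelder-Mead', options={'maxiter': 2000})
print(result_nm.x)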

class CustomLinearModel:
    """
    Linear model: Y = XB, fit by minimizing the provided loss_function
    with L2 regularization
    """
    def __init__(self, loss_function=mean_absolute_percentage_error, 
                 X=None, Y=None, sample_weights=None, beta_init=None, 
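
# The class is truncated in this excerpt. A minimal sketch of the
# L2-regularized objective its docstring describes (`reg_strength` is a
# hypothetical parameter name, not the author's):
def l2_regularized_loss(beta, X, Y, reg_strength=1e-4):
    # base loss plus an L2 penalty on the coefficient vector
    return (mean_absolute_percentage_error(np.matmul(X, beta), Y)
            + reg_strength * np.sum(np.asarray(beta)**2))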
Example #60
0
def s(b):
    # sum of squared residuals between the model pi(x, b) and the measured
    # profile in line_array (function head reconstructed; the lines above
    # it are truncated in this excerpt)
    halv = int(n_points_checked * 0.5)
    temp = 0.0
    for i in range(0, n_points_checked):
        x = (i - halv) * 0.2481
        a = pi(x, b) - line_array[512 - halv + i][1]
        temp += a * a
    return temp


#          [sigma, gamma, x_sh, B, bkg]
x0 = array([1.52, 3.3, 0.0, 5050.0, 1230])  # initial values for minimize

print(x0)
res = minimize(s,
               x0,
               method='Nelder-Mead',
               options={
                   'xatol': 1e-2,  # 'xtol' is deprecated; newer SciPy uses 'xatol'
                   'disp': True,
                   'maxfev': 10000,
                   'maxiter': 10000
               })
#res = minimize(s, x0, method='Powell')
print(res.x)
print(res.fun * 1e-6)

# print out the whole line
for i in range(1024):
    x = (i - 512) * 0.2481  # x in millimeters
    print(x, ", ", line_array[i][1], ", ", pi(x, res.x))