Example #1
def multi_curveFitting_2(least_func, avg, seed, min_range=5):
    cost = []
    #param1 = np.ones((n_param, 300))
    #param2 = np.ones((n_param, 300))
    
    x_range = np.linspace(1, 300, 300)
    for n in range(int(300/min_range) - 1):  # iterate over candidate split points
        # print("iteration ", n)

        x1 = x_range[:min_range*(n+1)]
        x2 = x_range[min_range*(n+1):]

        #print('\n\n - x1:', x1)
        #print(' - x2:', x2)
        
        y1 = avg[:min_range*(n+1)]
        y2 = avg[min_range*(n+1):]

        lsq1 = least_squares(least_func, seed, args=(x1, y1))
        lsq2 = least_squares(least_func, seed, args=(x2, y2))
        
        #param1[:, n] = lsq1.x
        #param2[:, n] = lsq2.x

        cost.append(lsq1.cost+lsq2.cost)
        
    idx = np.argmin(cost)
    return min_range*(idx+1)#, param1[:, idx], param2[:, idx]
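# A minimal usage sketch for the splitter above, assuming a two-parameter
# linear residual. The line_resid helper and the synthetic avg series are
# illustrative, not part of the original code.
import numpy as np
from scipy.optimize import least_squares

def line_resid(params, x, y):
    # residual with the (params, x, y) signature that least_func must have
    return params[0] * x + params[1] - y

# synthetic 300-point series whose slope changes at index 150
avg = np.concatenate([np.linspace(0.0, 1.0, 150),
                      np.linspace(1.0, 3.0, 150)])
split = multi_curveFitting_2(line_resid, avg, seed=[1.0, 0.0])
print("best split index:", split)   # expected near 150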
Example #2
def curve_Fitting(least_func, curve_func, x, y, seed, file_path, clt_num):
    fig, ax = plt.subplots(1, 1, figsize=(6,4))
    
    # popt, pcov = curve_fit(func, x, y, maxfev = 1000000)
    '''
    upper_bound = []
    lower_bound = []
    for i in range(len(pcov)):
        upper_bound.append(popt[i] + pcov[i,i])
        lower_bound.append(popt[i] - pcov[i,i])
    '''
    
    x_fit = np.linspace(0, 16, 100)
    '''
    if seed == 1:
        lsq = least_squares(least_func, seed, args=(x, y))
        y_mean = curve_func(x_fit, lsq.x)
        cost = lsq.cost
    '''
    if len(seed) == 1:
        lsq = least_squares(least_func, seed, args=(x, y))
        y_mean = curve_func(x_fit, lsq.x)
        cost = lsq.cost
    
    elif len(seed) == 2:
        lsq = least_squares(least_func, seed, args=(x, y))
        y_mean = curve_func(x_fit, lsq.x[0], lsq.x[1])
        cost = lsq.cost
    
    elif len(seed) == 3:
        lsq = least_squares(least_func, seed, args=(x, y))
        y_mean = curve_func(x_fit, lsq.x[0], lsq.x[1], lsq.x[2])
        cost = lsq.cost

    elif len(seed) == 4:
        lsq = least_squares(least_func, seed, args=(x, y))
        y_mean = curve_func(x_fit, lsq.x[0], lsq.x[1], lsq.x[2], lsq.x[3])
        cost = lsq.cost

    print(" - Curve Fitting Parameters: {0}".format(lsq.x))    
    print(" - Curve Fitting Cost: {0}\n".format(cost))
    
    ax.plot(x, y, 'rx', label="average score")
    ax.plot(x_fit, y_mean, 'b-', label="curve fitting")    
    '''    
    for i in range(len(x_fit)):
        if i == 0:
            ax.plot([x_fit[i], x_fit[i]], [y_lower[i], y_upper[i]], 'b-', label="variance")
        else:
            ax.plot([x_fit[i], x_fit[i]], [y_lower[i], y_upper[i]], 'b-')
    
    '''
    ax.set_ylim([0, max(y)+0.2])
    ax.legend(fontsize=14)
    ax.set_title("Cluster {0} (Cost {1})".format(clt_num, round(cost, 2)))
    # ax.text(0.77, 0.03, "cost: {0}".format(round(cost, 2)), horizontalalignment='left', verticalalignment='bottom', transform=ax.transAxes, fontsize=15)
    fig.savefig(file_path, dpi=100)
    
    return lsq.x, cost
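# Note on the example above: the len(seed) branches differ only in how many
# fitted parameters are handed to curve_func (and the len(seed) == 1 branch
# passes the whole array lsq.x rather than lsq.x[0]). Assuming curve_func
# takes the parameters as separate scalars, a branch-free sketch would be:
#
#     lsq = least_squares(least_func, seed, args=(x, y))
#     y_mean = curve_func(x_fit, *lsq.x)   # unpack however many were fitted
#     cost = lsq.cost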
Example #3
 def test_solver_selection(self):
     sparse = BroydenTridiagonal(mode='sparse')
     dense = BroydenTridiagonal(mode='dense')
     res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
                                method=self.method)
     res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
                               method=self.method)
     assert_allclose(res_sparse.cost, 0, atol=1e-20)
     assert_allclose(res_dense.cost, 0, atol=1e-20)
     assert_(issparse(res_sparse.jac))
     assert_(isinstance(res_dense.jac, np.ndarray))
Example #4
 def test_numerical_jac(self):
     p = BroydenTridiagonal()
     for jac in ['2-point', '3-point', 'cs']:
         res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
         res_sparse = least_squares(
             p.fun, p.x0, jac, method=self.method,
             jac_sparsity=p.sparsity)
         assert_equal(res_dense.nfev, res_sparse.nfev)
         assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
         assert_allclose(res_dense.cost, 0, atol=1e-20)
         assert_allclose(res_sparse.cost, 0, atol=1e-20)
Example #5
 def test_in_bounds(self):
     for jac in ['2-point', '3-point', 'cs', jac_trivial]:
         res = least_squares(fun_trivial, 2.0, jac=jac,
                             bounds=(-1.0, 3.0), method=self.method)
         assert_allclose(res.x, 0.0, atol=1e-4)
         assert_equal(res.active_mask, [0])
         assert_(-1 <= res.x <= 3)
         res = least_squares(fun_trivial, 2.0, jac=jac,
                             bounds=(0.5, 3.0), method=self.method)
         assert_allclose(res.x, 0.5, atol=1e-4)
         assert_equal(res.active_mask, [-1])
         assert_(0.5 <= res.x <= 3)
Example #6
 def test_numerical_jac(self):
     p = BroydenTridiagonal()
     with warnings.catch_warnings():
         warnings.simplefilter('ignore', UserWarning)
         for jac in ['2-point', '3-point', 'cs']:
             res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
             res_sparse = least_squares(
                 p.fun, p.x0, jac, method=self.method,
                 jac_sparsity=p.sparsity)
             assert_equal(res_dense.nfev, res_sparse.nfev)
             assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
             assert_allclose(res_dense.cost, 0, atol=1e-20)
             assert_allclose(res_sparse.cost, 0, atol=1e-20)
Example #7
 def test_equivalence(self):
     sparse = BroydenTridiagonal(mode='sparse')
     dense = BroydenTridiagonal(mode='dense')
     res_sparse = least_squares(
         sparse.fun, sparse.x0, jac=sparse.jac,
         method=self.method)
     res_dense = least_squares(
         dense.fun, dense.x0, jac=sparse.jac,
         method=self.method)
     assert_equal(res_sparse.nfev, res_dense.nfev)
     assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
     assert_allclose(res_sparse.cost, 0, atol=1e-20)
     assert_allclose(res_dense.cost, 0, atol=1e-20)
Example #8
 def test_diff_step(self):
     # res1 and res2 should be equivalent.
     # res2 and res3 should be different.
     res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
                          method=self.method)
     res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
                          method=self.method)
     res3 = least_squares(fun_trivial, 2.0,
                          diff_step=None, method=self.method)
     assert_allclose(res1.x, 0, atol=1e-4)
     assert_allclose(res2.x, 0, atol=1e-4)
     assert_allclose(res3.x, 0, atol=1e-4)
     assert_equal(res1.x, res2.x)
     assert_equal(res1.nfev, res2.nfev)
     assert_(res2.nfev != res3.nfev)
Example #9
 def test_bounds_shape(self):
     for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]:
         x0 = [1.0, 1.0]
         res = least_squares(fun_2d_trivial, x0, jac=jac)
         assert_allclose(res.x, [0.0, 0.0])
         res = least_squares(fun_2d_trivial, x0, jac=jac,
                             bounds=(0.5, [2.0, 2.0]), method=self.method)
         assert_allclose(res.x, [0.5, 0.5])
         res = least_squares(fun_2d_trivial, x0, jac=jac,
                             bounds=([0.3, 0.2], 3.0), method=self.method)
         assert_allclose(res.x, [0.3, 0.2])
         res = least_squares(
             fun_2d_trivial, x0, jac=jac, bounds=([-1, 0.5], [1.0, 3.0]),
             method=self.method)
         assert_allclose(res.x, [0.0, 0.5], atol=1e-5)
Example #10
 def test_equivalence(self):
     sparse = BroydenTridiagonal(mode='sparse')
     dense = BroydenTridiagonal(mode='dense')
     with warnings.catch_warnings():
         warnings.simplefilter("ignore", UserWarning)
         res_sparse = least_squares(
             sparse.fun, sparse.x0, jac=sparse.jac,
             method=self.method)
         res_dense = least_squares(
             dense.fun, dense.x0, jac=sparse.jac,
             method=self.method)
         assert_equal(res_sparse.nfev, res_dense.nfev)
         assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
         assert_allclose(res_sparse.cost, 0, atol=1e-20)
         assert_allclose(res_dense.cost, 0, atol=1e-20)
Example #11
def inference_cEWRGt(W, thresh):
    
    k = (W>0).sum(axis=0) # degrees
    s = W.sum(axis=0) # strength

    from scipy.optimize import least_squares, root  # root is used below

    x0=np.concatenate([k,s])*1E-4 # initial solution
    
    # First pass: bounded trust-region least squares
    sollm = least_squares(lambda v: eq(v,thresh,k,s),
                          x0=x0,
                          bounds= (0,np.inf),
                          method='trf',
                          ftol=1E-8,
                          xtol=1E-8,
                          verbose=1)

    # Second pass: Levenberg-Marquardt root finding; this overwrites the
    # least_squares result above and restarts from the same x0
    sollm = root(lambda z: eq(z,thresh,k,s),
                 x0=x0,
                 method='lm',
                 options={'xtol':1E-30,'gtol':1E-30,'ftol':1E-30},
                 tol=1E-6)
    
    
    #print('Final cost', sollm['cost'])
    sollm = sollm['x']
    n2 = int(len(sollm)//2)
    x,y = sollm[0:n2],sollm[n2:]
    return x, y
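# If the "Initialize least squares from previous solution" comment above
# describes the intent, the root() refinement would start from the
# least_squares result instead of the original x0 (a sketch, not the
# behavior of the code as written):
#
#     sollm = root(lambda z: eq(z, thresh, k, s),
#                  x0=sollm.x,     # warm-start LM from the trf solution
#                  method='lm', tol=1E-6)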
Example #12
    def solve_van_genuchten_1986(self, D=0.01, R=0.01, ftol=1e-10,
                        max_nfev=1000, **kwargs):
        """
        Scipy optimize method to solve least squares
        for van genuchten 1986. Miscible displacement.

        Parameters:
        ----------
        :param float D: Diffusivity initial guess. Cannot be 0
        :param float R: Retardation initial guess. Cannot be 0
        :param float ftol: scipy function tolerance for solution
        :param int max_nfev: maximum number of function iterations
        :param **kwargs: scipy least squares kwargs

        Returns:
        -------
        :return: scipy least squares dictionary. Answer in dict['x']
        """
        from scipy.optimize import least_squares
        l = self.ylen * self.resolution
        v = self.uy
        # as_matrix() was removed in pandas 1.0; to_numpy() is its replacement
        t = self.bt['nts'].to_numpy() * self.timestep
        bt = self.bt['ncpr'].to_numpy() / self.ncol
        x0 = np.array([D, R])

        return least_squares(self.__van_genuchten_residuals, x0,
                             args=(l, v, t, bt),
                             ftol=ftol, max_nfev=max_nfev,
                             **kwargs)
Example #13
def optimize_diffusion_simp_parameters_with_bounds(
        params_guess_dict, params_bounds_dict, const_params_dict,
        exp_time_array, exp_x_array, exp_temp_array,
        x_array, num_steps, time_step, finite_step_method, sim_fpath,
        lsq_fn=_lsq_func_simp, figsize=FIG_SIZE,
):
    params_guess = np.array([v for k, v in sorted(params_guess_dict.items())])
    if params_bounds_dict is None:
        bounds = (-np.inf, np.inf)  # least_squares rejects bounds=None; this is its unbounded default
    else:
        pgi = sorted(params_bounds_dict.items())
        lower_bounds = np.array([v[0] for k, v in pgi])
        upper_bounds = np.array([v[1] for k, v in pgi])
        bounds = (lower_bounds, upper_bounds)
    iter_fn = count()
    if figsize is not None:
        fig = plt.figure(figsize=figsize)
    else:
        fig = plt.figure()  # default size, so fig exists for add_subplot below
    ax = fig.add_subplot(111)
    fit_lines = list()
    return least_squares(
        fun=lsq_fn, x0=params_guess, bounds=bounds, verbose=2,
        args=(
            const_params_dict, params_guess_dict.keys(),
            exp_time_array, exp_x_array, exp_temp_array,
            x_array, num_steps, time_step,
            finite_step_method, sim_fpath, iter_fn, fig, ax, fit_lines
        ),
    )
Example #14
 def test_with_bounds(self):
     p = BroydenTridiagonal()
     for jac, jac_sparsity in product(
             [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
         res_1 = least_squares(
             p.fun, p.x0, jac, bounds=(p.lb, np.inf),
              method=self.method, jac_sparsity=jac_sparsity)
         res_2 = least_squares(
             p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
             method=self.method, jac_sparsity=jac_sparsity)
         res_3 = least_squares(
             p.fun, p.x0, jac, bounds=(p.lb, p.ub),
             method=self.method, jac_sparsity=jac_sparsity)
         assert_allclose(res_1.optimality, 0, atol=1e-10)
         assert_allclose(res_2.optimality, 0, atol=1e-10)
         assert_allclose(res_3.optimality, 0, atol=1e-10)
Example #15
def robust_l2(obs_phase, freqs, solve_cs=True):
    '''Solve for tec and cs from a single phase dataset.
    `obs_phase` : `numpy.ndarray`
        the measured phase with shape (num_freqs, )
    `freqs` : `numpy.ndarray`
        the frequencies at the datapoints (num_freqs,)
    `solve_cs` : (optional) bool
        Whether to solve cs (True)
    '''
    obs_phase = phase_unwrapp1d(obs_phase)
    if solve_cs:
        def residuals(m, freqs, obs_phase):
            tec,cs = m[0],m[1]
            return calc_phase(tec,freqs,cs=cs) - obs_phase
    else:
        def residuals(m, freqs, obs_phase):
            tec,cs = m[0],m[1]
            return calc_phase(tec,freqs,cs=0.) - obs_phase
    nan_mask = np.bitwise_not(np.isnan(obs_phase))
    obs_phase_ = obs_phase[nan_mask]
    freqs_ = freqs[nan_mask]
    m0 = [0.0, 0.]
    m = least_squares(residuals, m0, loss='soft_l1',
                      f_scale=90. * np.pi / 180., args=(freqs_, obs_phase_))
    if solve_cs:
        return m.x[0], m.x[1]
    else:
        return m.x[0], 0.
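# Why loss='soft_l1' with f_scale in the example above: residuals larger
# than f_scale are progressively downweighted, so a few gross outliers no
# longer dominate the fit. A self-contained illustration on synthetic data
# (not from the original):
import numpy as np
from scipy.optimize import least_squares

rng = np.random.default_rng(0)
t = np.linspace(0., 1., 50)
y = 2.0 * t + 0.5 + 0.01 * rng.standard_normal(50)
y[::10] += 5.0                                   # inject gross outliers

def line_residuals(m, t, y):
    return m[0] * t + m[1] - y

fit_l2 = least_squares(line_residuals, [0., 0.], args=(t, y))
fit_robust = least_squares(line_residuals, [0., 0.], loss='soft_l1',
                           f_scale=0.1, args=(t, y))
# fit_robust.x stays near [2.0, 0.5]; fit_l2.x is dragged by the outliers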
Example #16
    def start_fit(self, w, taus, fix_last_tau=False, fix_width=False,
                  fix_disp=False):
        ds = self.dataset
        time_zeros = self.zero_func(ds.wavenumbers)
        # np.polyfit requires an explicit degree; fitting disp_poly_deg
        # coefficients keeps the parameter masks below consistent (assumed)
        disp_guess = np.polyfit(ds.wavenumbers, time_zeros,
                                self.disp_poly_deg - 1)
        x0 = np.hstack((disp_guess, w, taus))
        idx = np.ones_like(x0, dtype='bool')
        if fix_last_tau:
            idx[-1] = False
        if fix_width:
            idx[self.disp_poly_deg] = False
        if fix_disp:
            idx[:self.disp_poly_deg] = False

        start_guess = x0[idx]

        def fix_func(x):
            x0[idx] = x
            return self.fit_func(x0)

        bounds = np.array([(-np.inf, np.inf)] * len(x0))
        bounds[self.disp_poly_deg:, 0] = 0
        bounds = bounds[idx, :]
        x = least_squares(fix_func, start_guess, bounds=bounds.T)
        return x, x0
Example #17
 def test_fun(self):
     # Test that res.fun is actual residuals, and not modified by loss
     # function stuff.
     for loss in LOSSES:
         res = least_squares(fun_trivial, 2.0, loss=loss,
                             method=self.method)
         assert_equal(res.fun, fun_trivial(res.x))
Example #18
    def solve_jury_1991(self, D=0.01, R=0.01, ftol=1e-10,
                        max_nfev=1000, **kwargs):
        """
        Scipy optimize method to solve least squares
        for jury 1991. Pulse flux.

        Parameters:
        ----------
        :param float D: Diffusivity initial guess. Cannot be 0
        :param float R: Retardation initial guess. Cannot be 0
        :param float ftol: scipy function tolerance for solution
        :param int max_nfev: maximum number of function iterations
        :param **kwargs: scipy least squares kwargs

        Returns:
        -------
        :return: scipy least squares dictionary. Answer in dict['x']
        """
        # todo: test this method! look up references for clearer examples!
        from scipy.optimize import least_squares  # leastsq/minimize were imported but unused
        a = self.ncol
        l = self.ylen * self.resolution
        v = self.uy
        pdf, t = self.__prep_data()
        x0 = np.array([D, R])

        return least_squares(self.__jury_residuals, x0,
                             args=(a, l, t, v, pdf),
                             ftol=ftol, max_nfev=max_nfev,
                             **kwargs)
Example #19
 def _solve_least_squares(self, model, helpers):
     initial_condition = list(model.params())
     cost_function = self.cost_function_generator(model, helpers)
     bound = self.bounds()
     solution = least_squares(cost_function,
                              initial_condition,
                              bounds=bound)
      # note: `solution` is never read; the model is presumably updated in
      # place through side effects of cost_function
      return model
Example #20
    def test_options(self):
        for loss in LOSSES:
            res = least_squares(fun_trivial, 2.0, loss=loss,
                                method=self.method)
            assert_allclose(res.x, 0, atol=1e-15)

        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      loss='hinge', method=self.method)
Example #21
    def test_robustness(self):
        for noise in [0.1, 1.0]:
            p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)

            for jac in ['2-point', '3-point', 'cs', p.jac]:
                res_lsq = least_squares(p.fun, p.p0, jac=jac,
                                        method=self.method)
                assert_allclose(res_lsq.optimality, 0, atol=1e-2)
                for loss in LOSSES:
                    if loss == 'linear':
                        continue
                    res_robust = least_squares(
                        p.fun, p.p0, jac=jac, loss=loss, f_scale=noise,
                        method=self.method)
                    assert_allclose(res_robust.optimality, 0, atol=1e-2)
                    assert_(norm(res_robust.x - p.p_opt) <
                            norm(res_lsq.x - p.p_opt))
Example #22
    def test_args_kwargs(self):
        # Test that args and kwargs are passed correctly to the functions.
        a = 3.0
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", UserWarning)
                res = least_squares(fun_trivial, 2.0, jac, args=(a,),
                                    method=self.method)
                assert_allclose(res.x, a, rtol=1e-4)

                assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                              args=(3, 4,), method=self.method)

                res = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
                                    method=self.method)
                assert_allclose(res.x, a, rtol=1e-4)
                assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                              kwargs={'kaboom': 3}, method=self.method)
Example #23
    def test_jac_options(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", UserWarning)
            for jac in ['2-point', '3-point', 'cs', jac_trivial]:
                res = least_squares(fun_trivial, 2.0, jac, method=self.method)
                assert_allclose(res.x, 0, atol=1e-4)

        assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
                      method=self.method)
Example #24
    def test_x_scale_jac_scale(self):
        p = BroydenTridiagonal()
        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
                            x_scale='jac')
        assert_allclose(res.cost, 0.0, atol=1e-20)

        p = BroydenTridiagonal(mode='operator')
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method=self.method, x_scale='jac')
Example #25
def _citrate_voigt_fitting(ppm, spectrum):
    """Private function to fit a mixture of Voigt profile to
    citrate metabolites.

    """
    ppm_limits = (2.30, 2.90)
    idx_ppm = np.flatnonzero(np.bitwise_and(ppm > ppm_limits[0],
                                            ppm < ppm_limits[1]))
    sub_ppm = ppm[idx_ppm]
    sub_spectrum = spectrum[idx_ppm]

    f = interp1d(sub_ppm, sub_spectrum, kind='cubic')
    ppm_interp = np.linspace(sub_ppm[0], sub_ppm[-1], num=5000)

    # Define the default parameters
    # Define their bounds
    mu_bounds = (2.54, 2.68)
    delta_2_bounds = (.08, .17)
    delta_3_bounds = (.08, .17)

    # Define the default shifts
    ppm_cit = np.linspace(mu_bounds[0], mu_bounds[1], num=1000)
    mu_dft = ppm_cit[np.argmax(f(ppm_cit))]
    # Redefine the maximum to avoid too much motion
    mu_bounds = (mu_dft - 0.01, mu_dft + 0.01)
    # Redefine the limit of ppm to use for the fitting
    ppm_interp = np.linspace(mu_dft - .20, mu_dft + 0.20, num=5000)
    delta_2_dft = .14
    delta_3_dft = .14

    # Define the default amplitude
    alpha_1_dft = (f(mu_dft) /
                   _voigt_profile(0., 1., 0., .001, .001))
    alpha_2_dft = (f(mu_dft + delta_2_dft) /
                   _voigt_profile(0., 1., 0., .001, .001))
    alpha_3_dft = (f(mu_dft - delta_3_dft) /
                   _voigt_profile(0., 1., 0., .001, .001))
    # Create the vector for the default parameters
    popt_default = [alpha_1_dft, mu_dft, .001, .001,
                    alpha_2_dft, delta_2_dft, .001, .001,
                    alpha_3_dft, delta_3_dft, .001, .001]
    # Define the bounds properly
    param_bounds = ([0., mu_bounds[0], 0., 0.,
                     0., delta_2_bounds[0], 0., 0.,
                     #0., 0., 0.],
                     0., delta_3_bounds[0], 0., 0.],
                    [np.inf, mu_bounds[1], np.inf, np.inf,
                     np.inf, delta_2_bounds[1], np.inf, np.inf,
                     np.inf, delta_3_bounds[1], np.inf, np.inf])

    res_robust = least_squares(_loss_voigt_citrate_4_dof, popt_default,
                               loss='huber', f_scale=.1,
                               bounds=param_bounds,
                               args=(ppm_interp, f(ppm_interp)))

    return res_robust.x
Example #26
 def test_convergence_with_only_one_tolerance_enabled(self):
     x0 = [-2, 1]
     x_opt = [1, 1]
     for ftol, xtol, gtol in [(1e-8, None, None),
                               (None, 1e-8, None),
                               (None, None, 1e-8)]:
         res = least_squares(fun_rosenbrock, x0, jac=jac_rosenbrock,
                             ftol=ftol, gtol=gtol, xtol=xtol,
                             method=self.method)
         assert_allclose(res.x, x_opt)
Example #27
    def test_jac_options(self):
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_trivial, 2.0, jac, method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)

        assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
                      method=self.method)
Example #28
def lmFit(y, x, t):
    x = x[0, ]
    y = y[0, ]
    print("NLS using Levenberg-Marquardt")
    # note: method='lm' does not support bounds, which is why the constraint
    # setup below stays commented out
    #mybounds = ([-np.inf,-np.inf,-np.inf,-np.inf,20], [0,0,0,0,36])
    #scl = [1, 1, 1, 1, 36]
    #mybounds = [(None,None), (None,None), (None,None), (None,None), (20, None)]
    res = least_squares(doubleExp_wrapper, np.array(x), method='lm',
                        args=(np.array(y), np.array(t)),
                        jac=doubleExp_jacWrapper)  # bounds=mybounds, x_scale=scl
    print("converged at " + str(res.nfev) + " f: " +
          str(np.linalg.norm(res.fun)) + " => " + str(res.x))
    return (res.nfev, res.x, res.fun)
Example #29
    def test_args_kwargs(self):
        # Test that args and kwargs are passed correctly to the functions.
        a = 3.0
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_trivial, 2.0, jac, args=(a,),
                                    method=self.method)
                res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
                                    method=self.method)

            assert_allclose(res.x, a, rtol=1e-4)
            assert_allclose(res1.x, a, rtol=1e-4)

            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                          args=(3, 4,), method=self.method)
            assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                          kwargs={'kaboom': 3}, method=self.method)
Example #30
 def fit(self):
     self.logger.info('Method %s' % self.config['method'])
     bounds = tuple(map(list, zip(*self.config['limits'])))  # [(min,max),(min1,max1)..] -> ([min,min1,..], [max,max1..])
     self.result = least_squares(self.callable, self.config['initial_values'], args=(self.param_dict['spectra_range'],), bounds=bounds)
     for idx, symbol in enumerate(self.symbols):
         self.param_dict[symbol] = dict()
         self.param_dict[symbol]['stderr'] = None
         self.param_dict[symbol]['value'] = self.result.x[idx]
     self._calc_fitted_spectra()
     self._calc_residuals()
     return Result(self.result, self.fitted_spectra, self.residuals, self.param_dict['wave_range']), self.param_dict
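# What the zip(*limits) transformation in fit() produces, on a tiny example:
limits = [(0.0, 1.0), (-5.0, 5.0)]         # [(min, max), (min1, max1), ...]
bounds = tuple(map(list, zip(*limits)))    # -> ([0.0, -5.0], [1.0, 5.0])
assert bounds == ([0.0, -5.0], [1.0, 5.0])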
Example #31
#------------------------------------------------------------------------------
#Displaced-Diffusion


def ddcalibration(x, S, strikes, r, vols, T):
    err = 0.0
    for i, vol in enumerate(vols):
        price = DisplacedDiffusionCall(S, strikes[i], r, sigma, T, x[0])
        err += (vol - impliedCallVolatility(S, strikes[i], r, price, T))**2

    return err


initialGuess = [0.3]
res = least_squares(
    lambda x: ddcalibration(x, S, df['strike'].values, r, df[
        'impliedvol_market'], T), initialGuess)

beta_dd = res.x[0]
#------------------------------------------------------------------------------
summary_dd = []
for i in range(n):
    K = strike[i]
    if K <= 850:
        price_dd5 = DisplacedDiffusionPut(S, K, r, sigma, T, beta_dd)
        impliedvol_dd5 = impliedPutVolatility(S, K, r, price_dd5, T)

    elif K > 850:
        price_dd5 = DisplacedDiffusionCall(S, K, r, sigma, T, beta_dd)
        impliedvol_dd5 = impliedCallVolatility(S, K, r, price_dd5, T)
    summary_dd.append([impliedvol_dd5])
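# Note on ddcalibration above: it folds all strikes into one summed squared
# error, so least_squares sees a single residual and cannot exploit the
# per-strike structure. A sketch of a vector-valued variant, using the same
# assumed helpers (including the module-level sigma):
def ddcalibration_vec(x, S, strikes, r, vols, T):
    resid = []
    for i, vol in enumerate(vols):
        price = DisplacedDiffusionCall(S, strikes[i], r, sigma, T, x[0])
        resid.append(vol - impliedCallVolatility(S, strikes[i], r, price, T))
    return resid    # one residual per strike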
Example #32
    def fit_phase(self, frequencies=None, z_data=None, plot=False):
        """
        this function fits the phase response of the S21 circle, shifted to the
        center, extracting accurate values for the resonance frequency fr, the
        total quality factor Ql, and the off-resonance point angle theta_0.

        :param frequencies:
        :param z_data:
        :param plot:
        :return:
        """
        if (frequencies is None) and (z_data is None):
            frequencies = self.frequencies
            z_data = self.z_data_undelayed
        elif (frequencies is None) ^ (z_data is None):
            raise ValueError(
                'please supply both frequencies and z_data or none of them')

        if self.delay is None:
            raise NameError('missing cable delay value')

        # z_data = self.correctdelay(frequencies, z_data, self.delay)
        # self.z_data_undelayed = z_data.copy()
        xc, yc, r0 = self.fit_circle(z_data)
        # self.circle = {'xc': xc, 'yc': yc, 'r0': r0}
        # xc = self.circle['xc']
        # yc = self.circle['yc']
        z_data = z_data - complex(xc, yc)  # np.complex was removed from numpy; use the builtin
        phase = np.unwrap(np.angle(z_data))

        # calculating initial value for resonance frequency
        phase_smooth = gaussian_filter1d(phase, 30)
        phase_derivative = np.gradient(phase_smooth)
        fr_initial_val = frequencies[np.argmax(phase_derivative)]

        # calculating initial value for Ql
        try:
            freq_truncated, z_truncated = self.narrow_band(width_multiplier=1)
            delta_f = freq_truncated[-1] - freq_truncated[0]
        except ValueError:
            delta_f = frequencies[-1] - frequencies[0]

        Ql_initial_value = fr_initial_val / delta_f

        theta_initial_value = phase[0] - np.pi
        initial_values = [fr_initial_val, theta_initial_value, Ql_initial_value]

        # defining the residuals function for the least squares fit
        def residuals(fit_parameters, frequencies, phase):
            fr = fit_parameters[0]
            theta = fit_parameters[1]
            Ql = fit_parameters[2]
            res = theta + 2. * np.arctan(2. * Ql *
                                         (1 - frequencies / fr)) - phase
            return res

        # making first a fit for each parameter separately, for better convergence
        for i, parameter in enumerate(initial_values):

            def residuals_partial(parameter, frequencies, phase):
                initial_values[i] = parameter
                return residuals(initial_values, frequencies, phase)

            parameter_semi_optimized = optimize.least_squares(
                residuals_partial,
                parameter,
                args=(frequencies, phase),
                ftol=1e-12,
                xtol=1e-12)
            initial_values[i] = parameter_semi_optimized.x[0]
        # final optimization for all parameters together starting with the values of independently fitted parameters
        optimized = optimize.least_squares(residuals,
                                           initial_values,
                                           args=(frequencies, phase),
                                           ftol=1e-12,
                                           xtol=1e-12)
        calculated_values = optimized['x']
        fr, theta_0, Ql = calculated_values
        return fr, theta_0, Ql, phase
Example #33
# In this example we find a minimum of the Rosenbrock function without bounds
# on independent variables.

def fun_rosenbrock(x):
    return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])

# Notice that we only provide the vector of the residuals. The algorithm
# constructs the cost function as a sum of squares of the residuals, which
# gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.

from scipy.optimize import least_squares
x0_rosenbrock = np.array([2, 2])
res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
res_1.x
# array([ 1.,  1.])
res_1.cost
# 9.8669242910846867e-30
res_1.optimality
# 8.8928864934219529e-14

# We now constrain the variables, in such a way that the previous solution
# becomes infeasible. Specifically, we require that ``x[1] >= 1.5`` and leave
# ``x[0]`` unconstrained. To this end, we specify the `bounds` parameter
# to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.

# We also provide the analytic Jacobian:

def jac_rosenbrock(x):
    return np.array([
        [-20 * x[0], 10],
        [-1, 0]])
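# With the Jacobian in hand, the constrained solve described above would be
# (this follows the scipy least_squares docstring; exact digits may vary):

res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
                      bounds=([-np.inf, 1.5], np.inf))
res_2.x
# approximately array([ 1.22437075,  1.5       ])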
Example #34
def add_delay(P, time, ca, roi, shift, nid, fit_model):
    n = 1 + (shift[1] - shift[0]) / shift[2]
    Delay = shift[0] + shift[2] * np.arange(n)
    Error = Delay * 0
    dt = time[1] - time[0]
    nT = len(time)

    for i in range(int(n)):
        if Delay[i] > 0:
            aif_del = ShiftAif(ca, time, Delay[i])
        else:
            aif_del = ca
        if Delay[i] < 0:
            roi_del = ShiftAif(roi, time, -Delay[i])
        else:
            roi_del = roi
        if fit_model == 'uptake':
            res = least_squares(fun_uptake,
                                P,
                                method='lm',
                                args=(time, roi_del, aif_del),
                                max_nfev=20000,
                                verbose=0)
            fit = model_uptake(res.x, time, aif_del)
        elif fit_model == '2CFM':
            res = least_squares(fun_2CFM,
                                P,
                                method='lm',
                                args=(time, roi_del, aif_del),
                                max_nfev=20000,
                                verbose=0)
            fit = model_2CFM(res.x, time, aif_del)
        if Delay[i] < 0:
            fit_shft = ShiftAif(fit, time, Delay[i])
        else:
            fit_shft = fit
        Error[i] = delay_error(roi, Delay[i], dt, nT, fit_shft)

    imin = np.argmin(Error)   # first index of the smallest error
    Delay = Delay[imin]       # scalar, so the comparisons below are well defined
    print('Delay', Delay)
    if Delay > 0:
        aif_del = ShiftAif(ca, time, Delay)
    else:
        aif_del = ca
    if Delay < 0:
        roi_del = ShiftAif(roi, time, -Delay)
    else:
        roi_del = roi
    if fit_model == 'uptake':
        res = least_squares(fun_uptake,
                            P,
                            method='lm',
                            args=(time, roi_del, aif_del),
                            max_nfev=20000,
                            verbose=0)
        fit = model_uptake(res.x, time, aif_del)
    elif fit_model == '2CFM':
        res = least_squares(fun_2CFM,
                            P,
                            method='lm',
                            args=(time, roi_del, aif_del),
                            max_nfev=20000,
                            verbose=1)
        fit = model_2CFM(res.x, time, aif_del)
    if Delay < 0:
        fit = np.interp(time - Delay, time, fit)
    return res, fit
Example #35
for i in range(len(randomized_x)):
    randomized_x[i] += randomized_x[i] * uniform(-0.1, 0.1) + uniform(
        -0.05, 0.05)

initial_guess = np.array([c - 0.2, k + 0.1, 0.9, 0.1])


# only necessary input is data and an initial guess
def residuals(guess, t, data):
    return odeint(mass_spring_damper, [guess[2], guess[3]],
                  t,
                  args=(guess[0], guess[1]))[:, 0] - data


results = least_squares(residuals, initial_guess, args=(t, randomized_x))

guess = results.x

resulting_x = odeint(mass_spring_damper, [guess[2], guess[3]],
                     t,
                     args=(guess[0], guess[1]))[:, 0]

# plotting
plt.plot(t, sol[:, 0], 'b', label='x(t) original')
plt.plot(t, resulting_x, 'g', label='x(t) estimation')
plt.scatter(t, randomized_x, 4, c="r", label='"data"')
# plt.plot(t, sol[:, 1], 'g', label='x_prime(t) original')
plt.legend(loc='best')
plt.xlabel('t')
plt.grid()
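# The example above assumes mass_spring_damper, t, sol, c, k and
# randomized_x were defined earlier. A minimal stand-in for the ODE
# right-hand side, consistent with the odeint calls (unit mass assumed):
#
#     def mass_spring_damper(state, t, c, k):
#         x, x_dot = state
#         return [x_dot, -c * x_dot - k * x]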
Example #36
def model_selection_bootstrap(s, b_vals, b_vecs, n_bs, m_max=3, threshold=0.5, model='DIAMOND', delta_method=False):
    
    m_selected= False
    m_opt= -1
    m= 0
    wt_temp= s**2
    
    while not m_selected:
        
        true_numbers= m
        
        if model=='ball_n_sticks':
            R_init, bounds_lo, bounds_up = polar_fibers_and_iso_init(true_numbers, lam_1=0.0019, lam_2=0.0004, d_iso=0.003)
        elif model=='DIAMOND':
            #R_init, bounds_lo, bounds_up = diamond_init(true_numbers, lam_1=0.0019, lam_2=0.0004, d_iso=0.003)
            R_init, bounds_lo, bounds_up = diamond_init_log(true_numbers, lam_1=0.0019, lam_2=0.0004, d_iso=0.003)
        else:
            print('Model type unidentified.')
            return np.nan
        
        ss_bs=       np.zeros(len(b_vals))
        ss_bs_count= np.zeros(len(b_vals))
        
        ss_bs_matrix=       np.zeros( (len(b_vals), n_bs) )
        ss_bs_count_matrix= np.zeros( (len(b_vals), n_bs) )
        
        for i_bs in range(n_bs):
            
            np.random.seed(i_bs)
            
            ind_bs_train= np.random.randint(0,len(b_vals), b_vals.shape)
            ind_bs_test= [i not in ind_bs_train   for i in range(len(b_vals))]
            ind_counts = [np.sum(ind_bs_train==i) for i in range(len(b_vals))]
            
            if model=='ball_n_sticks':
                
                solution = opt.least_squares(polar_fibers_and_iso_resid, R_init,
                                                bounds=(bounds_lo,bounds_up),
                                                args=( true_numbers, b_vals[ind_bs_train], b_vecs[ind_bs_train,:],
                                                s[ind_bs_train],
                                                wt_temp[ind_bs_train]**0.0))
            elif model=='DIAMOND':
                
                '''solution = opt.least_squares(diamond_resid, R_init,
                                                bounds=(bounds_lo,bounds_up),
                                                args=( true_numbers, b_vals[ind_bs_train], b_vecs[ind_bs_train,:],
                                                s[ind_bs_train],
                                                wt_temp[ind_bs_train]**0.0))'''
                
                solution = opt.least_squares(diamond_resid_log, R_init,
                                                bounds=(bounds_lo,bounds_up),
                                                args=( true_numbers, b_vals[ind_bs_train], b_vecs[ind_bs_train,:],
                                                s[ind_bs_train],
                                                wt_temp[ind_bs_train]**0.0))
                
                '''solution = pybobyqa.solve(polar_fibers_and_iso_resid_bbq, R_init,
                                                bounds=(bounds_lo,bounds_up),
                                                args=( true_numbers, b_vals[ind_bs_train], b_vecs[ind_bs_train,:],
                                                s[ind_bs_train],
                                                wt_temp[ind_bs_train]**0.5),
                                                rhobeg= 0.002, scaling_within_bounds= True, seek_global_minimum=False)'''
            
            if model=='ball_n_sticks':
                ss= polar_fibers_and_iso_simulate(solution.x, true_numbers, b_vals, b_vecs)
            elif model=='DIAMOND':
                #ss= diamond_simulate(solution.x, true_numbers, b_vals, b_vecs)
                ss= diamond_simulate_log(solution.x, true_numbers, b_vals, b_vecs)
            
            ss_bs[ind_bs_test]+= ( ss[ind_bs_test]-s[ind_bs_test] )**2
            ss_bs_count[ind_bs_test]+= 1
            
            ss_bs_matrix[ind_bs_test,i_bs]= ( ss[ind_bs_test]-s[ind_bs_test] )**2
            ss_bs_count_matrix[:,i_bs]= ind_counts
            
        ss_bs[ss_bs_count>0]= ss_bs[ss_bs_count>0]/ ss_bs_count[ss_bs_count>0]
        #ss_bs= ss_bs[ss_bs_count>0]
        E_bs= ss_bs.mean()
        
        if model=='ball_n_sticks':
            solution = opt.least_squares(polar_fibers_and_iso_resid, R_init,
                                                bounds=(bounds_lo,bounds_up),
                                                args=( true_numbers, b_vals, b_vecs,
                                                s,
                                                wt_temp**0.0))
            ss= polar_fibers_and_iso_simulate(solution.x, true_numbers, b_vals, b_vecs)
        elif model=='DIAMOND':
            '''solution = opt.least_squares(diamond_resid, R_init,
                                    bounds=(bounds_lo,bounds_up),
                                    args=( true_numbers, b_vals, b_vecs,
                                    s,
                                    wt_temp**0.0))
            ss= diamond_simulate(solution.x, true_numbers, b_vals, b_vecs)'''
            solution = opt.least_squares(diamond_resid_log, R_init,
                                    bounds=(bounds_lo,bounds_up),
                                    args=( true_numbers, b_vals, b_vecs,
                                    s,
                                    wt_temp**0.0))
            '''solution = pybobyqa.solve(polar_fibers_and_iso_resid_bbq, R_init,
                                    bounds=(bounds_lo,bounds_up),
                                    args=( true_numbers, b_vals, b_vecs,
                                    s,
                                    wt_temp**0.5),
                                    rhobeg= 0.002, scaling_within_bounds= True, seek_global_minimum=False)'''
                                          
            ss= diamond_simulate_log(solution.x, true_numbers, b_vals, b_vecs)
        
        ss_fit= ( ss-s )**2
        E_fit= ss_fit.mean()
        
        E_632= 0.368*E_fit + 0.632*E_bs
        
        if m==0:
            
            E_632_m_n_1= E_632
            #E_fit_m_n_1= E_fit
            E_bs_m_n_1 = E_bs
            ss_bs_m_n_1= ss_bs.copy()
            ss_bs_matrix_m_n_1= ss_bs_matrix.copy()
            #ss_bs_count_matrix_m_n_1= ss_bs_count_matrix.copy()
            
        else:
            
            del_632= E_632_m_n_1- E_632
            #del_fit= E_fit_m_n_1- E_fit
            del_bs = E_bs_m_n_1 - E_bs
            
            del_bs_ss= ss_bs_m_n_1- ss_bs
            
            if delta_method:
                
                del_ss_bs_matrix= ss_bs_matrix_m_n_1- ss_bs_matrix
                
                q_hat= np.sum(del_ss_bs_matrix, axis=0)
                
                N_hat= np.mean( ss_bs_count_matrix, axis=1 )
                
                numer= ss_bs_count_matrix - N_hat[:,np.newaxis]
                
                numer= np.matmul(numer, q_hat)
                
                denom= np.sum( ss_bs_count_matrix==0, axis=1 )
                
                D= (2+ 1/(len(b_vals)-1)) * ( del_bs_ss - del_bs_ss.mean() )/ len(b_vals) + (denom>0) * numer/ (denom+1e-7)
                
                SE_BS= np.linalg.norm(D)
                
            else:
                
                SE_BS= np.sqrt( np.sum( ( del_bs_ss - del_bs_ss.mean() )**2 ) / ( len(b_vals)**2 ) )
            
            SE_632= del_632/del_bs * SE_BS
            
            if del_632 < threshold*SE_632 or m==m_max:
                
                m_opt= m-1
                m_selected= True
                
            else:
                
                E_632_m_n_1= E_632
                #E_fit_m_n_1= E_fit
                E_bs_m_n_1 = E_bs
                ss_bs_m_n_1= ss_bs.copy()
                ss_bs_matrix_m_n_1= ss_bs_matrix.copy()
                #ss_bs_count_matrix_m_n_1= ss_bs_count_matrix.copy()
        
        m+= 1
    
    
    return m_opt
Example #37
PhaseAVR=np.array([-8,-15,-22,-32,-41,-48,-52,-57,-62,-66,\
                      -68,-72,-73,-75,-78,-80,-83,-84,-86,-89])

#x contains T1, T2, T3, T4; also examine T5, T6, K1, K2, a, p
#lb=np.array([0.0199,0.0199,4.2199,4.2199,4.2199,2.1499,0.999,6.76,0.499,0.49,0.05])
#ub=np.array([0.0201,0.0201,4.2201,4.2201,4.2201,2.1501,1.001,6.78,5.001,0.51,2.0001])
#excludes T1, T2, T3, T4; only examine T5, T6, K1, K2, a, p
lb = np.array([3, 2, 0.1, 0.1, 0.499, 0.01])
ub = np.array([8, 5, 10, 5, 0.501, 2])

#lb=np.array([0       ,0       ,1,998,998,1 ,0.999,0.1,0.1,0.01,0.01])
#ub=np.array([0.0002  ,0.0002  ,8,1000,1000,10,1    ,10 ,5  ,5   ,2   ])

#x=np.array([T1,T2,T3,T4,T5,T6,K,K1,K2,a,p])
x = np.array([T5, T6, K1, K2, a, p])
w = 2 * np.pi * np.linspace(0.1, 2, num=20)

for counter in range(100):
    res = least_squares(Func_PSSAP,
                        x,
                        ftol=0.01,
                        bounds=(lb, ub),
                        method='trf')
    # the 'lm' method will not work here: without bound constraints the
    # values go negative
    #    res_3 = least_squares(fun_PSS2B,x0_PSS2B,ftol=0.01 , method='lm' )
    x = res.x

PhasePSS, PhaseSUM, m, n = Func_Plot_PSSAP(x, w, PhaseAVR)
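# The 100-restart loop above, with its loose ftol=0.01, is a convergence
# heuristic. A single call with tighter tolerances may reach the same
# optimum directly (a sketch, untested against the original problem):
#
#     res = least_squares(Func_PSSAP, x, bounds=(lb, ub), method='trf',
#                         ftol=1e-8, xtol=1e-8)
#     x = res.x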
Example #38
                                1000000)
            initialStations2 = (np.random.rand(int(PROBLEM_DIMENSIONS / 2)) *
                                5)
            initialStations.append(
                np.concatenate((initialStations1, initialStations2), axis=0))

        targetStation1 = (np.random.rand(int(PROBLEM_DIMENSIONS / 2)) *
                          1000000)
        targetStation2 = (np.random.rand(int(PROBLEM_DIMENSIONS / 2)) * 5)
        targetStation = np.concatenate((targetStation1, targetStation2),
                                       axis=0)

        diArray = calculate_distance(targetStation)
        initialPoint = np.random.randint(100000, size=PROBLEM_DIMENSIONS)

        res = least_squares(optimization_func, initialPoint, method='lm').x

        resultFit = np.sum(checkEquality(targetStation, res))
        if resultFit >= 3:
            successCounter += 1
        if resultFit == 6:
            offCounter += 1

        print(
            f"{x+1}* initialStations - {[print_space_point(point) for point in initialStations]}"
        )
        print(f"{x+1}* targetStation - {print_space_point(targetStation)}")
        print(f"{x+1}* initialPoint - {print_space_point(initialPoint)}")
        print(f"{x+1}* result - {print_space_point(res)}")
        print(
            f"{x+1}* result fits expectations - {bool(resultFit)}. Correct answers - {resultFit} \n"
Example #39
    def _solve_S_from_DC(self,C_dataFrame,tee=False,with_bounds=False,max_iter=200):
        """Solves a basic least squares problems with SVD.
        
        Args:
            C_dataFrame (DataFrame) data frame with concentration values
        
        Returns:
            DataFrame with estimated S_values 

        """
        D_data = self.model.D
        if self._n_meas_lambdas:
            # build Dij vector
            D_vector = np.zeros(self._n_meas_times*self._n_meas_lambdas)
            
            row  = []
            col  = []
            data = []    
            for i,t in enumerate(self._meas_times):
                for j,l in enumerate(self._meas_lambdas):
                    for k,c in enumerate(self._mixture_components):
                        row.append(i*self._n_meas_lambdas+j)
                        col.append(j*self._n_components+k)
                        data.append(C_dataFrame[c][t])
                    D_vector[i*self._n_meas_lambdas+j] = D_data[t,l]    
                
                        
            Bd = scipy.sparse.coo_matrix((data, (row, col)),
                                         shape=(self._n_meas_times*self._n_meas_lambdas,
                                                self._n_components*self._n_meas_lambdas))

            if not with_bounds:
                if self._n_meas_times == self._n_components:
                    s_array = scipy.sparse.linalg.spsolve(Bd, D_vector)
                elif self._n_meas_times>self._n_components:
                    result_ls = scipy.sparse.linalg.lsqr(Bd, D_vector,show=tee)
                    s_array = result_ls[0]
                else:
                    raise RuntimeError('Need n_t_meas >= self._n_components')
            else:
                nl = self._n_meas_lambdas
                nt = self._n_meas_times
                nc = self._n_components
                x0 = np.zeros(nl*nc)+1e-2
                M = Bd.tocsr()
                
                def F(x,M,rhs):
                    return  rhs-M.dot(x)

                def JF(x,M,rhs):
                    return -M

                verbose = 2 if tee else 0
                    
                res_lsq = least_squares(F,x0,JF,
                                        bounds=(0.0,np.inf),
                                        max_nfev=max_iter,
                                        verbose=verbose,args=(M,D_vector))
                s_array = res_lsq.x
                
            s_shaped = s_array.reshape((self._n_meas_lambdas,self._n_components))
        else:
            s_shaped = np.empty((self._n_meas_lambdas,self._n_components))

        return s_shaped
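# Note on the bounded branch above: F is linear in x with the constant
# Jacobian -M, so scipy.optimize.lsq_linear can solve the same problem
# directly (a sketch reusing the same names):
#
#     from scipy.optimize import lsq_linear
#     res_lin = lsq_linear(M, D_vector, bounds=(0.0, np.inf),
#                          max_iter=max_iter, verbose=verbose)
#     s_array = res_lin.x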
Example #40
    def post(self):
        args = parser.parse_args()
        ratoonStartDate, harvestStartDate, latlonlist = args[
            'ratoonStartDate'], args['harvestStartDate'], args['latlonlist']

        latlonlist_processed = []
        print(latlonlist)
        """
		for latlon_dict in latlonlist:
			res_latlon_dict = json.loads(latlon_dict.replace("\'", "\""))
			lon, lat = res_latlon_dict["lon"], res_latlon_dict["lat"]
			latlonlist_processed.append( [lon, lat] )

		print(latlonlist_processed)
		"""

        l_latlonlist = len(latlonlist)

        num_point = int(l_latlonlist / 2)

        for i in list(range(num_point)):
            lon, lat = latlonlist[i * 2], latlonlist[i * 2 + 1]
            latlonlist_processed.append([float(lon), float(lat)])
        print(latlonlist_processed)

        inputGeo = {"type": "Polygon", "coordinates": [latlonlist_processed]}
        inputGeo_go = geometry.GeometryCollection(
            [geometry.shape(inputGeo).buffer(0)])

        # need to find the max lat lon and min lat lon
        maxlon = max([latlon[0] for latlon in latlonlist_processed])
        minlon = min([latlon[0] for latlon in latlonlist_processed])

        maxlat = max([latlon[1] for latlon in latlonlist_processed])
        minlat = min([latlon[1] for latlon in latlonlist_processed])

        print("maxlon ", maxlon, "minlon ", minlon)
        print("maxlat ", maxlat, "minlat ", minlat)

        # construct a wrapper rectangular polygon
        wrapperGeo = { "type": "Polygon", "coordinates": [ [ [minlon, maxlat],\
                      [maxlon, maxlat],\
                      [maxlon, minlat],\
                      [minlon, minlat],\
                      [minlon, maxlat]
                     ] ]  }
        wrapperGeo_go = geometry.GeometryCollection(
            [geometry.shape(wrapperGeo).buffer(0)])

        [top_x, top_y] = latlon_to_xy(lat_origin, lon_origin, maxlat, minlon)
        [bottom_x, bottom_y] = latlon_to_xy(lat_origin, lon_origin, minlat,
                                            maxlon)

        print("dis is ", latlon_to_xy2(lat_origin, lon_origin, maxlat, minlon))
        print("dis is ", latlon_to_xy2(lat_origin, lon_origin, minlat, maxlon))

        result = geometry.GeometryCollection(
        )  # sugarcane masks that intersects tile
        for k in features:
            if geometry.shape(k["geometry"]).intersects(inputGeo_go):
                result = result.union(
                    geometry.shape(k["geometry"]).intersection(inputGeo_go))

        # this is for proserpine
        latlon_tile = [(147.95529899839866, -19.893321306080015), \
             (149.01671779369045, -19.893321306080015), \
             (149.01671779369045, -20.876176661128618), \
           (147.95529899839866, -20.876176661128618)]

        top_left_lat_long, top_right_lat_long, bottom_right_lat_long, bottom_left_lat_long = latlon_tile[0], latlon_tile[1],\
                         latlon_tile[2], latlon_tile[3]

        fullImageWidth = 10980
        fullImageHeight = 10980

        needed_ij_list = []
        print(top_x, bottom_x, top_y, bottom_y)
        # 6456 6463 5693 5717
        if inputGeo_go.intersects(result):
            # find all the sugarcane xy positions in this input region
            sugarcane_pos = tile_sugarcane_position_dict["sugarcane_positions"]

            for i in tqdm(list(range(top_x, bottom_x))):
                for j in list(range(top_y, bottom_y)):
                    # if [i,j] in sugarcane_pos:
                    # 	needed_ij_list.append((i,j))

                    lat_long = GetLatLongForCoords(i, j, top_left_lat_long,
                                                   top_right_lat_long,
                                                   bottom_right_lat_long,
                                                   bottom_left_lat_long,
                                                   fullImageWidth,
                                                   fullImageHeight)
                    if result.intersects(geometry.Point(lat_long)):

                        # this coordinate is needed
                        needed_ij_list.append((i, j))

            # if all needed ij is found
            # get the NDVI of these xy positions
            # dont need to calculate it again

            print("Got needed ij")
            print(len(needed_ij_list))

            if len(needed_ij_list) == 0:
                return {"errorCode": 1}

            filepath = "/mnt/volume_sgp1_01/pepsL2A_processed_img/T55KFT/split_dates/"
            daterange = [ratoonStartDate, harvestStartDate]
            """
			sugarpositions = needed_ij_list
			ndvi_dict = get_custom_ndvi(filepath, daterange, sugarpositions)
			mean_ndvi_list = ndvi_dict["mean_ndvi_list"]
			date_list = ndvi_dict["date_list"]
			"""
            sugar_daterange = get_daterange(filepath, daterange)
            # sugarcane_pos = tile_sugarcane_position_dict["sugarcane_positions"]

            mean_ndvi_list = []
            for d in tqdm(sugar_daterange):
                date_info = d.split('-')
                image_name = "{}{}{}.tif".format(date_info[0], date_info[1],
                                                 date_info[2])

                # tile_ndvi_list = T55KFT_total_ndvi_list[image_name]
                with open(
                        "./ndvi_info_dict/ndvi_list_{}.txt".format(
                            image_name[:8]), "rb") as fp:
                    # tile_ndvi_dict = json.load(fp)
                    tile_ndvi_dict = pickle.load(fp)
                # tile_ndvi_list = tile_ndvi_dict[image_name[:8]]
                # print(type(tile_ndvi_dict))

                sub_ndvi_list = []
                for ij in tqdm(needed_ij_list):
                    noIndex = False
                    try:

                        # ind = sugarcane_pos.index( [ij[0], ij[1]] )
                        ndvi = tile_ndvi_dict["{}-{}".format(ij[0], ij[1])]
                    except KeyError:
                        noIndex = True
                    if noIndex == True:
                        continue

                    # ndvi = tile_ndvi_list[ind]

                    # print(ndvi)

                    sub_ndvi_list.append(ndvi)

                mean_ndvi = np.mean(np.array(sub_ndvi_list))
                mean_ndvi_list.append(mean_ndvi)

            print(mean_ndvi_list)

            date_list = sugar_daterange
            """
			date_list = ndvi_dict_T55KFT["date_list"]
			mean_ndvi_list = ndvi_dict_T55KFT["mean_ndvi_list"]
			"""

            reference_date = datetime.strptime(date_list[0], "%Y-%m-%d")
            days_elapsed_list = []
            for i in list(range(0, len(date_list))):
                do = datetime.strptime(date_list[i], "%Y-%m-%d")
                delta = do - reference_date
                delta_days = delta.days
                days_elapsed_list.append(delta_days)

            ydata = np.array(mean_ndvi_list)
            xdata = np.array(days_elapsed_list)

            def fun(x, t, y):
                m, a, b, ti, tf = x[0], x[1], x[2], x[3], x[4]
                return NDVI_func(t, m, a, b, ti, tf) - y

            x0 = np.ones(5)
            res_lsq = least_squares(fun, x0, args=(xdata, ydata))
            m, a, b, ti, tf = res_lsq.x
            I = quad(NDVI_func, 0, xdata[-1], args=(m, a, b, ti, tf))
            integrals = I[0]

            # print( estimated_integral )
            print(integrals)
            print(days_elapsed_list)
            k = 0.47352

            estimated_yield = k * float(integrals)
            print(estimated_yield)

            predicted_yield_list = []
            for i in list(range(len(days_elapsed_list))):
                I = quad(NDVI_func, 0, i, args=(m, a, b, ti, tf))
                integrals = I[0]
                predicted_yield = k * integrals
                predicted_yield_list.append(predicted_yield)

            # get type clustering
            # year = int(harvestStartDate[:4])

            if ratoonStartDate == "2017-09-23":
                year = 2018
            elif ratoonStartDate == "2018-09-28":
                year = 2019
            else:
                print("error")
                return {"errorCode": 1}  # `year` would otherwise be undefined below

            type_dict = get_type_percentage(year, needed_ij_list)

            # calculate Area and tons
            square_meter_area = len(needed_ij_list) * 100
            hec = square_meter_area * 0.0001
            estimated_tons = estimated_yield * hec

            sugar_content = 0.1 * estimated_tons

            sugar_lbs = sugar_content * 2000
            global_sugar_price = 12.72

            revenue = global_sugar_price * sugar_lbs

            return {
                "errorCode": 0,
                "estimated_yield": estimated_yield,
                "mean_ndvi_list": mean_ndvi_list,
                "date_list": date_list,
                "days_elapsed_list": days_elapsed_list,
                "predicted_yield_list": predicted_yield_list,
                "type_clustering": type_dict,
                "estimated_tons": estimated_tons,
                "sugar_content": sugar_content,
                "revenue": revenue
            }

        else:
            # the selected region does not contain any sugarcane
            print("No sugarcane in this region")
            estimated_yield = -1

            return {"errorCode": 1}
Example #41
    def calculate_cable_delay(self, frequencies=None, z_data=None):
        """
        calculating cable delay using least square fit of the data for a circle for different values of delay
        :param z_data:
        :return:
        """
        delay_upper_bound = 100e-9

        if z_data is None:
            z_data = self.z_data_raw
            frequencies = self.frequencies
        # normalize data
        # first part - clculate cable delay by minimizing the angle distance between the two parts of the signal
        # def residuals(delay, frequencies, z_data):
        #     z_data_ = self.correctdelay(frequencies, z_data, delay[0])
        #     angles_at_limits = np.unwrap([np.angle(z_data_[0]), np.angle(z_data_[-1])])
        #     angle_distance = angles_at_limits[0] - angles_at_limits[1]
        #     res = angle_distance
        #
        #
        if self.delay_rough_estimation <= delay_upper_bound:
            initial_guess = self.delay_rough_estimation
        else:
            initial_guess = delay_upper_bound
        if self.config == 'T':

            def residuals(delay, frequencies, z_data):
                z_data_ = self.correctdelay(frequencies, z_data, delay[0])
                xc, yc, r0 = self.fit_circle(z_data_)
                # calculating the distance from radius of each point (will be zero for perfect circle)
                distance_from_radius = np.sqrt((z_data_.real - xc)**2 +
                                               (z_data_.imag - yc)**2) - r0
                # normalize by the radius so that small circles are not
                # under-weighted
                res = distance_from_radius / r0
                return res

            optimized = optimize.least_squares(residuals,
                                               initial_guess,
                                               args=(frequencies, z_data),
                                               bounds=(0, delay_upper_bound),
                                               xtol=5e-16,
                                               ftol=5e-16,
                                               gtol=1e-12)
            cable_delay = optimized.x[0]

        elif self.config == 'circulator':

            def residuals(delay, frequencies, z_data):
                z_data_ = self.correctdelay(frequencies, z_data, delay[0])
                xc, yc, r0 = self.fit_circle(z_data_)
                # distance of each point from the fitted radius (unused here,
                # kept for symmetry with the 'T' configuration)
                distance_from_radius = np.sqrt((z_data_.real - xc)**2 +
                                               (z_data_.imag - yc)**2) - r0
                # angle distance between the first and last points: the
                # correct delay closes the circle, driving this toward zero
                angles_at_limits = np.unwrap(
                    [np.angle(z_data_[0]),
                     np.angle(z_data_[-1])])
                angle_distance = angles_at_limits[0] - angles_at_limits[1]
                res = angle_distance
                return res

            optimized = optimize.least_squares(residuals,
                                               initial_guess,
                                               args=(frequencies, z_data),
                                               bounds=(0, delay_upper_bound),
                                               xtol=5e-16,
                                               ftol=5e-16,
                                               gtol=1e-12)
            cable_delay = optimized.x[0]
            initial_guess = cable_delay

            def residuals_(delay, frequencies, z_data):
                z_data_ = self.correctdelay(frequencies, z_data, delay[0])
                xc, yc, r0 = self.fit_circle(z_data_)
                # second pass: refine the delay using the distance of each
                # point from the fitted radius (zero for a perfect circle)
                distance_from_radius = np.sqrt((z_data_.real - xc)**2 +
                                               (z_data_.imag - yc)**2) - r0
                res = distance_from_radius
                return res

            optimized = optimize.least_squares(residuals_,
                                               initial_guess,
                                               args=(frequencies, z_data),
                                               bounds=(0.9 * initial_guess,
                                                       1.1 * initial_guess),
                                               xtol=5e-16,
                                               ftol=5e-16,
                                               gtol=1e-12)
            cable_delay = optimized.x[0]

        self.delay = cable_delay
        logger.info(f"Calculated cable delay is: {cable_delay:.5E}")
        return cable_delay
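fit_circle is referenced above but not defined in this excerpt; the residuals assume it returns the centre (xc, yc) and radius r0 of a least-squares circle through the complex data. An algebraic (Kasa) fit is one common way to do that — a sketch, not the class's actual implementation:

import numpy as np

def fit_circle_kasa(z_data):
    # Rewrite (x - xc)^2 + (y - yc)^2 = r0^2 as the linear system
    # 2*xc*x + 2*yc*y + (r0^2 - xc^2 - yc^2) = x^2 + y^2 and solve it.
    x, y = z_data.real, z_data.imag
    A = np.column_stack([2 * x, 2 * y, np.ones_like(x)])
    b = x**2 + y**2
    (xc, yc, c), *_ = np.linalg.lstsq(A, b, rcond=None)
    r0 = np.sqrt(c + xc**2 + yc**2)
    return xc, yc, r0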
Example #42
0
def loadingFF(UserIn, geomParams, XsecPolar, W, omega, Vx, Vz, alphaShaft):

    import bisect
    import numpy as np
    from scipy.optimize import least_squares

    def fixed_pitch_residuals(omega):
        '''
        This function computes the residual between the trim target and the computed thrust for an rpm trim.

        :param omega: rotational rate [rad/s]
        :return: difference between the trim target and the computed thrust.
        '''
        trimOut = fixed_pitch_trim(omega)
        res = trimTargs - trimOut[0]
        print(f'Trim residuals: T = {round(res,6)}N')
        return res

    def variable_pitch_residuals(th, mu, lamTPP_init):
        '''
        This function computes the residuals between the trim targets and computed trim variables for collective and cyclic pitch trim.

        :param th: an array of three elements, the first being the collective pitch setting, followed by the lateral and longitudinal cyclic pitch amplitudes.
        :param mu: advance ratio
        :param lamTPP_init: initial estimate for the inflow ratio
        :return: difference between the trim targets and the computed CT, Mx, and My.
        '''

        if UserIn['trim'] == 2:
            trimOut = variable_pitch_trim([th, 0, 0], mu, lamTPP_init)
            res = trimTargs - trimOut[0]
            print(f'Trim residuals: T = {round(res, 6)}N')
        else:
            trimOut = variable_pitch_trim(th, mu, lamTPP_init)
            res = trimTargs - np.array([trimOut[0], trimOut[2], trimOut[3]])
            print(
                f'Trim residuals: T = {round(res[0], 6)}N, Mx = {round(res[1], 6)}Nm, My = {round(res[2], 6)}Nm'
            )
        return res

    def fixed_pitch_trim(omega):
        '''
        This function performs an rpm trim, whereby the rotational rate is varied until the desired thrust is achieved.
        :param omega: rotational rate [rad/s]
        :return:
        '''

        mu = U / (omega * R)
        CT = W / (rho * np.pi * R**2 * (omega * R)**2)
        lamTPP_init = inflowModSelect(2, mu * np.tan(alphaInit), mu, CT)
        dCT = CT * np.ones(np.shape(lamTPP_init))

        err = 1
        while np.any(err > 0.0005):

            up = inflowModSelect(UserIn['inflowMod'], lamTPP_init, mu, CT, dCT)
            ut = r + mu * np.expand_dims(np.sin(phi), axis=1)
            AoA = th0 + geomParams['twistDist'] - up / ut
            CL, CD = np.array([aeroParams(x) for x in AoA]).transpose(1, 0, 2)
            dCT = 1 / 2 * solDist * r**2 * (CL * np.cos(up / ut) -
                                            CD * np.sin(up / ut))
            CT = 1 / (2 * np.pi) * np.trapz(np.trapz(dCT, r), phi)
            err = np.abs((up - lamTPP_init) / up)
            lamTPP_init = up

        T = CT * rho * np.pi * R**2 * (omega * R)**2

        return T, CT, dCT, lamTPP_init, ut, up, CL, CD, AoA, mu

    def variable_pitch_trim(th, mu, lamTPP_init):
        '''
        This function performs a collective/cyclic pitch trim, whereby the thrust coefficient, roll, and pitching moments are the trim targets.

        :param th: an array of three elements, the first being the collective pitch setting, followed by the lateral and longitudinal cyclic pitch amplitudes, expressed in radians.
        :param mu: the advance ratio
        :param lamTPP_init: a constant inflow ratio with respect to the tip path plane (TPP)

        :return:
        :param beta: converged array consisting of the coning, longitudinal, and lateral flapping angles, expressed in radians.
        :param alpha: converged rotor disk angle of attack, expressed in radians
        :param mu: converged advance ratio
        :param CT: converged thrust coefficient
        :param lamTTP_temp: converged inflow ratio
        :param theta_expanded: expanded form of the pitch variations, accounting for first harmonic fluctuations in cyclic pitch (len(phi)xlen(r)).
        :param beta_expanded: expanded form of the flap variations, accounting for first harmonic fluctuations in longitudinal and lateral flapping
        :param ut: nondimensionalized tangential velocity component, evaluated with respect to the hub plane.
        :param up: nondimensionalized normal velocity component, evaluated with respect to the hub plane.
        '''
        err = 1
        i = 0
        while np.any(err > 0.0005):

            theta_expanded = geomParams['twistDist'] + th[0] + np.expand_dims(
                th[1] * np.cos(phi), axis=1) + np.expand_dims(
                    th[2] * np.sin(phi), axis=1)
            ut = r + mu * np.cos(alphaInit) * np.expand_dims(np.sin(phi),
                                                             axis=1)
            up = lamTPP_init

            AoA = theta_expanded - up / ut

            CL, CD = np.array([aeroParams(x) for x in AoA]).transpose(1, 0, 2)

            dCT = 1 / 2 * solDist * r**2 * (CL * np.cos(up / ut) -
                                            CD * np.sin(up / ut))
            CT = 1 / (2 * np.pi) * np.trapz(np.trapz(dCT, r), phi)

            lamTTP_temp = inflowModSelect(UserIn['inflowMod'], lamTPP_init, mu,
                                          CT, dCT)
            err = np.abs((lamTTP_temp - lamTPP_init) / lamTTP_temp)
            lamTPP_init = lamTTP_temp

        Mx = 1 / (2 * np.pi) * np.trapz(
            np.trapz(r * dCT * np.expand_dims(np.sin(phi), axis=1), r),
            phi) * rho * (omega * R)**2 * np.pi * R**3
        My = -1 / (2 * np.pi) * np.trapz(
            np.trapz(r * dCT * np.expand_dims(np.cos(phi), axis=1), r),
            phi) * rho * (omega * R)**2 * np.pi * R**3

        return CT, dCT, Mx, My, lamTTP_temp, theta_expanded, ut, up, CL, CD, AoA

    def inflowModSelect(model, lam, mu, CT, *args):
        '''
        This function selects and returns the converged inflow distribution based on the model specified in the user input module.
        :param model: integer specifying the model selected in the input module (UserIn['inflowMod'])
        :param lam: initial guess for the inflow distribution, can be an arbitrarily sized array
        :param mu: standard advance ratio (V/(omega*R)), unresolved into components parallel and perpendicular to the rotor disk.
        :param CT: thrust coefficient
        :return:
        '''
        if model == 1:
            lam = constant_inflow(lam, mu, CT)
        elif model == 2:
            lam = linear_inflow(lam, mu, CT)
        elif model == 3:
            lam = drees_inflow(lam, mu, CT)
        elif model == 4:
            lam = pitt_peters_inflow(lam, mu, args[0])
        return lam

    def constant_inflow(lam, mu, CT):
        '''
        This function applies the fixed point iteration method to converge the constant inflow ratio.

        :param lam: the estimate of the inflow ratio
        :param mu: the advance ratio
        :param alpha: angle of attack of the rotor disk
        :param CT: thrust/weight coefficient
        :return: converged inflow ratio
        '''
        errFP = 1
        mu = mu * np.cos(alphaInit)
        while np.any(errFP > 0.0005):
            lam_temp = mu * np.tan(alphaInit) + CT / (2 *
                                                      np.sqrt(mu**2 + lam**2))
            errFP = np.abs((lam_temp - lam) / lam_temp)
            lam = lam_temp
        return lam

    def linear_inflow(lam, mu, CT):
        '''
        This function uses the fixed-point iteration method to converge Glauert's linear inflow model.

        :param lam: the estimate of the inflow ratio
        :param mu: the advance ratio
        :param alpha: angle of attack of the rotor disk
        :param CT: thrust/weight coefficient
        :return: converged inflow ratio
        '''
        err = 1
        mu = mu * np.cos(alphaInit)
        while np.any(err > 0.0005):
            lam_temp = CT / (2 * np.sqrt(mu**2 + lam**2)) * (
                1 + 1.2 * r * np.expand_dims(np.cos(phi), axis=1))
            err = np.abs((lam_temp - lam) / lam_temp)
            lam = lam_temp
        return lam

    def drees_inflow(lam, mu, CT):
        '''
        This function uses the fixed-point iteration method to converge Drees's inflow model.

        :param lam: the estimate of the inflow ratio
        :param mu: the advance ratio
        :param alpha: angle of attack of the rotor disk
        :param CT: thrust/weight coefficient
        :return: converged inflow ratio
        '''
        err = 1
        while np.any(err > 0.0005):
            wake_skew = np.arctan(mu * np.cos(alphaInit) / lam)
            kx = 4 / 3 * (
                (1 - np.cos(wake_skew) - 1.8 * mu**2) / np.sin(wake_skew))
            ky = -2 * mu
            lam_temp = CT / (2 * np.sqrt(mu**2 + lam**2)) * (
                1 + kx * r * np.expand_dims(np.cos(phi), axis=1) +
                ky * r * np.expand_dims(np.sin(phi), axis=1))
            err = np.abs((lam_temp - lam) / lam_temp)
            lam = lam_temp
        return lam

    def pitt_peters_inflow(lam, mu, dCT):
        '''
        This function computes the inflow distribution based on the steady component of the Pitt-Peters model. This
        formulation was originally presented in, Pitt, Dale M., and David A. Peters. "Theoretical prediction of
        dynamic-inflow derivatives." (1980) and then again in Chen, Robert TN. "A survey of nonuniform inflow models
        for rotorcraft flight dynamics and control applications." (1989). This model takes into account the effects
        that the hub moments have on the steady (1st harmonic) induced velocity distribution. This model should be
        used when performing the fixed/collective pitch trim since the inflow distribution would inevitably vary in
        order to produce the necessary reaction to counteract the hub moments.
        :param lam: initial guess for the inflow distribution
        :param mu: advance ratio
        :param dCT: radial and azimuthal distribution of the thrust coefficient
        '''

        CT = 1 / (2 * np.pi) * np.trapz(np.trapz(dCT, r), phi)
        CMX = 1 / (2 * np.pi) * np.trapz(
            np.trapz(r * dCT * np.expand_dims(np.sin(phi), axis=1), r), phi)
        CMY = -1 / (2 * np.pi) * np.trapz(
            np.trapz(r * dCT * np.expand_dims(np.cos(phi), axis=1), r), phi)

        lam = constant_inflow(lam, mu, CT)
        wake_skew = np.arctan(mu * np.cos(alphaInit) / lam)
        vt = np.sqrt((mu * np.cos(alphaInit))**2 + lam**2)
        vm = ((mu * np.cos(alphaInit))**2 + lam *
              (lam + CT / (2 * np.sqrt(mu**2 + lam**2)))) / vt

        L = np.array(
            [[0.5 * vt, 0, 15 * np.pi / (64 * vm) * np.tan(wake_skew / 2)],
             [0, -4 / (vm * (1 + np.cos(wake_skew))), 0],
             [
                 15 * np.pi / (64 * vt) * np.tan(wake_skew / 2), 0,
                 -4 * np.cos(wake_skew) / (vm * (1 + np.cos(wake_skew)))
             ]])
        lam_0, lam_1c, lam_1s = np.dot(L, [CT, CMX, CMY])
        lam = lam_0 + lam_1c * r * np.expand_dims(np.cos(
            phi), axis=1) + lam_1s * r * np.expand_dims(np.sin(phi), axis=1)

        return lam

    def aeroParams(AoA):
        '''
        This function returns the lift and drag coefficients corresponding to a radial and azimuthal distribution of the
        angles of attack. The lift coefficient for stalled blade sections is linearly interpolated between the
        section's airfoil minimum and maximum lift coefficients. The drag coefficient is assumed to be 10% of the
        lift coefficient, unless the blade section is stalled, in which case the sectional drag coefficient is set to
        the airfoil's drag coefficient at the angle of attack corresponding to the maximum lift coefficient.
        :param AoA: array of size [phiRes x len(r)] filled with the computed angles of attack
        :return:
        :param CL: lift coefficient, linearly interpolated for the stalled blade sections
        :param CD: drag coefficient, set equal to its value at the angle of attack corresponding to the maximum lift
        coefficient for the stalled blade sections
        '''

        dCL = np.zeros(len(AoA))
        dCD = np.zeros(len(AoA))

        for k, v in XsecPolar.items():
            ind = np.squeeze(np.where(np.array(polarInd) == k))

            if np.any(AoA[ind] > v['alphaMax']) or np.any(
                    AoA[ind] < v['Alpha0']):

                ind2 = np.where((AoA[ind] < v['Alpha0'])
                                | (AoA[ind] > v['alphaMax']))
                dCL[ind2] = np.interp(AoA[ind2] % (2 * np.pi),
                                      xp=[v['alphaMax'], 2 * np.pi],
                                      fp=[v['ClMax'], v['ClMin']])

                if np.any(AoA[ind2] % (2 * np.pi) > -v['alphaMax'] %
                          (2 * np.pi)):
                    ind3 = np.squeeze(
                        np.where(AoA[ind2] % (2 * np.pi) > -v['alphaMax'] %
                                 (2 * np.pi)))
                    dCD[ind3] = np.interp(
                        AoA[ind2][ind3] % (2 * np.pi),
                        xp=[-v['alphaMax'] % (2 * np.pi), 2 * np.pi],
                        fp=[v['CdMax'], v['CdMin']])
                    ind2 = np.delete(ind2, ind3)
                else:
                    dCD[ind2] = v['CdMax']

                ind = np.delete(ind, ind2)

            dCL[ind] = np.interp(AoA[ind],
                                 xp=v['Polar'][:, 0],
                                 fp=v['Polar'][:, 1])
            # dCL[ind] = 2*np.pi*AoA[ind]
            dCD[ind] = np.interp(AoA[ind],
                                 xp=v['Polar'][:, 0],
                                 fp=v['Polar'][:, 2])
        return dCL, dCD

#%%

    omega = omega / 60 * 2 * np.pi  # RPM to rad/s
    rho = UserIn['rho']
    Nb = UserIn['Nb']
    R = geomParams['R']
    r = geomParams['r']
    solDist = geomParams['solDist']
    XsecLocation = UserIn['XsecLocation']
    CT = W / (rho * np.pi * R**2 * (omega * R)**2)

    alphaShaft = alphaShaft * (np.pi / 180)  # deg to rad
    thFP = np.arctan(Vz / Vx)  # flight path angle
    alphaInit = alphaShaft + thFP
    U = np.linalg.norm((Vx, Vz))

    mu = U / (omega * R)

    phiRes = 361
    phi = np.linspace(0, 2 * np.pi, phiRes)
    a = np.ones((len(r))) * XsecPolar[list(XsecPolar.keys())[0]]['Lift Slope']
    th0 = UserIn['thetaInit'] * np.pi / 180

    # %% This section of code assigns the airfoil parameters from the XFoil polar to the corresponding radial section
    # along the blade span

    XsecPolarExp = {}
    if len(XsecLocation) > 1:
        polarInd = []
        ind = np.zeros((len(XsecLocation) + 1))
        for i, Xsec in enumerate(XsecLocation):
            ind[i] = bisect.bisect(r, Xsec)
        ind[0] = 0
        ind[-1] = len(r)

        for i, Xsec in enumerate(XsecPolar.keys()):
            polarInd.extend([Xsec] * int(ind[i + 1] - ind[i]))
            for ii, param in enumerate(list(XsecPolar[Xsec].keys())[1:]):
                if i == 0:
                    XsecPolarExp = {
                        **XsecPolarExp,
                        **{
                            param: XsecPolar[Xsec][param] * np.ones(len(r))
                        }
                    }
                else:
                    XsecPolarExp[param][int(ind[i]):] = XsecPolar[Xsec][param]
    else:
        polarInd = list(XsecPolar.keys()) * len(r)
        for i, key in enumerate(
                list(XsecPolar[list(XsecPolar.keys())[0]].keys())[1:]):
            XsecPolarExp[key] = np.ones(
                (phiRes, len(r))) * XsecPolar[list(XsecPolar.keys())[0]][key]

#%%
    if UserIn['trim'] == 1:
        trimTargs = W
        trim_sol = least_squares(fixed_pitch_residuals,
                                 omega,
                                 method='lm',
                                 diff_step=0.5)
        omega = trim_sol.x
        th = np.array([th0, 0, 0])
        T, CT, dCT, lam, ut, up, CL, CD, AoA, mu = fixed_pitch_trim(omega)
        theta_expanded = np.ones(np.shape(AoA)) * th0

    elif UserIn['trim'] == 2:
        trimTargs = CT
        lamTPP_init = inflowModSelect(1, mu * np.tan(alphaInit), mu, trimTargs)
        trim_sol = least_squares(variable_pitch_residuals,
                                 th0,
                                 args=[mu, lamTPP_init],
                                 method='lm')
        th = np.array([np.squeeze(trim_sol.x), 0, 0])
        CT, dCT, Mx, My, lam, theta_expanded, ut, up, CL, CD, AoA = variable_pitch_trim(
            th, mu, lamTPP_init)

    else:
        trimTargs = [CT, 0, 0]
        th = np.array([th0, np.pi / 180, np.pi / 180])
        lamTPP_init = inflowModSelect(1, mu * np.tan(alphaInit), mu,
                                      trimTargs[0])
        trim_sol = least_squares(variable_pitch_residuals,
                                 th,
                                 args=[mu, lamTPP_init],
                                 method='lm')
        th = trim_sol.x
        CT, dCT, Mx, My, lam, theta_expanded, ut, up, CL, CD, AoA = variable_pitch_trim(
            th, mu, lamTPP_init)

#%%
    UT = ut * omega * R
    UP = up * omega * R
    U = np.sqrt(UT**2 + UP**2)

    dT = rho * np.pi * R**2 * (omega * R)**2 * dCT
    T = 1 / (2 * np.pi) * np.trapz(np.trapz(dT, r), phi)

    dCQ = 0.5 * solDist * r**3 * (CL * np.sin(up / ut) + CD * np.cos(up / ut))
    CQ = 1 / (2 * np.pi) * np.trapz(np.trapz(dCQ, r), phi)
    dQ = rho * np.pi * R**3 * (omega * R)**2 * dCQ
    Q = 1 / (2 * np.pi) * np.trapz(np.trapz(dQ, r), phi)
    P = Q * omega

    # resolves the loading vectors into vertical and horizontal directions so that a change of basis can be applied
    # to the blade geometry to account for the pitching motion in the namelist file - 1/18/21
    dFz = dT / Nb * np.cos(-theta_expanded) - dQ / (
        Nb * r * R) * np.sin(-theta_expanded)
    dFx = dT / Nb * np.sin(-theta_expanded) + dQ / (
        Nb * r * R) * np.cos(-theta_expanded)
    # dFr = rho*np.pi*R**2*(omega*R)**2*(1/2*solDist*r**2*(-CL*np.expand_dims(np.sin(beta_exp),axis = 1)+CD*np.sin(np.expand_dims(mu_x*np.cos(phi),axis = 1)/ut)))
    dFr = np.zeros((np.shape(dFz)))

    #   if the rotor is rotating CW the force distributions are flipped along the longitudinal axis of the rotor disk.
    if UserIn['rotation'] == 2:
        dFz = np.flip(dFz, axis=0)
        dFx = np.flip(dFx, axis=0)
        AoA = np.flip(AoA, axis=0)
        U = np.flip(U, axis=0)

        if UserIn['inflowMod'] != 1:
            lam = np.flip(lam, axis=0)
        th[2] = -th[2]

    #   hub force
    H = Nb / (2 * np.pi) * np.trapz(
        np.trapz((dFr * np.expand_dims(np.cos(phi), axis=1) +
                  dFx * np.expand_dims(np.sin(phi), axis=1)), r), phi)
    #   side force
    Y = Nb / (2 * np.pi) * np.trapz(
        np.trapz((dFr * np.expand_dims(np.sin(phi), axis=1) -
                  dFx * np.expand_dims(np.cos(phi), axis=1)), r), phi)
    #   roll moment
    Mx = Nb / (2 * np.pi) * np.trapz(
        np.trapz(
            geomParams['rdim'] * dFz * np.expand_dims(np.sin(phi), axis=1), r),
        phi)
    #   pitch moment
    My = -Nb / (2 * np.pi) * np.trapz(
        np.trapz(
            geomParams['rdim'] * dFz * np.expand_dims(np.cos(phi), axis=1), r),
        phi)
    hubLM = [H, Y, Mx, My]

    #   assembles a dictionary with the computed parameters that is returned to the user and is referenced in other segments of the program
    loadParams = {
        'residuals': trim_sol.fun,
        'phiRes': phiRes,
        'ClaDist': a,
        'AoA': AoA,
        'alpha': alphaInit,
        'mu': mu,
        'phi': phi,
        'th': th,
        'CT': CT,
        'T': T,
        'CQ': CQ,
        'Q': Q,
        'P': P,
        'UP': UP,
        'UT': UT,
        'U': U,
        'dFx': dFx,
        'dFy': dFr,
        'dFz': dFz,
        'hubLM': hubLM,
        'omega': omega * 60 / (2 * np.pi)
    }
    #
    return loadParams
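The inflow helpers above all share the same fixed-point pattern: evaluate the model with the current estimate of the inflow ratio, then iterate until the relative change drops below 5e-4. A standalone sketch of the constant (momentum-theory) case, with illustrative mu, alpha, and CT values:

import numpy as np

def constant_inflow_fp(mu, alpha, CT, tol=5e-4, max_iter=1000):
    # Fixed-point iteration for the momentum-theory inflow ratio,
    # lam = mu_x*tan(alpha) + CT / (2*sqrt(mu_x^2 + lam^2)),
    # mirroring constant_inflow() above.
    mu_x = mu * np.cos(alpha)  # in-plane component of the advance ratio
    lam = mu_x * np.tan(alpha) + CT / 2  # rough starting guess
    for _ in range(max_iter):
        lam_new = mu_x * np.tan(alpha) + CT / (2 * np.sqrt(mu_x**2 + lam**2))
        if np.abs((lam_new - lam) / lam_new) < tol:
            return lam_new
        lam = lam_new
    return lam

print(constant_inflow_fp(mu=0.15, alpha=np.radians(2.0), CT=0.006))  # illustrative values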
Example #43
0
    def track(self, epsilon=None, threshold=1E-6, num_observes=1, verbose=0):
        if self.sample_queue is None or self.model_params is None:
            raise ValueError("calibrator not initialized!")

        if not isinstance(num_observes, int) or num_observes < 1:
            raise ValueError("number of observes must be an integer greater than or equal to 1!")

        # estimate output
        new_output = 0
        for _ in range(num_observes):
            new_output += self.model.observe(self.sample_queue[-1].inputs)
        new_output /= num_observes
        if np.abs(new_output - self.model.guess(self.sample_queue[-1].inputs, self.model_params)) < threshold:
            return

        epsilon = np.sqrt(np.finfo(float).eps) if epsilon is None else epsilon
        log.debug("epsilon: " + str(epsilon))

        # observe other points
        y1 = 0
        x1 = np.asarray(self.sample_queue[-1].inputs) + np.asarray([0, epsilon])
        for _ in range(num_observes):
            y1 += self.model.observe(x1.tolist())
        y1 /= num_observes

        y2 = 0
        x2 = np.asarray(self.sample_queue[-1].inputs) + np.asarray([epsilon, 0])
        for _ in range(num_observes):
            y2 += self.model.observe(x2.tolist())
        y2 /= num_observes

        def residual(new_params):

            y0_model = self.model.guess(self.sample_queue[-1].inputs, new_params)
            y1_model = self.model.guess(x1.tolist(), new_params)
            y2_model = self.model.guess(x2.tolist(), new_params)

            return np.asarray([new_output, y1, y2]) - np.asarray([y0_model, y1_model, y2_model])

        def residual_jac(new_params):

            jac_y0 = self.model.guess_jac_params(self.sample_queue[-1].inputs, new_params)
            jac_y1 = self.model.guess_jac_params(x1.tolist(), new_params)
            jac_y2 = self.model.guess_jac_params(x2.tolist(), new_params)

            # one row of parameter derivatives per residual; the residuals are
            # (data - model), hence the sign flip
            return -np.vstack((jac_y0, jac_y1, jac_y2))

        if verbose > 0:
            print('tracking...')
        results = least_squares(residual, self.model_params, ftol=1e-16, xtol=1e-16, gtol=1e-16)

        next_model_params = results.x.tolist()
        log.debug(results)

        if verbose > 0:
            print('searching minimum...')
        min_inputs = self.model.argmin(params=next_model_params, initial_inputs=self.sample_queue[-1].inputs, verbose=verbose)
        min_ = self.model.observe(min_inputs)

        log.info("minimum of proposed model: " + str(min_))

        if min_ < new_output:
            self.model_params = next_model_params
            self.sample_queue.popleft()
            self.sample_queue.append(SampleRecord(min_inputs, min_))
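A standalone sketch of the tracking idea above: probe a black-box function at the current point plus two small offsets, then refit the model parameters so the model reproduces all three observations. Here quadratic() stands in for model.guess, which is not shown in this excerpt.

import numpy as np
from scipy.optimize import least_squares

def quadratic(x, params):
    # toy stand-in for model.guess
    a, b = params
    return a * (x[0]**2 + x[1]**2) + b

true_params = [1.5, -0.3]
x0 = np.array([0.2, 0.4])
eps = np.sqrt(np.finfo(float).eps)
probes = [x0, x0 + [0.0, eps], x0 + [eps, 0.0]]
observed = np.array([quadratic(p, true_params) for p in probes])

def residual(params):
    return observed - np.array([quadratic(p, params) for p in probes])

print(least_squares(residual, [1.0, 0.0]).x)  # recovers ~[1.5, -0.3]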
Example #44
0
 def calc(self):
     results = least_squares(self.equations, self.initial_guess)
     return results.x
def scipyForecaster(returns, ff, nf=6, length=60, loss='cauchy'):

    output = []
    factorLoadings = []
    varianceOfErrors = []
    df = ff.merge(returns, left_index=True, right_index=True)
    name = returns.columns.tolist()[0]
    df[name] = df[name] - df['RF']
    regressors = ['Mkt.Rf', 'SMB', 'HML', 'Mom', 'RMW', 'CMA']

    if nf == 3:
        regressors = ['Mkt.Rf', 'SMB', 'HML']

    if nf == 21:
        regressors = ff.columns.tolist()
        regressors.remove('RF')

    for j in range(length, len(df.index.tolist())):
        trainData = df.iloc[(j - length):j, :]
        trainX = trainData[regressors]
        trainY = trainData[[name]]
        x0 = np.ones(nf)

        if trainY.isnull().values.any():
            output.append(np.nan)
            factorLoadings.append(np.zeros((1, nf)))
            varianceOfErrors.append(np.nan)
            continue

        loss_map = {
            'cauchy': 'cauchy',
            'atan': 'arctan',
            'softl1': 'soft_l1',
            'huber': 'huber'
        }
        model = least_squares(minF,
                              x0,
                              loss=loss_map[loss],
                              f_scale=0.1,
                              args=(trainX, trainY))

        factorLoadings.append(np.array(model.x))
        model.x.shape = (nf, 1)
        varianceOfErrors.append(
            np.var(trainY - np.dot(trainX, model.x)).tolist()[0])

        testData = pd.DataFrame(df.iloc[j, :]).T
        testX = testData[regressors]
        prediction = np.dot(testX, model.x)
        output.append(prediction[0][0])

    return (name, output, factorLoadings, varianceOfErrors)
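The loss/f_scale options above downweight outliers relative to plain least squares. A small self-contained comparison on synthetic data with one gross outlier (minF is not shown above, so a simple linear residual stands in):

import numpy as np
from scipy.optimize import least_squares

rng = np.random.default_rng(0)
x = np.linspace(0, 1, 50)
y = 2.0 * x + 0.5 + rng.normal(0, 0.02, x.size)
y[10] += 5.0  # single gross outlier

def resid(beta):
    return y - (beta[0] * x + beta[1])

for loss in ['linear', 'huber', 'soft_l1', 'cauchy', 'arctan']:
    fit = least_squares(resid, [1.0, 0.0], loss=loss, f_scale=0.1)
    print(loss, np.round(fit.x, 3))  # robust losses stay near (2.0, 0.5)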
Example #46
0
def model_selection_f_test(s, b_vals, b_vecs, m_max=3, threshold=20, condition_mode='F_val', model='DIAMOND'):
    
    m_selected = False
    m_opt = -1
    m = 0
    wt_temp = s**2
    
    while not m_selected:
        
        true_numbers = m
        if model == 'ball_n_sticks':
            R_init, bounds_lo, bounds_up = polar_fibers_and_iso_init(true_numbers, lam_1=0.0020, lam_2=0.0004, d_iso=0.003)
        elif model == 'DIAMOND':
            #R_init, bounds_lo, bounds_up = diamond_init(true_numbers, lam_1=0.0019, lam_2=0.0004, d_iso=0.003)
            R_init, bounds_lo, bounds_up = diamond_init_log(true_numbers, lam_1=0.0020, lam_2=0.0004, d_iso=0.003)
        else:
            print('Model type unidentified.')
            return np.nan
        
        if model == 'ball_n_sticks':
            solution = opt.least_squares(polar_fibers_and_iso_resid, R_init,
                                         bounds=(bounds_lo, bounds_up),
                                         args=(true_numbers, b_vals, b_vecs,
                                               s, wt_temp**0.0))
            ss = polar_fibers_and_iso_simulate(solution.x, true_numbers, b_vals, b_vecs)
        elif model == 'DIAMOND':
            '''solution = opt.least_squares(diamond_resid, R_init,
                                    bounds=(bounds_lo, bounds_up),
                                    args=(true_numbers, b_vals, b_vecs,
                                          s, wt_temp**0.0))
            ss = diamond_simulate(solution.x, true_numbers, b_vals, b_vecs)'''
            solution = opt.least_squares(diamond_resid_log, R_init,
                                         bounds=(bounds_lo, bounds_up),
                                         args=(true_numbers, b_vals, b_vecs,
                                               s, wt_temp**0.0))
            ss = diamond_simulate_log(solution.x, true_numbers, b_vals, b_vecs)
            
        ss_fit = (ss - s)**2

        if m == 0:
            RSS1 = ss_fit.sum()
            p1 = len(R_init)
        else:
            RSS2 = ss_fit.sum()
            p2 = len(R_init)

            f_val = ((RSS1 - RSS2) / (p2 - p1)) / (RSS2 / (len(b_vals) - p2))

            if condition_mode == 'F_prob':
                f_stat = f.cdf(f_val, p2 - p1, len(b_vals) - p2)
                cond = f_stat < 1 - threshold or m == m_max
            elif condition_mode == 'F_val':
                cond = f_val < threshold or m == m_max
            else:
                print('Condition mode unidentified!')
                return np.nan

            if cond:
                m_opt = m - 1
                m_selected = True
            else:
                # the richer model wins; it becomes the reference for the next test
                RSS1 = RSS2
                p1 = p2

        m += 1

    return m_opt
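The F-test above compares nested fits by relating the drop in residual sum of squares to the number of extra parameters. A worked standalone version of the same statistic (the RSS values, parameter counts, and sample size are illustrative):

from scipy.stats import f

RSS1, p1 = 12.0, 4  # simpler model: residual sum of squares, parameter count
RSS2, p2 = 8.0, 7   # richer (nested) model
n = 60              # number of measurements (len(b_vals) above)

f_val = ((RSS1 - RSS2) / (p2 - p1)) / (RSS2 / (n - p2))
p_value = 1 - f.cdf(f_val, p2 - p1, n - p2)
print(f_val, p_value)  # keep the richer model only if the improvement is significant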
Example #47
0
    def minimise(self,
                 params,
                 step,
                 limits,
                 minimiser_name="minuit",
                 max_calls=0):
        """

        Parameters
        ----------
        params: ndarray
            Seed parameters for fit
        step: ndarray
            Initial step size in the fit
        limits: ndarray
            Fit bounds
        minimiser_name: str
            Name of minimisation method
        max_calls: int
            Maximum number of calls to minimiser
        Returns
        -------
        tuple: best fit parameters and errors
        """
        limits = np.asarray(limits)
        if minimiser_name == "minuit":

            self.min = Minuit(
                self.get_likelihood,
                print_level=1,
                source_x=params[0],
                error_source_x=step[0],
                limit_source_x=limits[0],
                fix_source_x=False,
                source_y=params[1],
                error_source_y=step[1],
                limit_source_y=limits[1],
                fix_source_y=False,
                core_x=params[2],
                error_core_x=step[2],
                limit_core_x=limits[2],
                fix_core_x=False,
                core_y=params[3],
                error_core_y=step[3],
                limit_core_y=limits[3],
                fix_core_y=False,
                energy=params[4],
                error_energy=step[4],
                limit_energy=limits[4],
                fix_energy=False,
                x_max_scale=params[5],
                error_x_max_scale=step[5],
                limit_x_max_scale=limits[5],
                fix_x_max_scale=False,
                goodness_of_fit=False,
                fix_goodness_of_fit=True,
                errordef=1,
            )

            self.min.tol *= 1000
            self.min.set_strategy(1)

            migrad = self.min.migrad()
            fit_params = self.min.values
            errors = self.min.errors

            return (
                (
                    fit_params["source_x"],
                    fit_params["source_y"],
                    fit_params["core_x"],
                    fit_params["core_y"],
                    fit_params["energy"],
                    fit_params["x_max_scale"],
                ),
                (
                    errors["source_x"],
                    errors["source_y"],
                    errors["core_x"],
                    errors["core_y"],
                    errors["energy"],
                    errors["x_max_scale"],
                ),
                self.min.fval,
            )

        elif "nlopt" in minimiser_name:
            import nlopt

            opt = nlopt.opt(nlopt.LN_BOBYQA, 6)
            opt.set_min_objective(self.get_likelihood_nlopt)
            opt.set_initial_step(step)

            opt.set_lower_bounds(np.asarray(limits).T[0])
            opt.set_upper_bounds(np.asarray(limits).T[1])
            opt.set_xtol_rel(1e-3)
            if max_calls:
                opt.set_maxeval(max_calls)

            x = opt.optimize(np.asarray(params))

            return x, (0, 0, 0, 0, 0, 0), self.get_likelihood_min(x)

        elif minimiser_name in ("lm", "trf", "dogbox"):  # scipy least_squares methods
            self.array_return = True

            res = least_squares(
                self.get_likelihood_min,
                params,
                method=minimiser_name,
                x_scale=step,
                xtol=1e-10,
                ftol=1e-10,
            )

            return res.x, (0, 0, 0, 0, 0, 0), self.get_likelihood_min(res.x)

        else:
            res = minimize(
                self.get_likelihood_min,
                np.array(params),
                method=minimiser_name,
                bounds=limits,
                options={"disp": False},
                tol=1e-5,
            )

            return np.array(res.x), (0, 0, 0, 0, 0, 0), self.get_likelihood_min(res.x)
def compute_exact(D_topright, anchors, basis, guess=None, method='least_squares', verbose=False):
    """ Function to compute exact solution to quadratically constrained problem. 

    See also UniquenessStudies.ipynb for seeing how to use these methods. 

    :param D_topright, anchors, basis: see :class:`solvers.semidefRelaxation` for explanation. 
    :param guess: initial guess for coefficients (necessary for some methods). 
    :param method: Method to use to find exact solution. Currently implemented are 

    - least_squares: do gradient descent on the least squares cost function. We do in total 100
    random initializations in (-10, 10)^KD, and we keep all solutions where the cost function is zero, 
    meaning that all constraints are satisfied.  

    - minimize: We set up a constrained minimization problem and then minimize a trivial cost function. According to some online discussions it does not work well when the first guess is infeasible. 

    - grid: Do a simple grid search to find zeros of the least-squares cost function. Runs out of memory even for small model sizes. 

    - roots: Find the roots of the cost function. Problem: this only allows us to add exactly K*D constraints, which is almost never enough. 

    :return: list of solutions (least_squares) or single solution. 


    """
    dim = anchors.shape[0]
    K = basis.shape[0]

    if method == 'least_squares':
        kwargs = {'anchors': anchors, 'basis': basis, 'distance_measurements': D_topright}
        max_it = 100
        coeffs_hat_list = []
        for i in range(max_it):
            x0 = 2 * (np.random.rand(dim * K) - 0.5) * 10  # uniform between -10, 10
            sol = opt.least_squares(objective_ls, x0=x0, verbose=0, kwargs=kwargs)

            if np.all(np.abs(sol.fun) < 1e-10):
                new_coeffs_hat = sol.x.reshape((dim, K))
                already_present = False
                for c in coeffs_hat_list:
                    if np.allclose(c, new_coeffs_hat):
                        already_present = True
                        break

                if not already_present:
                    coeffs_hat_list.append(new_coeffs_hat)

        if len(coeffs_hat_list) > 0:
            return coeffs_hat_list
        else:
            raise ValueError('No exact solution found in {} random initializations'.format(max_it))
    elif method == 'minimize':
        if guess is None:
            guess = np.zeros((dim * K, ))

        # set up nonlinear constraints.
        constraints = []
        for m, a_m in enumerate(anchors.T):
            assert len(a_m) == dim
            for n, f_n in enumerate(basis.T):
                dist = D_topright[n, m]
                if dist == 0:
                    continue

                assert len(f_n) == K
                single_args = [a_m, dist, f_n]
                constraint = {
                    'type': 'eq',
                    'fun': quadratic_constraint,
                    'jac': quadratic_constraint_jac,
                    'args': single_args
                }
                constraints.append(constraint)
        constraints_max = constraints[:len(guess)]
        assert len(constraints_max) == len(guess)
        sol = opt.minimize(objective_quadratic, x0=guess, constraints=constraints_max, options={'disp': verbose})

        if sol.success:
            if verbose:
                evaluate_constraints(constraints, sol.x)

            coeffs_hat = sol.x.reshape((dim, K))
            return coeffs_hat
        else:
            raise ValueError('Did not converge with message \n {}'.format(sol.message))
    elif method == 'grid':
        ranges = [slice(-10, 10, 1.0) for _ in range(len(guess))]
        sol = opt.brute(objective_ls, ranges)
        return sol
    elif method == 'roots':
        if guess is None:
            guess = np.zeros((dim * K, ))
        args = (anchors, basis, D_topright)
        sol = opt.root(objective_root, x0=guess, args=args, tol=1e-12)
        if not sol.success:
            raise ValueError('root did not converge with message {}:'.format(sol.message))

        coeffs_hat = sol.x.reshape((dim, K))
        return coeffs_hat
    else:
        raise NotImplementedError('Not implemented:{}'.format(method))
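The least_squares branch above is a multi-start pattern: many random initialisations, keeping every distinct solution whose residuals vanish. The same skeleton in miniature, on a toy residual with two exact roots:

import numpy as np
import scipy.optimize as opt

def residuals(x):
    # toy system with two exact solutions, x = +2 and x = -2
    return np.array([x[0]**2 - 4.0])

solutions = []
rng = np.random.default_rng(1)
for _ in range(20):
    x0 = 2 * (rng.random(1) - 0.5) * 10  # uniform in (-10, 10)
    sol = opt.least_squares(residuals, x0=x0)
    if np.all(np.abs(sol.fun) < 1e-8) and not any(
            np.allclose(s, sol.x) for s in solutions):
        solutions.append(sol.x)

print(solutions)  # typically finds both [2.] and [-2.]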
Example #49
0
def job(r):
    ID = int(r['ID'])
    if CATALOGUE == "TIC":
        file = f"{PATH}/{ID}_psd_tot_nw.txt"
    elif CATALOGUE == "EPIC":
        file = f"{PATH}/{ID}/{ID}_psd_tot.txt"
    df = pd.read_csv(file, delim_whitespace=True, names=['ν', 'P1', 'P2', 'P'])

    Δν = float(r['Δν'])
    νmax = float(r['ν_max'])
    if νmax < 0:
        return

    # plt.plot(df['ν'], df['P'])
    # plt.gca().set_xscale('log')
    # plt.gca().set_yscale('log')
    # plt.axvline(νmax, c='red')

    # def Harvey(ν, t, g):
    #     return np.power(10, t) / (1 + (ν/np.power(10, g))**2)

    def Harvey(ν, A, ν0):
        return 2 * np.sqrt(2) / np.pi * A**2 / (ν0 * (1 + (ν / ν0)**4))

    from scipy.optimize import least_squares

    def model(q):
        p = q[1:]
        return q[0] + np.sum(
            [Harvey(df.ν.values, *p[2 * i:2 * i + 2]) for i in range(len(p) // 2)],
            axis=0)

    def cost(p):
        return (df.P.values - model(p))  #/np.sqrt(df.P.values)

    p0 = [0, 10, 100, 10, 500]
    j = least_squares(cost, p0)

    # for i in range(len(p0[1:])//2):
    #     plt.plot(df['ν'].values, Harvey(df['ν'].values, *j['x'][2*i+1:2*i+3]))
    # plt.plot(df['ν'].values, model(j['x']))
    # # plt.plot(df['ν'].values, model(session.bg[:-3]))
    # plt.show()

    from os.path import isfile
    # if isfile(f"./Seed/{CATALOGUE}{ID}.pkl"):
    #     seed = pd.read_pickle(f"Seed/{CATALOGUE}{ID}.pkl")
    #     # for l, r in seed.groupby('ell'):
    #     #     plt.plot(r.nu_med % Δν, r.nu_med, 'o')

    #     if CATALOGUE == "KIC" and ID in app_KIC:
    #         modes = app[app_KIC_all == ID]
    #     #     for l in [0, 1, 2, 3]:
    #     #         ml = modes[modes['degree'] == l]
    #     #         ν = ml.Freq.astype(float)
    #             # plt.plot(ν % Δν, ν, '^')
    #     # plt.show()

    ε = 1e-10

    w = νmax**0.88 * 0.66  # oscillation-envelope width scaling

    v = np.concatenate((np.abs(j['x']) * ε, [1, νmax, w]))
    e = np.concatenate((np.ones_like(j['x']) * 3 / ε, [100, 1.1, 2]))

    from diamonds import DiamondsSession

    system(f"mkdir -p 'DIAMONDS/temp/{CATALOGUE}'")
    temp = f"DIAMONDS/temp/{CATALOGUE}/{ID}_PS.txt"
    np.savetxt(temp, np.array([df['ν'].values, df['P'].values]).T)
    session = DiamondsSession(int(ID), temp, n_head=0, CATALOGUE=CATALOGUE)
    # session.bg = 1
    session.bg = v
    session.write_bg_hyperparams(e)
    session.run_bg()

    if isfile(f"Seed/{CATALOGUE}{ID}.pkl"):
        seed = pd.read_pickle(f"Seed/{CATALOGUE}{ID}.pkl")
        modes = seed.nu_med.values

    else:
        return

    if CATALOGUE == "KIC" and ID in app_KIC:
        modes = app[app_KIC_all == ID].Freq.astype('float')

    # WIDTH = 8 # μHz
    WIDTH = Δν / 10

    if not isfile(f'mle/{CATALOGUE}{ID}.npy'):
        ν, ps = np.loadtxt(f"DIAMONDS/data/{CATALOGUE}{ID}.txt").T

        from scipy.optimize import least_squares

        def Lorentzian(x, x0, A, Γ):
            # Same parameterisation as DIAMONDS
            return A**2 / ((x - x0)**2 + (Γ / 2)**2)

        def get_params(ν0):
            m = (ν > ν0 - WIDTH / 2) & (ν < ν0 + WIDTH / 2)
            j = least_squares(lambda p: (ps[m] - Lorentzian(ν[m], *p)),
                              [ν0, np.max(ps[m]), WIDTH])
            # plt.plot(ν[m], ps[m])
            # plt.plot(ν[m], Lorentzian(ν[m], *j['x']))
            # plt.show()
            return j

        params = np.array([get_params(ν0) for ν0 in modes])
        np.save(f'mle/{CATALOGUE}{ID}', params)

    else:
        params = np.load(f'mle/{CATALOGUE}{ID}.npy', allow_pickle=True)

    cov = np.array([np.linalg.inv(j['jac'].T @ j['jac']) for j in params])

    ν0 = np.array([j['x'][0] for j in params])
    e_ν0 = np.array([np.sqrt(C[0, 0]) for C in cov])

    amp = np.abs([j['x'][1] for j in params])
    e_amp = np.abs([np.sqrt(C[1, 1]) for C in cov])

    width = np.abs([j['x'][2] for j in params])
    e_width = np.abs([np.sqrt(C[2, 2]) for C in cov])

    MIN_SNR = 10
    try:
        m = (e_ν0 < 2.5 * width) & (amp > MIN_SNR * e_amp) & (
            np.abs(ν0 - modes) < WIDTH)
    except ValueError:
        tqdm.write(f"Array length mismatch for target {CATALOGUE} {ID}")
        return

    # if ID == 212586030:
    #     m_debug = (e_ν0 < width)
    #     print(e_ν0)
    #     print(width)
    #     plt.plot(ν0 % Δν, ν0, 'or')
    #     plt.plot(ν0[m_debug] % Δν, ν0[m_debug], 'o')
    #     plt.show()

    ν0 = ν0[m]
    e_ν0 = e_ν0[m]
    amp = amp[m]
    e_amp = e_amp[m]
    width = width[m]
    e_width = e_width[m]

    l = seed['ell'].values[m]

    # plt.plot(ν, ps)
    # plt.scatter(modes, amp)
    # plt.show(block=True)

    # m = amp > 100

    # modes = modes[m]
    # amp = amp[m]

    NMODES = 20
    n_runs = (len(ν0) - 1) // NMODES + 1

    OUTPUT = np.ones((len(ν0), 10)) * np.nan
    OUTPUT[:, 0] = l
    for i in range(n_runs):
        m = np.arange(NMODES) + i * NMODES
        m = m[m < len(ν0)]
        session.pb = ν0[m]
        try:
            session.write_pb_hyperparams(np.maximum(Δν / 20, e_ν0[m] * 5),
                                         amp[m],
                                         width[m],
                                         e_amp=np.maximum(10, e_amp[m] * 20),
                                         e_width=np.maximum(
                                             Δν / 20, e_width[m] * 5),
                                         run=i)
            session.run_pb(run=i)
            result = session._pb_result

            OUTPUT[m, 1] = session._pb_result[::3, 1]
            OUTPUT[m, 2] = session._pb_result[::3, 5]
            OUTPUT[m, 3] = session._pb_result[::3, 4]

            OUTPUT[m, 4] = session._pb_result[1::3, 2]
            OUTPUT[m, 5] = session._pb_result[1::3, 5]
            OUTPUT[m, 6] = session._pb_result[1::3, 4]

            OUTPUT[m, 7] = session._pb_result[2::3, 2]
            OUTPUT[m, 8] = session._pb_result[2::3, 5]
            OUTPUT[m, 9] = session._pb_result[2::3, 4]

        except Exception as e:
            print(amp / e_amp)
            tqdm.write(f"Run {i} failed for {file}: {e}")
    np.savetxt(f"Output/{CATALOGUE}_{ID}.txt", OUTPUT)
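Note that inv(J.T @ J) above treats the residuals as if they had unit variance; a common refinement scales the covariance by the reduced chi-square of the fit. A hedged sketch of that scaling for a scipy least_squares result:

import numpy as np
from scipy.optimize import least_squares

def param_covariance(result, n_data):
    # result.cost is 0.5 * sum(residuals**2), so 2*cost is the RSS
    J = result.jac
    n_params = J.shape[1]
    s_sq = 2 * result.cost / max(n_data - n_params, 1)  # residual variance
    return np.linalg.inv(J.T @ J) * s_sq

rng = np.random.default_rng(2)
x = np.linspace(0, 1, 30)
y = 2 * x + 1 + rng.normal(0, 0.1, x.size)
res = least_squares(lambda p: y - (p[0] * x + p[1]), [0.0, 0.0])
print(np.sqrt(np.diag(param_covariance(res, x.size))))  # ~stderr of slope, intercept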
Example #50
0
    def fit(self, method="L-BFGS-B", target="nll", verbose=True, **kws):
        """
        Obtain the maximum log-likelihood, or log-posterior, estimate (mode)
        of the objective. Maximising the log-likelihood is equivalent to
        minimising chi2 in a least squares fit.

        Parameters
        ----------
        method : str
            which method to use for the optimisation. One of:

            - `'least_squares'`: :func:`scipy.optimize.least_squares`.
            - `'L-BFGS-B'`: L-BFGS-B.
            - `'differential_evolution'`:
              :func:`scipy.optimize.differential_evolution`
            - `'dual_annealing'`:
              :func:`scipy.optimize.dual_annealing` (SciPy >= 1.2.0)
            - `'shgo'`: :func:`scipy.optimize.shgo` (SciPy >= 1.2.0)

            You can also choose many of the minimizers from
            :func:`scipy.optimize.minimize`.

        target : {'nll', 'nlpost'}, optional
            Minimize the negative log-likelihood (`'nll'`) or the negative
            log-posterior (`'nlpost'`). This is equivalent to maximising the
            likelihood or posterior probabilities respectively.
            Maximising the likelihood is equivalent to minimising chi^2 in a
            least-squares fit.
            This option only applies to the `differential_evolution`, `shgo`,
            `dual_annealing` or `L-BFGS-B` methods.
            These optimisers require lower and upper (box) bounds for each
            parameter. If the `Bounds` on a parameter are not an `Interval`,
            but a `PDF` specifying a statistical distribution, then the lower
            and upper bounds are approximated as
            ``PDF.rv.ppf([0.005, 0.995])``, covering 99 % of the statistical
            distribution.
        verbose : bool, optional
            Gives fitting progress. To see a progress bar tqdm has to be
            installed.
        kws : dict
            Additional arguments are passed to the underlying minimization
            method.

        Returns
        -------
        result, covar : :class:`scipy.optimize.OptimizeResult`, np.ndarray
            `result.x` contains the best fit parameters
            `result.covar` is the covariance matrix for the fit.
            `result.stderr` is the uncertainties on each of the fit parameters.

        Notes
        -----
        If the `objective` supplies a `residuals` method then `least_squares`
        can be used. Otherwise the `nll` method of the `objective` is
        minimised. Use this method just before a sampling run.
        If `self.objective.parameters` is a `Parameters` instance, then each
        of the varying parameters has its value updated by the fit, and each
        `Parameter` has a `stderr` attribute which represents the uncertainty
        on the fit parameter.

        The use of `dual annealing` and `shgo` requires that `scipy >= 1.2.0`
        be installed.

        """
        _varying_parameters = self.objective.varying_parameters()
        init_pars = np.array(_varying_parameters)

        _min_kws = {}
        _min_kws.update(kws)
        _bounds = bounds_list(self.objective.varying_parameters())
        _min_kws["bounds"] = _bounds

        # setup callback default
        _min_kws.setdefault("callback", None)

        cost = self.objective.nll
        if target == "nlpost":
            cost = self.objective.nlpost

        # a decorator for the progress bar updater
        def _callback_wrapper(callback_func, pbar):
            def callback(*args, **kwds):
                pbar.update(1)
                if callback_func is None:
                    return None
                else:
                    return callback_func(*args, **kwds)

            return callback

        # least_squares Trust Region Reflective by default
        if method == "least_squares":
            b = np.array(_bounds)
            _min_kws["bounds"] = (b[..., 0], b[..., 1])

            # least_squares doesn't have a callback
            _min_kws.pop("callback", None)

            res = least_squares(
                self.objective.residuals, init_pars, **_min_kws
            )
        # differential_evolution, dual_annealing, shgo require lower and upper
        # bounds
        elif method in ["differential_evolution", "dual_annealing", "shgo"]:
            mini = getattr(sciopt, method)

            if method == "shgo":
                if "n" not in _min_kws:
                    _min_kws["n"] = 100
                if "iters" not in kws:
                    _min_kws["iters"] = 5

            with get_progress_bar(verbose, None) as pbar:
                _min_kws["callback"] = _callback_wrapper(
                    _min_kws["callback"], pbar
                )

                res = mini(cost, **_min_kws)
        else:
            # otherwise stick it to minimizer. Default being L-BFGS-B
            _min_kws["method"] = method
            _min_kws["bounds"] = _bounds

            with get_progress_bar(verbose, None) as pbar:
                _min_kws["callback"] = _callback_wrapper(
                    _min_kws["callback"], pbar
                )

                res = minimize(cost, init_pars, **_min_kws)

        # OptimizeResult.success may not be present (dual annealing)
        if hasattr(res, "success") and res.success:
            self.objective.setp(res.x)

            # Covariance matrix estimation
            covar = self.objective.covar()
            errors = np.sqrt(np.diag(covar))
            res["covar"] = covar
            res["stderr"] = errors

            # check if the parameters are all Parameter instances.
            flat_params = list(f_unique(flatten(self.objective.parameters)))
            if np.all([is_parameter(param) for param in flat_params]):
                # zero out all the old parameter stderrs
                for param in flat_params:
                    param.stderr = None
                    param.chain = None

                for i, param in enumerate(_varying_parameters):
                    param.stderr = errors[i]

            # need to touch up the output to check we leave
            # parameters as we found them
            self.objective.setp(res.x)

        return res
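For the least_squares branch above, the per-parameter (lo, hi) pairs produced by bounds_list are converted into the (lower_array, upper_array) form that scipy.optimize.least_squares expects. A self-contained illustration of that conversion:

import numpy as np

bounds = [(0.0, 1.0), (-5.0, 5.0), (1e-3, 1e3)]  # per-parameter (lo, hi) pairs

b = np.array(bounds)
lower, upper = b[..., 0], b[..., 1]  # two arrays of shape (3,)
print(lower, upper)  # ready for least_squares(..., bounds=(lower, upper))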
Example #51
0
def run_min(args, initial_guess):  # e.g. initial_guess = (np.random.random(6)).tolist()
    res = least_squares(cost_func, initial_guess, args=args, method="lm", ftol=1e-15, max_nfev=100000)  # 1e-10
    return res
def run_frequentist_analysis(input_h5_file,
                             model,
                             N_s,
                             g_s,
                             L_s,
                             Bbar_s_in,
                             GL_min,
                             GL_max,
                             param_names,
                             x0,
                             method="lm",
                             no_samples=500,
                             run_bootstrap=True,
                             print_info=True):
    """
        Central function to run the frequentist analysis. This function is
        used repeatedly in publication_results.py

        INPUTS:
        -------
        input_h5_file: string, file name of input Binder crossing mass values
        model: Fit ansatz function
        N_s: List of ints of N (SU(N) rank) values to be studied. N values
            should be ints
        g_s: List of ag (coupling constant in lattice units) values to be
            studied, ag values should be floats
        L_s: List of L / a (Lattice Size) values to be studied, L values should
            be ints
        Bbar_s_in: List of Bbar crossing values to be studied, Bbar values
            should be floats
        GL_min: Float, minimum value of the product (ag) * (L / a) to be
            included in the fit
        GL_max: Float, maximum value of the product (ag) * (L / a) to be
            included in the fit
        param_names: list of strings, with each entry being the name of a
            parameter of the fit ansatz
        x0: list of floats, starting parameter guesses for the minimizer
        method: string, name of the minimization routine to be used
        no_samples: int, number of bootstrap samples to use
        run_bootstrap: bool, if False the function won't run a bootstrap
        print_info: bool, if True extra information about the fit results is
            printed to std::out

        OUTPUTS:
        --------
        Depends on whether a bootstrap is run, which occurs only if
        run_bootstrap is True.

        If a bootstrap is run:
        > p: float, p-value of the central fit
        > param_estimates: array of shape (no_samples, n_params), bootstrap
            parameter estimates from which standard deviations can be taken
        > dof: int, number of degrees of freedom in the fit

        If a bootstrap isn't run:
        > p: float, p-value of the central fit
        > param_estimates: list of floats, parameter values at MAP estimate
        > dof: int, number of degrees of freedom in the fit
    """

    samples, g_s, L_s, Bbar_s, m_s = load_h5_data(input_h5_file, N_s, g_s, L_s,
                                                  Bbar_s_in, GL_min, GL_max)
    N = N_s[0]

    cov_matrix, different_ensemble = cov_matrix_calc(g_s, L_s, m_s, samples)
    cov_1_2 = numpy.linalg.cholesky(cov_matrix)
    cov_inv = numpy.linalg.inv(cov_1_2)

    res_function = make_res_function(N, m_s, g_s, L_s, Bbar_s)

    # Using scipy.optimize.least_squares
    if method == "least_squares":
        res = least_squares(res_function, x0, args=(cov_inv, model))

    elif method in ["lm", "dogbox"]:
        res = least_squares(res_function,
                            x0,
                            args=(cov_inv, model),
                            method=method)

    # Using scipy.optimize.minimize ("dogbox" is a least_squares method and is
    # handled above, not by minimize)
    elif method in ["Nelder-Mead", "Powell", "CG", "BFGS", "COBYLA"]:
        res = minimize(lambda x, y, z: numpy.sum(res_function(x, y, z)**2),
                       x0,
                       args=(cov_inv, model),
                       method=method)

    chisq = chisq_calc(res.x, cov_inv, model, res_function)
    n_params = len(res.x)
    dof = g_s.shape[0] - n_params
    p = chisq_pvalue(dof, chisq)
    param_central = res.x

    if print_info:
        print("##############################################################")
        print(f"Config: N = {N}, Bbar_s = [{Bbar_s_in[0]}, {Bbar_s_in[1]}],"
              f" gL_min = {GL_min}, gL_max = {GL_max}")
        print(f"chisq = {chisq}")
        print(f"chisq/dof = {chisq / dof}")
        print(f"pvalue = {p}")
        print(f"dof = {dof}")

    # If the p-value is acceptable, run a bootstrap to get a statistical error
    if run_bootstrap and p > 0.05:
        param_estimates = numpy.zeros((no_samples, n_params))

        for i in tqdm(range(no_samples)):
            m_s = samples[:, i]

            res_function = make_res_function(N_s[0], m_s, g_s, L_s, Bbar_s)

            if method == "least_squares":
                res = least_squares(res_function, x0, args=(cov_inv, model))

            if method == "lm":
                res = least_squares(res_function,
                                    x0,
                                    args=(cov_inv, model),
                                    method=method)

            # Using scipy.optimize.minimize
            if method in [
                    "dogbox", "Nelder-Mead", "Powell", "CG", "BFGS", "COBYLA"
            ]:

                def dummy_func(x, y, z):
                    return numpy.sum(res_function(x, y, z)**2)

                res = minimize(dummy_func,
                               x0,
                               args=(cov_inv, model),
                               method=method)

            param_estimates[i] = numpy.array(res.x)

        return p, param_estimates, dof

    return p, res.x, dof
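A note on the whitening step above: cov_1_2 is the Cholesky factor of the covariance matrix and cov_inv is its inverse, so multiplying the raw residuals by cov_inv inside res_function turns correlated measurement errors into an ordinary least-squares problem, with the sum of squared whitened residuals equal to chi-squared. A minimal sketch of the same pattern with hypothetical data:

import numpy as np
from scipy.optimize import least_squares

# hypothetical covariance of three correlated measurements
cov = np.array([[1.0, 0.3, 0.0],
                [0.3, 1.0, 0.3],
                [0.0, 0.3, 1.0]])
cov_inv = np.linalg.inv(np.linalg.cholesky(cov))  # same construction as above
y = np.array([0.9, 1.1, 1.05])

def residuals(params, cov_inv):
    raw = y - params[0]   # fit a single constant to the three points
    return cov_inv @ raw  # whitened residuals: chisq = sum(r**2)

res = least_squares(residuals, [0.0], args=(cov_inv,))
chisq = 2 * res.cost      # least_squares reports cost = 0.5 * sum(r**2)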
Example #53
0
 def __runOpti(self, scalarResidual=False, optiParam={}, noLBFGS=True):
     from runAbqTools import callbackF
     import numpy as np
     if scalarResidual:
         from opti4AbqResiduals import residualsScalar
         opts = {
             'maxiter': optiParam['maxIter'],
             'disp': self.verbose,
             'ftol': optiParam['ftol'],
             'eps': optiParam['eps']
         }
         from scipy.optimize import minimize
         if self.bounds is None:
             opts['norm'] = 2
             opts['gtol'] = optiParam['gtol']
             res = minimize(residualsScalar,
                            self.p0,
                            method='CG',
                            args=(self.modelsPath, self.dataPath,
                                  self.absDiff, self.verbose),
                            jac=False,
                            tol=optiParam['tol'],
                            options=opts,
                            callback=callbackF)
         else:
             opts['ftol'] = optiParam['ftol']
             res = minimize(residualsScalar,
                            self.p0,
                            method='L-BFGS-B',
                            args=(self.modelsPath, self.dataPath,
                                  self.absDiff, self.verbose),
                            jac=False,
                            bounds=self.bounds,
                            tol=optiParam['tol'],
                            options=opts,
                            callback=callbackF)
         d = {}
         d['funcalls'] = res.nfev
         d['task'] = res.message
         d['grad'] = res.jac
         if self.verbose: print(res.message)
         return res.x, res.fun, d
     else:
         from opti4AbqResiduals import residuals
         if self.bounds is None:
             from scipy.optimize import leastsq
             nbParam = len(self.p0)
             pLSQ, covP, info, msg, ier = leastsq(
                 residuals,
                 self.p0,
                 args=(self.modelsPath, self.dataPath, False, self.absDiff,
                       self.verbose),
                 Dfun=None,
                 full_output=True,
                 maxfev=optiParam['maxIter'] * nbParam ** nbParam,  # '**', not '^' (XOR)
                 epsfcn=optiParam['eps'],
                 ftol=optiParam['ftol'])
             fVal = info['fvec']
             d = {}
             d['funcalls'] = info['nfev']
             d['grad'] = info['fjac']
             d['task'] = msg
             if self.verbose: print(msg)
         else:
             if noLBFGS:
                 nbParam = len(self.p0)
                 if nbParam > 1:
                     newBounds = [[], []]
                     for pair in self.bounds:
                         newBounds[0].append(pair[0])
                         newBounds[1].append(pair[1])
                 else:
                     newBounds = self.bounds
                 # trf method - Trust Region Reflective algorithm, particularly
                 # suitable for large sparse problems with bounds; a generally
                 # robust method.
                 from scipy.optimize import least_squares
                 res = least_squares(
                     residuals,
                     self.p0,
                     args=(self.modelsPath, self.dataPath, True,
                           self.absDiff, self.verbose),
                     bounds=newBounds,
                     ftol=optiParam['ftol'],
                     xtol=optiParam['tol'],
                     gtol=optiParam['gtol'],
                     diff_step=optiParam['eps'],
                     max_nfev=optiParam['maxIter'] * nbParam ** nbParam,  # '**', not '^' (XOR)
                     verbose=2)
                 pLSQ = res.x
                 fVal = res.fun
                 if self.verbose: print(res.message)
                 d = {}
                 d['funcalls'] = res.nfev
                 d['task'] = res.message
                 d['grad'] = res.jac
             else:
                 from scipy.optimize import fmin_l_bfgs_b
                 factorTol = optiParam['ftol'] / np.finfo(float).eps
                 pLSQ, fVal, d = fmin_l_bfgs_b(
                     residuals,
                     self.p0,
                     args=(self.modelsPath, self.dataPath, True,
                           self.absDiff, self.verbose),
                     approx_grad=True,
                     bounds=self.bounds,
                     factr=factorTol,
                     epsilon=optiParam['eps'],
                     disp=True,
                     maxiter=optiParam['maxIter'],
                     callback=callbackF)
                 if d['warnflag'] == 0:
                     d['task'] = "successful convergence"
                 elif d['warnflag'] == 1:
                     d['task'] = "too many function evaluations or too many iterations"
                 if self.verbose: print(d)
         return pLSQ, fVal, d
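One detail worth noting in the noLBFGS branch above: scipy.optimize.least_squares takes bounds as a single (all_lower, all_upper) pair, while the minimize-style routines take one (low, high) pair per parameter, which is why the code reshuffles self.bounds into newBounds. A short self-contained sketch of that conversion, with a hypothetical two-parameter residual:

import numpy as np
from scipy.optimize import least_squares

bounds_pairs = [(0.0, 4.0), (0.0, 4.0)]          # minimize style: (low, high) per parameter
lows, highs = map(np.array, zip(*bounds_pairs))  # least_squares style: all lows, all highs

res = least_squares(lambda p: np.array([p[0] - 1.0, p[1] - 2.0]),
                    [0.5, 0.5], bounds=(lows, highs))
print(res.x)  # approximately [1.0, 2.0]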
Example #54
0
df.dropna(inplace=True)

ReacPs = df.loc[:, "t (min)":"T (K)"].to_numpy(dtype=np.float64)
Yields = df.loc[:, "EXP_BC_daf":"EXP_GAS_DAF"].to_numpy(dtype=np.float64)
Proxanal = df.loc[:, "Carb (daf wt. %)":"Prot (daf wt. %)"].to_numpy(
    dtype=np.float64)

K0 = [5.37, 4.15, 4.52, 5.29, 5.25, 5.32, 3.41, 3.52, 3.36, 4.63]

K0 = np.array(K0, dtype=np.float64)

steps = 10

result = optimize.least_squares(__ODEErrorFunc__,
                                K0,
                                args=(ReacPs, Yields, Proxanal, steps),
                                bounds=(0, 10),
                                ftol=1e-6,
                                verbose=2)

delta = dt.datetime.now() - StartTime

print(f"\nCalculation complete!\n\nTotal time taken: {delta}\n\n"
      f"    Number of function evaluations to reach convergence: {result.nfev}\n"
      f"    Time taken per function evaluation: {delta/result.nfev}")

df2 = pd.DataFrame()
for i in range(len(SSE_log)):
    df2.loc[i, "Step"] = i
    df2.loc[i, "SSE"] = SSE_log[i]
    df2.loc[i, "RMSE"] = RMSE_log[i]
# E2: (f(x1) - g(x2)) / (x1 - x2) - fprime(x1)
def equations(p):
    x1, x2 = p
    E1 = chemPot(x1, popt_cu[1], popt_cu[2]) - chemPot(x2, popt_sn[1],
                                                       popt_sn[2])
    E2 = ((freeEnergy(x1, popt_cu[0], popt_cu[1], popt_cu[2]) -
           freeEnergy(x2, popt_sn[0], popt_sn[1], popt_sn[2])) /
          (x1 - x2)) - chemPot(x1, popt_cu[1], popt_cu[2])
    return (E1, E2)


####LEAST_SQUARE_OPTIMIZATION_METHODS (LSOM)
from scipy.optimize import least_squares
lb = (0.70, 0.91)  # lower bounds on x1, x2
ub = (0.75, 0.95)  # upper bounds
result = least_squares(equations, [0.70, 0.91], bounds=(lb, ub))
result_tight_tols = least_squares(equations, [0.70, 0.91],
                                  ftol=1e-12,
                                  xtol=1e-12,
                                  gtol=1e-12,
                                  bounds=(lb, ub))

#LSOM METHOD-I (DEFAULT_TOLERANCES)
print """
####  ftol=1e-08, xtol=1e-08, gtol=1e-08  #####
"""
print 'result = ', result
print 'result.x = ', result.x
print """
####something
"""
Example #56
0
def linresponse(self, n0, vs0=None):
    """
    wuyang like inversion of the density with response
    """

    n0 = n0[:, None]
    pol   = n0.shape[1] if len(n0.shape) > 1 else 1
    self.pol = pol
    self.n0  = n0
    Nelem = n0.shape[0]
    w = self.grid.w if pol == 1 else np.hstack((self.grid.w, self.grid.w))
    
    if self.solver[0,0].veff is None:
        if vs0 is None:
            #If no initial guess is provided
            #Use the von Weizsäcker inversion
            vs0 = (0.5 * self.grid.elap @ (n0**0.5)) / (n0**0.5 * w)
            vs0 -= vs0[-1]
    else:
        vs0 = self.solver[0,0].veff[:,None] if len(self.solver[0,0].veff.shape) == 1 else self.solver[0,0].veff

    self.vs = np.zeros_like(vs0)
    self.us = np.zeros((1, pol))
    flag   = np.empty_like(self.solver, dtype=object)
    output = np.empty_like(self.solver, dtype=object)

    if self.optInversion["ENS_SPIN_SYM"] is True:
        print("Warning | Multiple solvers may have not been set properly")
        #Invert density n0 to find vs
        i = 0
        #Preallocation
        #B is the inverse of n0 in main diagonal
        B = spdiags(1. / n0[:, i], 0, Nelem, Nelem)
        self.B = B

        self.solver[0,i].hamiltonian()
        self.solver[0,i].e0 = -20

        res_lsq = least_squares(fun    = self.Ws,
                                x0     = vs0[:, i], 
                                jac    = self.Jacobian, 
                                method = "trf", 
                                args   = (i,))

        #Get solution from least squares object
        self.vs[:,i] = res_lsq.x
        self.us[i] = self.solver[0,i].get_homo()
        print("us from linsresponse",self.us)
        flag[0,i] = res_lsq.status
        output[0,i] = res_lsq

        #Copy information
        self.vs[:, 1] = self.vs[:, 0]
        self.us[1] = self.us[0]
        self.solver[0,1].setveff(self.vs[:,0])
        flag[0,1] = res_lsq.status
        output[0,1] = res_lsq

    else:
        for i in range(pol):
            #Invert density n0 to find vs
            #Preallocation
            B = spdiags(1./n0[:, i], 0, Nelem, Nelem)
            self.B = B
            self.solver[0,i].hamiltonian()
            self.solver[0,i].e0 = -20

            res_lsq = least_squares(fun    = self.Ws,
                                    x0     = vs0[:, i], 
                                    jac    = self.Jacobian, 
                                    method = "trf", 
                                    args   = (i,))

            self.vs[:,i] = res_lsq.x
            self.us[i]   = self.solver[0,i].get_homo()
            flag[0,i] = res_lsq.status
            output[0,i] = res_lsq

    return flag, output
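Both least_squares calls above pass an analytic Jacobian (jac=self.Jacobian), which spares the 'trf' solver from building one by finite differences. A self-contained sketch of the same pattern with a hypothetical two-residual problem:

import numpy as np
from scipy.optimize import least_squares

def fun(p):
    return np.array([p[0] + 2.0 * p[1] - 2.0,
                     np.exp(p[0]) - p[1]])

def jac(p):
    # rows correspond to residuals, columns to parameters
    return np.array([[1.0, 2.0],
                     [np.exp(p[0]), -1.0]])

res = least_squares(fun, [0.0, 0.0], jac=jac, method="trf")
print(res.x, res.cost)  # converges to approximately [0.0, 1.0], cost ~ 0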
Example #57
0
def _fast_ASD_weighted_group(x, y, w, nk_grp, min_length, nx_circ=np.nan):
    # Infers independent groups of smooth coefficients using diagonalized ASD prior
    #
    # Empirical Bayes estimate of regression coefficients under automatic
    # smoothness determination (ASD) prior (also known as Gaussian or
    # squared-exponential kernel), with maximum marginal likelihood estimate of
    # prior covariance parameters.
    #
    # Implementation: uses Fourier representation of stimuli and ASD covariance
    # matrix, so prior is diagonal.
    #
    # INPUT:
    # x - stimulus
    # y - symbol
    # w - gamma
    # nk_grp [1 * 1] - number of elements in each group (assumed to include all coefficients)
    # min_length [1 * 1] or [n_grp * 1], minimum length scale (for all or for each group)
    # nx_circ [n_grp * 1] - circular boundary for each dimension (OPTIONAL)
    #
    # OUTPUT:
    # kest [nk * 1] - ASD estimate of regression weights
    # ASD_stats - dictionary with fitted hyperparameters, Hessian, and posterior covariance
    #
    # Note: does not include a DC term, so should be applied when response and regressors have been standardized to have mean zero

    ## ========== Parse inputs and determine what hyperparams to initialize ==========

    COND_THRESH = 1e8  # threshold for small eigenvalues

    # Compute sufficient statistics
    w = np.diag(w)

    dd = {'xx': np.matmul(np.matmul(x.T, w), x)}  # stimulus auto-covariance
    dd['xy'] = np.matmul(np.matmul(x.T, w),
                         y)  # stimulus-response cross-covariance
    # dd['yy'] = np.matmul(np.matmul(y.T, w), y) # marginal response variance
    dd['yy'] = np.matmul(y.T, y)  # marginal response variance
    dd['n_samps'], nx = x.shape  # total number of samples and number of coeffs
    n_grp = len(nk_grp)  # number of groups

    # Check to make sure same number of elements in 'xx' as group indices
    if np.sum(nk_grp, axis=0) != nx:
        print(
            'Stimulus size dd[xx] does not match number of indices in group_id'
        )

    # Replicate min_length to vector if needed
    if len(min_length) == 1:
        min_length = np.tile(min_length, (n_grp, 1))

    # Set circular boundary for each group of coeffs
    if np.all(np.isnan(nx_circ)):  # '== np.nan' is always False; use isnan
        nx_circ = np.zeros((n_grp, 1))  # initialize nx_circ
        for i in range(0, n_grp):
            nx_circ_MAX = 1.25 * nk_grp[
                i]  # maximum based on number of coeffs in group
            nx_circ[i] = np.ceil(
                max(nk_grp[i] + 2 * min_length[i],
                    nx_circ_MAX))  # set based on maximal smoothness

    # ----- Initialize range for hyperparameters -----

    # Length scale range
    max_length = np.maximum(min_length * 2, nk_grp / 4).T
    length_range = [min(min_length), max(max_length)]

    # Rho range
    rho_max = 2 * (dd['yy'] / dd['n_samps']) / np.mean(
        np.diag(dd['xx']), axis=0)  # ratio of variance of output to input
    rho_min = min(1, 0.1 * rho_max)  # minimum to explore
    rho_range = [rho_min, rho_max]

    # Noise variance sigma_n^2
    nsevar_max = dd['yy'] / dd['n_samps']  # marginal variance of y
    nsevar_min = min(1, 0.01 * nsevar_max)  # var ridge regression residuals
    nsevar_range = [nsevar_min, nsevar_max]

    # Change of variables to tilde rho (which separates rho and length scale)
    trho_range = np.sqrt(2 * np.pi) * np.array(rho_range) * np.array(
        [min(length_range), max(length_range)])

    ## ========== Diagonalize by converting to FFT basis ==========

    opts = {'nx_circ': nx_circ}
    opts['cond_thresh'] = COND_THRESH
    opt1 = dict(opts)  # copy, so the per-group nx_circ set below doesn't clobber opts

    # Generate Fourier basis for each group of coeffs
    B_mats = []  # Fourier basis for each group
    w_vecs_per_grp = []  # frequency vector for each group

    B_mats_shape_0 = np.zeros(n_grp, dtype=int)
    B_mats_shape_1 = np.zeros(n_grp, dtype=int)

    n_w_vec = np.zeros(n_grp, dtype=int)

    for i in range(0, n_grp):
        opt1['nx_circ'] = opts['nx_circ'][i]  # pass in ju
        _, B_mats_temp, w_vecs_per_grp_temp = _mkcov_ASD_factored(
            [min_length[i], 1], nk_grp[i], opt1)

        B_mats.append(B_mats_temp)
        w_vecs_per_grp.append(w_vecs_per_grp_temp)

        B_mats_shape_0[i] = B_mats_temp.shape[0]
        B_mats_shape_1[i] = B_mats_temp.shape[1]

        n_w_vec[i] = len(w_vecs_per_grp_temp)

    Bfft = np.zeros((np.sum(B_mats_shape_0,
                            axis=0), np.sum(B_mats_shape_1, axis=0)))
    w_vec = np.array([])

    for i in range(0, n_grp):
        Bfft[np.sum(B_mats_shape_0[0:i], axis=0
                    ):np.sum(B_mats_shape_0[0:i + 1], axis=0),
             np.sum(B_mats_shape_1[0:i], axis=0):np.
             sum(B_mats_shape_1[0:i + 1], axis=0)] = B_mats[
                 i]  # Fourier basis matrices assembled into block diag
        w_vec = np.concatenate(
            (w_vec, w_vecs_per_grp[i]),
            axis=0)  # group Fourier frequencies assembled into one vec

    dd['xx'] = np.matmul(
        np.matmul(Bfft.T, dd['xx']),
        Bfft)  # project xx into Fourier basis for each group of coeffs
    dd['xy'] = np.matmul(Bfft.T,
                         dd['xy'])  # project xy into Fourier basis for each group

    # Make matrix for mapping hyperparams to Fourier coefficients for each group
    B_grp = np.zeros((np.sum(n_w_vec, axis=0), n_grp))
    for i in range(0, n_grp):
        B_grp[np.sum(n_w_vec[0:i], axis=0):np.sum(n_w_vec[0:i + 1], axis=0),
              i] = np.ones(n_w_vec[i])

    # Compute vector of normalized squared Fourier frequencies
    ww_nrm = np.power(2 * np.pi / np.matmul(B_grp, nx_circ), 2) * np.power(
        w_vec, 2)  # compute normalized DFT frequencies squared

    ## ========== Grid search for initial hyperparameters ==========

    # Set loss function for grid search
    ii_grp = np.concatenate([np.zeros(n_grp, dtype=int),
                             np.ones(n_grp, dtype=int),
                             [2]])  # indices for expanding grid params to per-group params
    l_fun_0 = lambda prs: _neglogev_ASD_spectral_group(
        prs[ii_grp], dd, B_grp, ww_nrm, COND_THRESH)  # loss function

    # Set up grid
    n_grid = 4  # search a 4 * 4 * 4 grid for initial value of hyperparameters
    rng = [length_range, trho_range, nsevar_range]

    # Do grid search and find minimum
    nll_vals, grid_pts = _grid_eval(n_grid, rng, l_fun_0)
    h_prs_00, _, _ = argmin(nll_vals, grid_pts[:, 0], grid_pts[:, 1],
                            grid_pts[:, 2])  # find minimum
    h_prs_0 = h_prs_00[ii_grp]  # initialize hyperparameters for each group

    ## ========== Optimize evidence using fmincon ==========

    l_fun = lambda prs: _neglogev_ASD_spectral_group(
        prs, dd, B_grp, ww_nrm, COND_THRESH)  # loss function
    LB = np.concatenate([np.ravel(min_length),
                         1e-2 * np.ones(n_grp + 1)])  # lower bounds
    UB = np.inf * np.ones(n_grp * 2 + 1)  # upper bounds

    res_opt = least_squares(l_fun,
                            h_prs_0,
                            jac='2-point',
                            bounds=(LB, UB),
                            method='trf',
                            ftol=1e-08,
                            xtol=1e-08,
                            gtol=1e-08,
                            x_scale=1.0,
                            loss='linear',
                            f_scale=1.0,
                            diff_step=None,
                            tr_solver=None,
                            tr_options={},
                            jac_sparsity=None,
                            max_nfev=None,
                            verbose=0)  # run optimization
    h_prs_hat = res_opt.x  # least_squares returns a result object, not the array itself

    ## ========== Compute posterior mean and covariance at maximizer of hyperparams ==========

    neglogEv, _, H, mu_FFT, L_post_FFT, ii = l_fun(h_prs_hat)
    kest = np.matmul(
        Bfft[:,
             ii], mu_FFT)  # inverse Fourier transform of Fourier-domain mean

    # Report rank of prior at termination
    print('fast_ASD_weighted_group: terminated with rank of C_prior = ' +
          str(np.sum(ii, axis=0)))

    # Check if length scale is at minimum allowed range
    if np.any(h_prs_hat[0:n_grp] <= min_length + 0.001):
        print([
            'Solution is at minimum length scale >>> Consider re-running with shorter min_length'
        ])

    # Assemble summary statistics for output
    # Transform trho back to standard rho
    l_hat = h_prs_hat[0:n_grp]  # length scales
    trho_hat = h_prs_hat[n_grp:2 * n_grp]  # transformed rho params
    a = np.sqrt(2 * np.pi)
    rho_hat = trho_hat / (a * l_hat)  # original rho params
    A_jcb = np.block([
        [np.identity(n_grp), np.zeros((n_grp, n_grp + 1))],
        [a * np.hstack((np.diag(rho_hat), np.diag(l_hat))),
         np.zeros((n_grp, 1))],
        [np.zeros((1, n_grp * 2)), np.ones((1, 1))]
    ])  # Jacobian for parameters

    ASD_stats = {'rho': rho_hat}  # rho hyperparameter
    ASD_stats['len'] = l_hat  # length scale hyperparameter
    ASD_stats['nsevar'] = h_prs_hat[-1]  # noise variance
    # ASD_stats['H'] = H # Hessian of hyperparameters
    ASD_stats['H'] = np.matmul(np.matmul(A_jcb.T, H),
                               A_jcb)  # Hessian of hyperparameters
    ASD_stats['ci'] = np.sqrt(np.diag(np.linalg.inv(
        ASD_stats['H'])))  # 1SD posterior CI for hyperparameters

    ASD_stats['neglogEv'] = neglogEv  # negative log evidence at solution

    # Just compute diagonal of posterior covariance
    ASD_stats['L_post_diag'] = np.sum(Bfft[:, ii].T *
                                      np.matmul(L_post_FFT, Bfft[:, ii].T),
                                      axis=0).T

    # If full posterior cov for filter is desired
    ASD_stats['L_post'] = np.matmul(np.matmul(Bfft[:, ii], L_post_FFT),
                                    Bfft[:, ii].T)

    return kest, ASD_stats
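The grid search that seeds the optimizer above goes through repo-specific helpers (_grid_eval and argmin). The underlying pattern, evaluating the loss on a coarse grid and starting the optimizer from the best grid point, in a generic self-contained sketch:

import numpy as np

def loss(p):
    # hypothetical loss with its minimum at (0.3, -1.0)
    return (p[0] - 0.3) ** 2 + (p[1] + 1.0) ** 2

axes = [np.linspace(0.0, 1.0, 4), np.linspace(-2.0, 0.0, 4)]
pts = np.stack(np.meshgrid(*axes, indexing="ij"), axis=-1).reshape(-1, 2)
p0 = pts[np.argmin([loss(p) for p in pts])]  # best grid point seeds the optimizer
print(p0)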
Example #58
0
    def fit(self, method='L-BFGS-B', **kws):
        """
        Obtain the maximum log-likelihood estimate (mode) of the objective. For
        a least-squares objective this would correspond to lowest chi2.

        Parameters
        ----------
        method : str
            which method to use for the optimisation. One of:

            - `'least_squares'`: `scipy.optimize.least_squares`.
            - `'L-BFGS-B'`: L-BFGS-B
            - `'differential_evolution'`: differential evolution

            You can also choose many of the minimizers from
            ``scipy.optimize.minimize``.
        kws : dict
            Additional arguments are passed to the underlying minimization
            method.

        Returns
        -------
        result : OptimizeResult
            `result.x` contains the best fit parameters.
            `result.covar` is the covariance matrix for the fit.
            `result.stderr` is the uncertainties on each of the fit parameters.

        Notes
        -----
        If the `objective` supplies a `residuals` method then `least_squares`
        can be used. Otherwise the `nll` method of the `objective` is
        minimised. Use this method just before a sampling run.
        If `self.objective.parameters` is a `Parameters` instance, then each
        of the varying parameters has its value updated by the fit, and each
        `Parameter` has a `stderr` attribute which represents the uncertainty
        on the fit parameter.

        """
        _varying_parameters = self.objective.varying_parameters()
        init_pars = np.array(_varying_parameters)

        _min_kws = {}
        _min_kws.update(kws)
        _bounds = bounds_list(self.objective.varying_parameters())
        _min_kws['bounds'] = _bounds

        # least_squares Trust Region Reflective by default
        if method == 'least_squares':
            b = np.array(_bounds)
            _min_kws['bounds'] = (b[..., 0], b[..., 1])
            res = least_squares(self.objective.residuals, init_pars,
                                **_min_kws)
        # differential_evolution requires lower and upper bounds
        elif method == 'differential_evolution':
            res = differential_evolution(self.objective.nll, **_min_kws)
        else:
            # otherwise stick it to minimizer. Default being L-BFGS-B
            _min_kws['method'] = method
            _min_kws['bounds'] = _bounds
            res = minimize(self.objective.nll, init_pars, **_min_kws)

        if res.success:
            self.objective.setp(res.x)

            # Covariance matrix estimation
            covar = self.objective.covar()
            errors = np.sqrt(np.diag(covar))
            res['covar'] = covar
            res['stderr'] = errors

            # check if the parameters are all Parameter instances.
            flat_params = list(f_unique(flatten(self.objective.parameters)))
            if np.all([is_parameter(param) for param in flat_params]):
                # zero out all the old parameter stderrs
                for param in flat_params:
                    param.stderr = None
                    param.chain = None

                for i, param in enumerate(_varying_parameters):
                    param.stderr = errors[i]

            # need to touch up the output to make sure we leave
            # parameters as we found them
            self.objective.setp(res.x)

        return res
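The stderr values assigned above are simply the square roots of the covariance diagonal. A two-line sketch of that step, with a hypothetical 2x2 covariance matrix:

import numpy as np

covar = np.array([[0.04, 0.01],
                  [0.01, 0.09]])   # hypothetical covariance from a fit
stderr = np.sqrt(np.diag(covar))   # 1-sigma uncertainties: [0.2, 0.3]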
Example #59
0
def solve_angles(angles_init, one, two, thr, stator, rotor, gamma, cp, R, GR,
                 phi, DeltaH_prod, bounds_angles):
    def f_angles(angles, one, two, thr, stator, rotor, gamma, cp, R, GR, phi,
                 DeltaH_prod):

        alpha = angles[0]
        beta = angles[1]

        # assume an alpha at station two and project velocities

        two.vel.Vx, two.vel.Vu = f.velocity_projections(two.vel.V, alpha)

        # assume a loading factor and calculate peripheral speed
        two.vel.U = np.sqrt(DeltaH_prod / phi)

        # velocity triangle (Pythagoras)
        two.vel.Wu = two.vel.Vu - two.vel.U
        two.vel.Wx = two.vel.Vx
        two.vel.W = f.mag(two.vel.Wu, two.vel.Wx)

        # relative inlet angle and relative Mach number
        two.vel.Mr = two.vel.W / two.vel.a

        # relative total quantities at rotor inlet
        two.T0r, two.P0r = f.relative_temperature_pressure(
            two.T, two.P, two.vel.Mr, gamma)

        ############ ROTOR #############
        # in the rotor, the relative total temperature is constant (rothalpy is conserved)
        thr.T0r = two.T0r

        # ideal outlet temperature, real outlet temperature
        # assuming the expansion to thr.P, calculated with the assumed M3
        thr.Ts = f.P2T(thr.T0r, thr.P, two.P0r,
                       gamma)  # = thr.T0r*(thr.P/two.P0r)**((gamma-1)/gamma)
        thr.T = f.stage_efficiency(
            rotor.eta, thr.T0r,
            thr.Ts)  # = thr.T0r - rotor.eta*(thr.T0r - thr.Ts)

        # velocities
        thr.vel.W = f.isen_velocity(cp, thr.T0r,
                                    thr.T)  # = np.sqrt(2*cp*(thr.T0r - thr.T))

        # speed of sound and relative mach number
        thr.vel.a, thr.vel.Mr = f.sonic(gamma, R, thr.T,
                                        thr.vel.W)  # = np.sqrt(gamma*R*thr.T)

        # velocity projections, axial and tangential
        thr.vel.Wx, thr.vel.Wu = f.velocity_projections(thr.vel.W, beta)

        # constant radius, constant peripheral speed
        thr.vel.U = two.vel.U

        # tangential outlet speed
        thr.vel.Vu = thr.vel.Wu + thr.vel.U
        # V3u_euler = -DeltaH_prod/two.vel.U + two.vel.Vu

        # assume axial speed constant
        thr.vel.Vx = thr.vel.Wx
        thr.vel.V = f.mag(
            thr.vel.Vx, thr.vel.Vu)  # = np.sqrt(thr.vel.Vx**2 + thr.vel.Vu**2)

        # find work, check for angle iteration
        DeltaH_calculated = two.vel.U * (two.vel.Vu - thr.vel.Vu)

        diff = DeltaH_calculated - DeltaH_prod

        return np.array([diff[0], 0])

    # angles  = fsolve(f_angles,angles_init,args=(one, two, thr, stator, rotor, gamma, cp, R, GR, phi, DeltaH_prod))
    results = least_squares(f_angles,
                            angles_init,
                            args=(one, two, thr, stator, rotor, gamma, cp, R,
                                  GR, phi, DeltaH_prod),
                            bounds=bounds_angles,
                            method='trf')
    # (the least_squares method allows for the inclusion of bounds)

    return results.x
    def estimate(self, cam_model, track, track_cam_states, debug=False):
        """Estimate feature 3D location by optimizing over inverse depth
        parameterization using Gauss Newton Optimization

        Parameters
        ----------
        cam_model : CameraModel
            Camera model
        track : FeatureTrack
            Feature track
        track_cam_states : list of CameraState
            Camera states where feature track was observed
        debug :
            Debug mode (default: False)

        Returns
        -------
        p_G_f : np.array - 3x1
            Estimated feature position in global frame

        """
        # Calculate initial estimate of 3D position
        p_C0_f, residual = self.initial_estimate(cam_model, track,
                                                 track_cam_states)

        # Get ground truth
        if track.ground_truth is not None:
            return track.ground_truth

        # Convert ground truth expressed in global frame
        # to be expressed in camera 0
        # C_C0G = C(track_cam_states[0].q_CG)
        # p_G_C0 = track_cam_states[0].p_G
        # p_C0_f = dot(C_C0G, (p_G_f - p_G_C0))

        # print("true: ", p_C0_f.ravel())

        # Create inverse depth params (these are to be optimized)
        alpha = p_C0_f[0, 0] / p_C0_f[2, 0]
        beta = p_C0_f[1, 0] / p_C0_f[2, 0]
        rho = 1.0 / p_C0_f[2, 0]
        theta_k = np.array([alpha, beta, rho])

        # z = 1 / rho
        # X = np.array([[alpha], [beta], [1.0]])
        # C_C0G = C(track_cam_states[0].q_CG)
        # p_G_C0 = track_cam_states[0].p_G
        # init = z * dot(C_C0G.T, X) + p_G_C0

        # Optimize feature location
        args = (cam_model, track, track_cam_states)
        result = least_squares(self.reprojection_error,
                               theta_k,
                               args=args,
                               jac=self.jacobian,
                               verbose=1,
                               method="lm")

        # if result.cost > 1e-4:
        #     return None

        # Calculate feature position in global frame
        alpha, beta, rho = result.x.ravel()
        z = 1 / rho
        X = np.array([[alpha], [beta], [1.0]])
        C_C0G = C(track_cam_states[0].q_CG)
        p_G_C0 = track_cam_states[0].p_G
        p_G_f = z * dot(C_C0G.T, X) + p_G_C0

        # print("ground truth: ", track.ground_truth.ravel())
        # print("cost: ", result.cost)
        # print("initial: ", init.ravel())
        # print("final: ", p_G_f.ravel())

        # p_C_f = dot(C_C0G, (p_G_f - p_G_C0))
        # if p_C_f[2, 0] < 2.0:
        #     return None
        # if p_C_f[2, 0] > 200.0:
        #     return None

        return p_G_f
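The back-projection at the end of estimate converts the optimized inverse-depth triple (alpha, beta, rho) back into a 3D point in the global frame. A self-contained sketch of that final step, using an identity rotation in place of C(track_cam_states[0].q_CG) and hypothetical parameter values:

import numpy as np

alpha, beta, rho = 0.1, -0.2, 0.25  # hypothetical optimized inverse-depth parameters
C_C0G = np.eye(3)                   # rotation global -> camera 0 (identity for illustration)
p_G_C0 = np.zeros((3, 1))           # camera-0 position in the global frame

z = 1.0 / rho                       # recovered depth
X = np.array([[alpha], [beta], [1.0]])
p_G_f = z * C_C0G.T @ X + p_G_C0    # 3x1 feature position in the global frame
print(p_G_f.ravel())                # [0.4, -0.8, 4.0]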