Code example #1
File: lprmsd.py  Project: raviramanathan/msmbuilder
def AlignToDensity(elem, xyz1, xyz2, binary=False):
    """ 
    Pre-aligns molecules to some density.
    I don't really like this, but I have to start with some alignment
    and a grid scan just plain sucks.
    
    This function can be called by AlignToMoments to get rid of inversion problems
    """

    t0 = np.array([0, 0, 0])
    if binary:
        t1 = optimize.brute(
            ComputeOverlap,
            ((0, np.pi), (0, np.pi), (0, np.pi)),
            args=(elem, xyz1, xyz2),
            Ns=2,
            finish=optimize.fmin_bfgs,
        )
    else:
        t1 = optimize.brute(
            ComputeOverlap,
            ((0, 2 * np.pi), (0, 2 * np.pi), (0, 2 * np.pi)),
            args=(elem, xyz1, xyz2),
            Ns=6,
            finish=optimize.fmin_bfgs,
        )
    xyz2R = (np.array(EulerMatrix(t1[0], t1[1], t1[2]) * np.mat(xyz2.T)).T).copy()
    return xyz2R
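The grid-then-polish idiom above (coarse brute scan over Euler angles, refined by BFGS) can be reproduced standalone; a minimal sketch where toy_overlap is an assumed stand-in, not the project's ComputeOverlap:

import numpy as np
from scipy import optimize

def toy_overlap(angles):
    # stand-in for ComputeOverlap: smooth and periodic in each Euler angle
    return -np.cos(angles[0]) * np.cos(angles[1]) * np.cos(angles[2])

# coarse grid over the three Euler angles, refined by BFGS from the best cell
t1 = optimize.brute(toy_overlap, ((0, 2 * np.pi),) * 3, Ns=6,
                    finish=optimize.fmin_bfgs)
print(t1)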
Code example #2
File: elastic.py  Project: dwinston/elate
    def Poisson2D(self, x):
        # Optimize this to save some time
        def func1(z): return self.Poisson([x[0], x[1], z])
        r1 = optimize.brute(func1, ((0, np.pi),), Ns=15, full_output=True, finish=optimize.fmin)[0:2]
        def func2(z): return -self.Poisson([x[0], x[1], z])
        r2 = optimize.brute(func2, ((0, np.pi),), Ns=15, full_output=True, finish=optimize.fmin)[0:2]
        return (min(0, r1[1]), max(0, r1[1]), -r2[1])
Code example #3
File: test_optimize.py  Project: yanxun827/scipy
    def test_1D(self):
        # test that for a 1D problem the test function is passed an array,
        # not a scalar.
        def f(x):
            assert_(len(x.shape) == 1)
            assert_(x.shape[0] == 1)
            return x ** 2

        optimize.brute(f, [(-1, 1)], Ns=3, finish=None)
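The contract the test checks (a length-1 ndarray, not a scalar, for 1-D problems) can be observed directly; a standalone sketch:

import numpy as np
from scipy import optimize

def f(x):
    # brute always passes an ndarray, even for a single variable
    assert isinstance(x, np.ndarray) and x.shape == (1,)
    return float(x[0] ** 2)

optimize.brute(f, [(-1, 1)], Ns=3, finish=None)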
Code example #4
File: test_optimize.py  Project: gfyoung/scipy
    def test_brute(self):
        # test fmin
        resbrute = optimize.brute(self.func, self.rranges, args=self.params, full_output=True, finish=optimize.fmin)
        assert_allclose(resbrute[0], self.solution, atol=1e-3)
        assert_allclose(resbrute[1], self.func(self.solution, *self.params), atol=1e-3)

        # test minimize
        resbrute = optimize.brute(self.func, self.rranges, args=self.params, full_output=True, finish=optimize.minimize)
        assert_allclose(resbrute[0], self.solution, atol=1e-3)
        assert_allclose(resbrute[1], self.func(self.solution, *self.params), atol=1e-3)
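For reference, the tuple unpacked from full_output=True is (x0, fval, grid, Jout); a minimal sketch with an assumed toy objective:

import numpy as np
from scipy import optimize

def toy(x, a, b):
    return (x[0] - a) ** 2 + (x[1] - b) ** 2

x0, fval, grid, Jout = optimize.brute(toy, ((-2, 2), (-1, 3)), args=(1.0, 2.0),
                                      Ns=10, full_output=True, finish=optimize.fmin)
print(x0, fval)     # refined minimizer and its objective value
print(grid.shape)   # (2, 10, 10): the evaluation grid
print(Jout.shape)   # (10, 10): objective values on the grid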
Code example #5
File: CoF_arb.py  Project: ChengDaHaI/CCF
def CoF_compute_search_pow_flex_beta(P_con, H_a, is_fixed_power, is_dual_hop, rate_sec_hop=[], mod_scheme='sym_mod', quan_scheme='sym_quan'):
    (M, L) = (H_a.nrows(), H_a.ncols())
    '''
    def cof_pow_beta(x):
        power = x[0:L]
        beta = vector(RR, [1,]+list(x[L:2*L-1]))
        -CoF_compute_fixed_pow_flex(power, P_con, False, H_a, is_dual_hop, rate_sec_hop, mod_scheme, quan_scheme, beta)
    '''
    if is_fixed_power == False:
        cof_pow_beta = lambda x: -CoF_compute_fixed_pow_flex(x[0:L], P_con, False, H_a, is_dual_hop, rate_sec_hop, mod_scheme, quan_scheme, vector(RR, [1,]+list(x[L:2*L-1])))
        Pranges = ((P_con/brute_number, P_con), )*L + ((float(beta_max)/brute_number, beta_max), )
    else:
        cof_pow_beta = lambda x: -CoF_compute_fixed_pow_flex((P_con,P_con), P_con, False, H_a, is_dual_hop, rate_sec_hop, mod_scheme, quan_scheme, vector(RR, [1,x]))
        Pranges = ((float(beta_max)/brute_number, beta_max), )
        
    try:
        if P_Search_Alg == 'brute':
            res_cof = optimize.brute(cof_pow_beta, Pranges, Ns=brute_number, full_output=True, finish=None)
            P_opt = res_cof[0]
            sum_rate_opt = -res_cof[1] # negative! see minus sign in cof_pow_beta
        elif P_Search_Alg == 'brute_fmin':
            res_brute = optimize.brute(cof_pow_beta, Pranges, Ns=brute_fmin_number, full_output=True, finish=None)
            P_brute_opt = res_brute[0]
            sum_rate_brute = -res_brute[1] # negative! see minus sign in cof_pow_beta
            res_fmin = optimize.fmin(cof_pow_beta, P_brute_opt, xtol=1, ftol=0.01, maxiter=brute_fmin_maxiter, full_output=True)
            P_fmin_opt = res_fmin[0]
            sum_rate_opt = -res_fmin[1]
        elif P_Search_Alg == 'brute_brute':
            res_brute1 = optimize.brute(cof_pow_beta, Pranges, Ns=brute_brute_first_number, full_output=True, finish=None)
            P_brute_opt1 = res_brute1[0]
            sum_rate_brute1 = -res_brute1[1] # negative! see minus sign in cof_pow_beta
            Pranges_brute_2 = tuple([(max(0,P_i-P_con/brute_brute_first_number), min(P_con,P_i+P_con/brute_brute_first_number)) for P_i in P_brute_opt1])
            res_brute2 = optimize.brute(cof_pow_beta, Pranges_brute_2, Ns=brute_brute_second_number, full_output=True, finish=None)
            P_brute_opt2 = res_brute2[0]
            sum_rate_brute2 = -res_brute2[1] # negative! see minus sign in cof_pow_beta
            sum_rate_opt = sum_rate_brute2
        # differential evolution algorithm as an alternative global search
        elif P_Search_Alg == 'differential_evolution':
            Pranges = ((float(beta_max) / brute_number, beta_max),)
            res_brute = optimize.differential_evolution(cof_pow_beta, Pranges)
            P_opt = res_brute.x
            sum_rate_opt = -res_brute.fun
        else:
            raise Exception('error: algorithm not supported')
    except:
        print('error in search algorithms')
        raise
    return sum_rate_opt
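The repeated sign flip in cof_pow_beta is the usual way to maximize with optimize.brute, which only minimizes; a minimal sketch with an assumed stand-in rate function:

import numpy as np
from scipy import optimize

def rate(x):                      # stand-in for a sum-rate function
    return np.exp(-(x[0] - 1.0) ** 2)

res = optimize.brute(lambda x: -rate(x), ((-3, 3),), Ns=25,
                     full_output=True, finish=None)
x_opt, best_rate = res[0], -res[1]   # undo the minus sign on the optimum value
print(x_opt, best_rate)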
Code example #6
File: fit.py  Project: JoshuaSBrown/langmuir
    def brute(self, a=0, b=1, find_max=False, return_y=False, **kwargs):
        """
        Wrapper around :func:`scipy.optimize.brute`.  Finds minimum in range.

        :param a: lower x-bound
        :param b: upper x-bound
        :param find_max: find max instead of min
        :param return_y: return x and fit(x)

        :returns: xval, yval

        :type a: float
        :type b: float
        :type find_max: bool
        :type return_y: bool

        >>> fit = FitLinear([1, 2, 3], [4, 5, 6])
        >>> xval, yval = fit.brute(a=-1, b=1)
        """
        if find_max:
            func = lambda x : -1.0 * self.__call__(x)
        else:
            func = self.__call__
        _kwargs = dict(func=func, ranges=[(a, b)])
        _kwargs.update(**kwargs)
        x = float(_optimize.brute(**_kwargs)[0])
        if return_y:
            return float(x), float(self(x))
        return float(x)
Code example #7
File: cnn.py  Project: thorstenkranz/eegpy
    def __call__(self, inputs, states, refs):
        # fall back to random initial values only when none were supplied
        a0 = self.a0 if self.a0 is not None else np.random.random((3, 3)) * 2 - 1
        b0 = self.b0 if self.b0 is not None else np.random.random((3, 3)) * 2 - 1
        z0 = self.z0 if self.z0 is not None else np.random.random() * 2 - 1

        params = np.array(list(a0.flatten()) + list(b0.flatten()) + [z0])
        print("Initial parameters:", params)
        assert params.shape[0] == 19
        
        #TODO: assert all arrays have same dimensions

        # Do optimization
        if self.opt_method == "anneal":
            #Do the optimization 100 times
            par, fopt, it, fcalls, t5, t6, t7 = anneal(self.error,params, lower=-2, upper=2, full_output=True)
        elif self.opt_method == "simplex":
            #Do the optimization 100 times
            #best_fopt = 10e50
            #for i in range(2):
            #    print "\nStep", i
            #    params = np.random.random((19))*0.2-0.1
            #    print "Initial guess", params
            #    self.do_pp = True
                #par_tmp, ftol, it, fcalls,wf, allv_tmp = fmin(self.error,params,xtol=-1,ftol=-1,maxiter=2000,maxfun=10e10,full_output=True,retall=True)
            par, fopt, it, fcalls,wf, allv_tmp = fmin(self.error,params,maxiter=2000,maxfun=10e10,full_output=True,retall=True)
            #    print fopt,# par_tmp
            #    print "Final:", par_tmp
            #    print "Veränderungen:", par_tmp-params
            #    if fopt < best_fopt:
            #        best_fopt=fopt
            #        par=par_tmp
            #        allv = allv_tmp
            #        print "besser!",
            #par, ftol, it, fcalls,wf, allv = fmin(self.error,params,xtol=-1,ftol=-1,maxiter=2000,maxfun=10e10,full_output=True,retall=True)
            #print "params:", params
            #par, ftol, it, fcalls,wf, allv = fmin(self.error,params,full_output=True,retall=True)
            #print par, ftol, it, fcalls, wf, allv
            #print np.array(allv).shape
                    #allv = np.array(allv)
                    #p.figure(3)
                    #for i in range(allv.shape[1]):
                    #    p.plot(allv[:,i])
                    #p.show()
                    #time.sleep(3)
        elif self.opt_method == "brute":
            par = brute(self.error,[slice(-2,2,2j) for i in range(19)])
            print par
            #par = par[0]

        elif self.opt_method == "powell":
            par = fmin_powell(self.error,params)
            print "Veränderungen:", par-params
        else:
            raise ValueError("Optimization method not known")

        par = np.array(par)
        return par[:9].reshape((3,3)), par[9:18].reshape((3,3)), par[18]
Code example #8
File: motif_tools.py  Project: csfoo/TF_binding
def estimate_unbnd_conc_in_region(
        motif, score_cov, atacseq_cov, chipseq_rd_cov,
        frag_len, max_chemical_affinity_change):
    # trim the read coverage to account for the motif length
    trimmed_atacseq_cov = atacseq_cov[len(motif)+1:]
    chipseq_rd_cov = chipseq_rd_cov[len(motif)+1:]

    # normalize the atacseq read coverage
    atacseq_weights = trimmed_atacseq_cov/trimmed_atacseq_cov.max()
    
    # build the smoothing window: a Bartlett window normalized to unit sum
    sm_window = np.bartlett(2 * frag_len)
    sm_window = sm_window / sm_window.sum()

    def build_occ(log_tf_conc):
        raw_occ = logistic(log_tf_conc + score_cov/(R*T))
        occ = raw_occ*atacseq_weights
        smoothed_occ = np.convolve(sm_window, occ/occ.sum(), mode='same')

        return raw_occ, occ, smoothed_occ

    def calc_lhd(log_tf_conc):
        raw_occ, occ, smoothed_occ = build_occ(-log_tf_conc)
        #diff = (100*smoothed_occ - 100*rd_cov/rd_cov.sum())**2
        lhd = -(np.log(smoothed_occ + 1e-12)*chipseq_rd_cov).sum()
        #print log_tf_conc, diff.sum()
        return lhd

    res = brute(calc_lhd, ranges=(
        slice(0, max_chemical_affinity_change, 1.0),))[0]
    log_tf_conc = max(0, min(max_chemical_affinity_change, res))
                      
    return -log_tf_conc
Code example #9
File: make_grm.py  Project: quattro/labtools
def get_posteriors(row, probs, args):
    n, p = probs.shape

    format = row[FORMAT].split(":")
    glidx = -1
    try:
        glidx = format.index("GL")
    except ValueError:
        # if likelihoods aren't available not much we can do
        return

    miss = np.zeros(n, dtype=bool)
    for idx, entry in enumerate(row[FORMAT + 1 :]):
        # get the log10-scaled likelihoods
        likes = np.power(10, [float(v) for v in entry.split(":")[glidx].split(",")])
        miss[idx] = sum(likes) == 3.0

        # convert to posterior probabilities using reference prior
        probs[idx][0] = likes[0]
        probs[idx][1] = likes[1]
        probs[idx][2] = likes[2]

    # estimate the allele frequency by optimizing the likelihood of the data with respect to f
    def nll(f):
        fs = np.array([(1 - f) ** 2, 2 * f * (1 - f), f ** 2])
        # this sums the negative log-likelihood for each sample
        # NLL(D|f) =  - sum_i log( sum_g Pr(Reads_i, G_i = g) * Pr(g | f) )
        val = -np.log((probs * fs.T).sum(axis=1)).sum()
        return val

    if args.maf == MAF_R:
        # estimate MAF directly from reads
        vals = [slice(0.01, 0.49, 0.01)]
        f = opt.brute(nll, ranges=vals)[0]

        fs = np.array([(1 - f) ** 2, 2 * f * (1 - f), f ** 2])
        # convert likelihoods to posterior probabilities
        probs = ((probs * fs).T / (probs * fs).sum(axis=1)).T
        maf = f
        # get posterior mean = dosage
        dose = np.array([np.sum(G * x) for x in probs])

        if args.encoding == ENC_B:
            # round to best-guess genotype
            dose = np.round(dose)
    else:
        # assume flat prior & scale likelihoods
        probs = (probs.T / probs.sum(axis=1)).T

        # get posterior mean = dosage
        dose = np.array([np.sum(G * x) for x in probs])
        if args.encoding == ENC_D:
            # estimate MAF from genotypes
            maf = np.mean(dose) / 2.0
        else:
            # round to best-guess genotype
            dose = np.round(dose)
            maf = np.mean(dose) / 2.0

    return dose, maf, miss
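The slice-based 1-D grid in the MAF branch above is a common brute idiom; a self-contained sketch with an assumed toy probs matrix:

import numpy as np
from scipy import optimize as opt

# toy per-sample genotype likelihoods (n samples x 3 genotypes); values assumed
probs = np.array([[0.80, 0.15, 0.05],
                  [0.10, 0.60, 0.30],
                  [0.05, 0.25, 0.70]])

def nll(f):
    f = np.asarray(f).ravel()[0]   # brute passes a length-1 array
    fs = np.array([(1 - f) ** 2, 2 * f * (1 - f), f ** 2])  # HWE genotype prior
    return -np.log(probs @ fs).sum()

f_hat = opt.brute(nll, ranges=[slice(0.01, 0.49, 0.01)])[0]
print(f_hat)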
Code example #10
def fit_GLC_grid( subjdata, z_limit=ZLIMIT ):
    #thesedata=subjdata
    dims = len(subjdata[0])-1
    
    bounds = [(.00001,50)] + [(-25,25) for _ in range( dims+1 ) ]
    optargs = ( subjdata, z_limit, None, True)
    
    xopt, fopt, grid, Jout = optimize.brute(func=negloglike_reduced 
                                              , ranges = bounds
                                              , args = optargs
                                              , Ns = 5
                                              , full_output=True
                                             )
    
    from scipy.ndimage import minimum_filter
    from scipy.ndimage import generate_binary_structure
    
    neighborhood = generate_binary_structure( dims+2, dims+2 )
    local_mins = minimum_filter( Jout, footprint=neighborhood ) == Jout
    min_coords = np.array([ g[local_mins] for g in grid ]).T
    xoptglobal = xopt
    foptglobal = fopt
    for coords in min_coords:
        xopt, fopt, iter, im, sm = optimize.fmin(func=negloglike_reduced 
                                                  , x0 = coords
                                                  , args = optargs
                                                  , full_output=True
                                                 )
        if fopt < foptglobal:
            xoptglobal = xopt
            foptglobal = fopt
    
    return xoptglobal, foptglobal
Code example #11
    def fit(self, T_prim, delta_mag, delta_mag_error, T_range=(3500, 9000)):
        """
        Fit for the companion temperature given a primary temperature and delta-magnitude measurement

        Parameters:
        ===========
        - T_prim:           float
                            The primary star temperature (in Kelvin)

        - delta_mag:        float
                            The magnitude difference between the primary and companion

        - delta_mag_error:  float
                            Uncertainty in the magnitude difference

        - T_range:          tuple of size 2
                            The lower and upper bounds on the companion temperature.
        """

        def lnlike(T2, T1, dm, dm_err):
            dm_synth = self.__call__(T2) - self.__call__(T1)
            logging.debug('T2 = {}: dm = {}'.format(T2, dm_synth))
            return 0.5 * (dm - dm_synth)**2 / dm_err**2

        T_sec = brute(lnlike, [T_range], args=(T_prim, delta_mag, delta_mag_error))
        return T_sec
Code example #12
File: test_brute.py  Project: lmfit/lmfit-py
def test_brute_lmfit_vs_scipy_default(params_lmfit):
    """TEST 1: using finite bounds with Ns=20, keep=50 and brute_step=None."""
    assert params_lmfit['x'].brute_step is None
    assert params_lmfit['y'].brute_step is None

    rranges = ((-4, 4), (-2, 2))
    ret = optimize.brute(func_scipy, rranges, args=params, full_output=True,
                         Ns=20, finish=None)
    mini = lmfit.Minimizer(func_lmfit, params_lmfit)
    out = mini.minimize(method='brute', Ns=20)

    assert out.method == 'brute'
    assert_equal(out.nfev, 20**len(out.var_names))  # Ns * nmb varying params
    assert_equal(len(out.candidates), 50)  # top-50 candidates are stored

    assert_equal(ret[2], out.brute_grid)  # grid identical
    assert_equal(ret[3], out.brute_Jout)  # function values on grid identical

    # best-fit values identical / stored correctly in MinimizerResult
    assert_equal(ret[0][0], out.brute_x0[0])
    assert_equal(ret[0][0], out.params['x'].value)

    assert_equal(ret[0][1], out.brute_x0[1])
    assert_equal(ret[0][1], out.params['y'].value)

    assert_equal(ret[1], out.brute_fval)
    assert_equal(ret[1], out.residual)
Code example #13
File: functional.py  Project: ChadFulton/statsmodels
def _min_max_band(args):
    """
    Min and max values at `idx`.

    Global optimization to find the extrema per component.

    Parameters
    ----------
    args: list
        It is a list of an idx and other arguments as a tuple:
            idx : int
                Index value of the components to compute
        The tuple contains:
            band : list of float
                PDF values `[min_pdf, max_pdf]` to be within.
            pca : statsmodels Principal Component Analysis instance
                The PCA object to use.
            bounds : sequence
                ``(min, max)`` pair for each components
            ks_gaussian : KDEMultivariate instance

    Returns
    -------
    band : tuple of float
        ``(max, min)`` curve values at `idx`

    """
    idx, (band, pca, bounds, ks_gaussian, use_brute, seed) = args
    if have_de_optim and not use_brute:
        max_ = differential_evolution(_curve_constrained, bounds=bounds,
                                      args=(idx, -1, band, pca, ks_gaussian),
                                      maxiter=7, seed=seed).x
        min_ = differential_evolution(_curve_constrained, bounds=bounds,
                                      args=(idx, 1, band, pca, ks_gaussian),
                                      maxiter=7, seed=seed).x
    else:
        max_ = brute(_curve_constrained, ranges=bounds, finish=fmin,
                     args=(idx, -1, band, pca, ks_gaussian))

        min_ = brute(_curve_constrained, ranges=bounds, finish=fmin,
                     args=(idx, 1, band, pca, ks_gaussian))

    band = (_inverse_transform(pca, max_)[0][idx],
            _inverse_transform(pca, min_)[0][idx])
    return band
Code example #14
    def find_min_s(self):
        """ Use an initial brute force search followed by scipy
        optimisation to find the minimum of the s function.
        """
        search_bounds = [(0, 1)] * self.dimension
        grid = tuple([(0, 1, self.search_radius)] * self.dimension)
        f = self.s_eq
        minimised = opt.minimize(f, opt.brute(f, grid), bounds=search_bounds,
                                 method='L-BFGS-B', tol=1e-16, options={'disp': False})
        return minimised['fun']
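The same coarse-grid-then-refine idea, written standalone (s_eq here is an assumed stand-in for the class's s function):

import numpy as np
from scipy import optimize as opt

def s_eq(x):
    return np.sin(5 * x[0]) + (x[0] - 0.3) ** 2   # toy multimodal objective

grid = ((0, 1, 0.05),)                 # (low, high, step) triple per dimension
x_coarse = opt.brute(s_eq, grid, finish=None)
res = opt.minimize(s_eq, x_coarse, bounds=[(0, 1)], method='L-BFGS-B')
print(res.x, res.fun)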
Code example #15
File: rerate.py  Project: rajatmehndiratta/reRate
    def rate(self):
        """
        Runs brute-force optimization to find a player rating with the highest odds
        of the player's history happening exactly the way it did.
        """
        function = self.getOverallOdds()
        ranges = self.getEdges()
        ranges = (ranges,)
        return int(optimize.brute(function, ranges, Ns=max(len(self.store), 80), finish=None))
Code example #16
File: Tools.py  Project: MPBA/pyHRV
    def algorithm(cls, signal, params):
        delta = params['delta']
        opt_method = params['opt_method']
        complete = params['complete']
        par_ranges = params['par_ranges']
        maxiter = params['maxiter']
        n_step_1 = params['n_step_1']
        n_step_2 = params['n_step_2']

        # TODO (Andrea): explain "add **kwargs"

        if params['loss_func'] == 'ben':
            loss_function = OptimizeBateman._loss_benedek
        elif params['loss_func'] == 'all':
            loss_function = OptimizeBateman._loss_function_all

        if opt_method == 'grid':
            min_T1 = float(par_ranges[0])
            max_T1 = float(par_ranges[1])
    
            min_T2 = float(par_ranges[2])
            max_T2 = float(par_ranges[3])
            step_T1 = (max_T1 - min_T1) / n_step_1
            step_T2 = (max_T2 - min_T2) / n_step_2
            rranges = (slice(min_T1, max_T1 + step_T1, step_T1), slice(min_T2, max_T2 + step_T2, step_T2))
            x0, loss, grid, loss_grid = _opt.brute(loss_function, rranges,
                                                   args=(signal, delta),
                                                   full_output=True, finish=None)
            exit_code = -1

        elif opt_method == 'bsh':
            x_opt = _opt.basinhopping(loss_function, [0.75, 2.],
                                      niter=maxiter,
                                      minimizer_kwargs={
                                          "bounds": ((par_ranges[0], par_ranges[1]), (par_ranges[2], par_ranges[3])),
                                          "args": (signal, delta)},
                                      disp=False, niter_success=10)
            x0 = x_opt.x
            loss = float(x_opt.fun)

            if x_opt.minimization_failures == maxiter:
                exit_code = 1
            else:
                exit_code = 0
        else:
            cls.error("opt_method not understood")
            return None

        if complete:
            x0_min, loss_min, niter,\
            nfuncalls, warnflag, allvec = _opt.fmin(loss_function, x0,
                                                    args=(signal, delta),
                                                    full_output=True)
            return x0, x0_min, loss, loss_min, exit_code, warnflag
        else:
            return x0, loss, exit_code
Code example #17
def find_optimal_spread(N, cov_reduction, n):
    """ Use numerical optimization to find best mean_spread value for a given number
    of gaussians N and target covariance reduction cov_reduction. """
    
    def loss(mean_spread):
        return find_optimal_weights(N, cov_reduction, mean_spread, n)[1]
    
    opt_spread = optimize.brute(loss, ranges=[(0,1)], Ns=200)
    opt_w, isd = find_optimal_weights(N, cov_reduction, opt_spread, n)
    return opt_spread, opt_w, isd
Code example #18
File: elastic.py  Project: dwinston/elate
def minimize(func, dim):
    if dim == 2:
        r = ((0, np.pi), (0, np.pi))
        n = 25
    elif dim == 3:
        r = ((0, np.pi), (0, np.pi), (0, np.pi))
        n = 10

    # TODO -- try basin hopping or annealing
    return optimize.brute(func, r, Ns=n, full_output=True, finish=optimize.fmin)[0:2]
Code example #19
File: optimizer.py  Project: isomerase/RoboSkeeter
    def _optimize_brute_force(self):
        x0, fval, grid, Jout = brute(
            self._simulation_wrapper,
            self.bounds,
            Ns=20,
            # args=params,
            full_output=True,
            finish=None)

        return [x0, fval, grid, Jout]
Code example #20
def find_P_prob_params_corr3(N, mean, corr2, corr3, method="tnc"):
# -----------------------------------------------------------------------------
    """
    Taking the parameters it returns P_probs - the probability distribution for
    all Ps, including P=0 
    """
    #from scipy.optimize import fmin_l_bfgs_b
    from scipy.optimize import fmin_tnc # seems better
    from scipy.optimize import anneal
    from scipy.optimize import brute

    assert N > 2
    assert mean < 1
    # TODO - assert corr2 < f(N,mean)
    # TODO - assert corr3 < f(N,mean,corr2) 

    mean_des = mean           # desired average
    corr_des = corr2          # desired correlation
    corr3_des = corr3          # desired correlation
    P_probs_m = np.zeros(N)   # candidate P_probs_m
    P_probs_m[-1] = 1.        # candidate parameter J
    
    if mean == 0:
        result = np.zeros(N+1)
        result[0] = 1.
        return result
    
    def opt_func(P_probs_m, N, mean_des, corr_des):
        P_probs = __get_P_probs__(N, mean_des, P_probs_m)
        corr_curr = corr2_from_P_probs(N, P_probs)
        corr_diff = (corr_curr - corr_des)
        corr3_curr = corr3_from_P_probs(N, P_probs)
        corr3_diff = (corr3_curr - corr3_des)
        #print corr_curr, corr3_curr, P_probs_m
        #return (abs(corr_diff)/abs(corr_des + 0.0000001) + abs(corr3_diff)/abs(corr3_des + 0.0000001))**.1
        return 2*abs(corr_diff) + abs(corr3_diff)
    
    if method == "tnc":
        bounds = [(0,1) for i in xrange(N)]
        opt_output = fmin_tnc(opt_func, P_probs_m, args=(N,mean_des,corr_des),\
                            approx_grad = True, bounds=bounds)
        print opt_output
        opt_result = opt_output[0]
        
    if method == "anneal":
        opt_output = anneal(opt_func, P_probs_m, args=(N,mean_des,corr_des),\
                        lower = np.zeros(N), upper = np.ones(N))
        print opt_output
        opt_result = opt_output[0]
    
    if method == "brute":
        opt_result = brute(opt_func, args=(N,mean_des,corr_des),\
                        ranges = [(0.00000000001,0.99999999999) for i in xrange(N)], Ns=20)
    
    return __get_P_probs__(N, mean_des, opt_result)
Code example #21
File: BruteForce.py  Project: nealegibson/Infer
def Brute(LogLikelihood, par, func_args, epar, type="max", Nsig=3, Niter=1000, verbose=True):
    """
  Function wrapper to find the maximum (or min) of a function using the scipy brute
  force function
  
  LogLikelihood - function to optimise, of the form func(parameters, func_args). Doesn't
    need to be a log likelihood of course - just that's what I use it for!
  par - array of parameters to the func to optimise
  func_args - additional arguments to the func - usually as a tuple
  epar - array of parameter errors
  Nsig - no of sigma to search from the error bars
  Niter - approximate number of iterations - rounded up to next integer for no of points per variable
  type - either max or min to optimise the function
  
  """

    # first get fixed parameters
    fixed = ~(np.array(epar) > 0) * 1
    Nvar = fixed.size - fixed.sum()  # no of variable parameters

    # get number of points to slice each variable parameter - round up!
    # first round to 5 dp to avoid machine prec errors
    Npoints = np.ceil(np.round(Niter ** (1.0 / Nvar), decimals=5))

    # define parameter ranges as slice objects
    delta = 0.000001  # make delta par for small increment
    par_ranges = [
        slice(p - Nsig * e, p + (Nsig + delta) * e, 2 * Nsig * e / (Npoints - 1.0)) for p, e in zip(par, epar)
    ]

    # redefine for fixed parameters
    if verbose:
        print "Brute parameter ranges: ({:d} evaluations)".format(int(Npoints ** Nvar))
    for i, f in enumerate(fixed):
        if f == 1:  # redefine slice so only actual par is taken
            par_ranges[i] = slice(par[i], par[i] + delta, 0.1)
        if verbose:
            print " p[{}] => {}".format(i, np.r_[par_ranges[i]])

    assert type == "max" or type == "min", "type must be max or min"
    if type == "max":
        OptFunc = NegFunc
    elif type == "min":
        OptFunc = PosFunc

    # run the brute force algorithm, without finishing algorithm
    B = brute(OptFunc, par_ranges, args=(LogLikelihood, func_args), full_output=1, finish=None)

    # print out results
    if verbose:
        print "Brute force {} grid point @ {}\n".format(type, B[0])

    # return the optimised position
    return B[0]
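The delta-width slice used above yields a single grid point per pinned parameter; a minimal standalone sketch of the trick (neg_like and the parameter values are assumptions):

import numpy as np
from scipy.optimize import brute

def neg_like(p):
    return (p[0] - 2.0) ** 2 + (p[1] - 5.0) ** 2   # stand-in objective

par = [2.0, 5.0]
epar = [0.5, 0.0]          # error of 0 marks the second parameter as fixed
delta = 1e-6
par_ranges = []
for p, e in zip(par, epar):
    if e > 0:              # vary within +/- 3 sigma on a coarse grid
        par_ranges.append(slice(p - 3 * e, p + 3 * e, 6 * e / 9.0))
    else:                  # a slice of width delta contains exactly one point
        par_ranges.append(slice(p, p + delta, 0.1))

best = brute(neg_like, par_ranges, finish=None)
print(best)                # the fixed coordinate stays at 5.0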
Code example #22
def estimate(samples):
    N = 10
    MAXS = np.max(samples, axis=0)
    MINS = np.min(samples, axis=0)
    # print(MINS, adjust(samples, MINS))
    # print(MAXS, adjust(samples, MAXS))
    rrange = list(zip(MINS, MAXS))
    res = optimize.brute(lambda x: -adjust(samples, x),
                         rrange, Ns=N, full_output=False)
    print(res, adjust(samples, res))
    return res, select(samples, res)
Code example #23
File: UmpBase.py  Project: alaofeng/abu
    def brust_min(self):
        """
        Global optimum search
        :return:
        """
        cprs = self.cprs
        optv = sco.brute(self.min_func_improved,
                         ((round(cprs['lps'].min(), 2), 0, 0.5),
                          (round(cprs['lms'].min(), 2), round(cprs['lms'].max(), 3), 0.01),
                          (round(cprs['lrs'].min(), 2), round(cprs['lrs'].max(), 2), 0.1)),
                         finish=None)
        return optv
Code example #24
def fit_traffic_distribution(tt, n_component):
    tt.sort()
    mean = slice(tt[0], tt[0] + 10, 2.0)
    std = slice(2, 10, 2.0)
    eta = [slice(0.1, 0.7, 0.1)] * (n_component - 1)
    delays = [slice(30, 60, 10)]
    for _ in range(1, n_component - 1):
        delays.append(slice(max(5, delays[-1].start - 5), max(5, delays[-1].stop - 5), delays[-1].step))

    ranges = (mean, std) + tuple(eta) + tuple(delays)
    (x0, fval, grid, Jout) = opt.brute(func=obj_fun, ranges=ranges, args=[tt], full_output=True)
    return x0, fval, grid, Jout
Code example #25
    def fit(self, T_prim, delta_mag, delta_mag_error, T_range=(3500, 9000)):
        """
        Fit for the companion temperature given a primary temperature and delta-magnitude measurement
        """

        def lnlike(T2, T1, dm, dm_err):
            dm_synth = self.__call__(T2) - self.__call__(T1)
            logging.debug('T2 = {}: dm = {}'.format(T2, dm_synth))
            return 0.5 * (dm - dm_synth)**2 / dm_err**2

        #T_sec = fmin(lnlike, guess_T, args=(T_prim, delta_mag, delta_mag_error))
        T_sec = brute(lnlike, [T_range], args=(T_prim, delta_mag, delta_mag_error))
        return T_sec
Code example #26
def find_best_cutoff(y1, ypp, verbose=0):
    from scipy import optimize

    def f(x, *params):
        y_true, ypp = params
        y_pred = (ypp > x).astype(int)
        res = metrics.f1_score(y_true, y_pred)
        # print("x:", x, "res:", res)
        return -res
    rranges = (slice(0, 1, 0.01),)
    resbrute = optimize.brute(f, rranges, args=(y1, ypp), full_output=False,
                              finish=optimize.fmin)
    if verbose:
        print("resbrute:", resbrute)
    return resbrute[0]
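A runnable variant of the same threshold search (the toy labels and scores are assumptions):

import numpy as np
from scipy import optimize
from sklearn import metrics

y_true = np.array([0, 0, 1, 0, 1, 1, 1, 0])
y_prob = np.array([0.1, 0.4, 0.35, 0.2, 0.8, 0.7, 0.65, 0.3])

def neg_f1(x):
    y_pred = (y_prob > x[0]).astype(int)
    return -metrics.f1_score(y_true, y_pred)

best = optimize.brute(neg_f1, (slice(0.05, 0.95, 0.01),), finish=None)
print(best)   # threshold with the highest F1 on this toy data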
Code example #27
File: cluster_profile.py  Project: EiffL/python_lib
def confidence(opts, r_scale, bg_density, R_proj, grid, dof):
    
    def get_range(value, grid):
        a = tuple(10.0 ** (np.log10(value) + np.array([-grid / 2, grid / 2]) * 1.4 / grid))
        b = (np.max(a) - np.min(a)) / grid * 2.0
        return a + (b,)

    ranges = (get_range(r_scale, grid), get_range(bg_density, grid))

    if opts.model == 'beta':
        res = brute(bm_proj_maxlik_bg, ranges, args = (R_proj, opts.beta, ), full_output = True)

    else:
        res = brute(nfw_proj_maxlik_bg, ranges, args = (R_proj, ), full_output = True)

    x, y = np.array(res[2]), np.array(res[3]).flatten()
    x = x.reshape(x.shape[0], x.shape[1] * x.shape[2]).T
    
    index = (2.0 * (y - np.min(y)) <= chi2.ppf(0.68, dof))
    limits = x[index, 0]

    return np.min(limits), np.max(limits)
Code example #28
File: gabor.py  Project: mekman/popeye
def brute_force_search(bounds, response, error_function,
                       deg_x, deg_y, stim_arr, 
                       tr_length, frames_per_tr, ppd):

    [x0, y0, s0, hrf0, theta0, phi0, cpd0], err,  _, _ =\
        brute(error_function,
              args=(response, deg_x, deg_y, stim_arr, tr_length, frames_per_tr, ppd),
              ranges=bounds,
              Ns=4,
              finish=None,
              full_output=True,
              disp=False)

    # return the estimates
    return x0, y0, s0, hrf0, theta0, phi0, cpd0
Code example #29
File: experiments.py  Project: strazdas/global_opt
def find_L(f_name):
    from scipy.optimize import minimize
    from scipy.optimize import brute, basinhopping

    D = get_D(f_name)
    grad = get_grad(f_name)
    lb = get_lb(f_name)
    ub = get_ub(f_name)

    def negative_grad_norm(X, grad, lb, ub):
        for i in range(len(lb)):
            if lb[i] > X[i] or ub[i] < X[i]:
                return float('inf')
        return -enorm(grad(X))
    x0 = brute(negative_grad_norm, (lb, ub), Ns=4000, args=(grad, lb, ub), disp=False)
    return -negative_grad_norm(x0, grad, lb, ub)
Code example #30
File: test_brute.py  Project: lmfit/lmfit-py
def test_brute_lmfit_vs_scipy_Ns(params_lmfit):
    """TEST 2: using finite bounds, with Ns=40 and brute_step=None."""
    rranges = ((-4, 4), (-2, 2))
    ret = optimize.brute(func_scipy, rranges, args=params, full_output=True,
                         Ns=40, finish=None)
    mini = lmfit.Minimizer(func_lmfit, params_lmfit)
    out = mini.minimize(method='brute', Ns=40)

    assert_equal(ret[2], out.brute_grid)  # grid identical
    assert_equal(ret[3], out.brute_Jout)  # function values on grid identical
    assert_equal(out.nfev, 40**len(out.var_names))  # Ns * nmb varying params

    # best-fit values and function value identical
    assert_equal(ret[0][0], out.brute_x0[0])
    assert_equal(ret[0][1], out.brute_x0[1])
    assert_equal(ret[1], out.brute_fval)
Code example #31
def kplot(y, label, bw=0.1):
    k = gaussian_kde(y.stack().dropna(), bw)
    m = brute(lambda z: -k(z), ((-20, 10), ))
    plt.plot(x, k(x), label='{}: {:.2f}'.format(label, m[0]))
    return m[0]
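Written out, the one-liner above finds the mode of a kernel density estimate by brute minimization of its negative; a standalone sketch with assumed data:

import numpy as np
from scipy.stats import gaussian_kde
from scipy.optimize import brute

y = np.random.default_rng(0).normal(loc=-2.0, scale=1.0, size=500)  # toy sample
k = gaussian_kde(y, 0.1)

m = brute(lambda z: -k(z)[0], ((-20, 10),))  # default finish=fmin refines the grid point
print(m[0])   # estimated mode, near -2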
Code example #32
# -*- coding:utf-8 -*-

import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as spo


def fo(p):
    # tuple-parameter syntax from Python 2 replaced by explicit unpacking
    x, y = p
    z = np.sin(x) + 0.05 * x**2 + np.sin(y) + 0.05 * y**2
    return z


opt1 = spo.brute(fo, ((-10, 10.1, 1), (-10, 10.1, 1)),
                 finish=spo.fmin,
                 full_output=False)
# opt1 = spo.brute(fo, ((-10, 10.1, 0.1), (-10, 10.1, 0.01)), finish=None, full_output=False)

print(opt1)

print(fo(opt1))

opt2 = spo.fmin(fo, opt1, xtol=0.1, ftol=0.01)
print(opt2)

print(fo(opt2))

opt2 = spo.fmin(fo, (2.0, 2.0), maxiter=250)
print(opt2)
Code example #33
import numpy as np
import pytest
import scipy.optimize as opt

# assumed setup (not shown in the original snippet): a quartic with two local
# minima and a set collecting the unique solutions found so far
def quartic(x):
    x = np.atleast_1d(x)[0]
    return x**4 - 20.0 * x**2 + 3.0 * x

minima = set()

for guess in [10, -10, 3, 4]:
    result = opt.minimize(quartic, x0=np.array([guess]))
    print(f"Minimize quartic from guess={guess:8.3f} x_min={result.x[0]:10.7f} y_min={result.fun:12.7f} ")
    # Keep a list of all unique solutions
    # pytest.approx: Comparing floating point numbers. Checks for relative tolerance of 1e-6 or absolute tolerance of 1e-12
    # break: breaks out of innermost for or while loop
    # else:  after for or while loop only runs if no breaks occur
    for minimum in minima:
        if pytest.approx(minimum) == result.x[0]:
            break
    else:
        minima.add(result.x[0])
print(f"All local minima: {minima}")

print("\nBrute force solution evaluates function at various steps across range")
print(opt.brute(quartic, ranges=[(-100, 100)], Ns=1000))

print("\nEven brute force doesn't work if we don't have enough steps. Example with 20 steps instead of 1000")
print(opt.brute(quartic, ranges=[(-100, 100)], Ns=20))

print("\nbasinhopping finds global minimum from an initial guess. Number of iterations = 100 is not enough in this example")
print(opt.basinhopping(quartic, x0=10, niter=1000))

print("\nshgo is another scipy optimizer to find global minimum")
print(opt.shgo(quartic, bounds=[(-1000, 1000)]))

print("\ndifferential_evolution is another scipy optimizer to find global minimum")
print(opt.differential_evolution(quartic, bounds=[(-1000, 1000)]))

print("\ndual_annealing is another scipy optimizer to find global minimum")
print(opt.dual_annealing(quartic, bounds=[(-1000, 1000)]))
Code example #34
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize

def f(x):    # Defining the function
    return x**3 + x**2 + np.sin(x) + np.cos(x)

x = np.arange(-50, 50, 0.01)
# First two arguments are the limits and the last argument is the step
plt.plot(x, f(x))
plt.show()    # To show the graph of the defined function

# This algorithm runs a gradient descent on the function from the given starting
# point and outputs a minimum with zero gradient and positive second-order derivative
optimize.fmin_bfgs(f, 0)
# First argument is the function's name and second argument is the starting point of the descent

# basinhopping combines a local optimizer with stochastic sampling of starting points
# for the local optimizer, giving a costlier global minimum search:
optimize.basinhopping(f, 0)
# The brute-force method can also be used for global optimization, but it is less
# efficient; brute expects a range per variable, not a starting point
optimize.brute(f, ((-50, 50),))
# find the local minimum within an interval for the variable
optimize.fminbound(f, -50, 50)    # Arguments are the interval for the variable.

# estimate the roots of a scalar function by solving the equation f(x) = 0;
# fsolve needs a starting estimate
roots = optimize.fsolve(f, 0)

# using least-squares curve fitting
x_data = np.arange(-100, 100, 0.1)
y_data = g(x_data) + np.random.randn(x_data.size)


def h(x, a, b):
    return a*x + b*np.cos(x)

initial_guess_ab = [1, 1]
variables, variables_covariance = optimize.curve_fit(h, x_data, y_data, initial_guess_ab)
Code example #35
    def plot(self,
             x=None,
             y=None,
             yerr=None,
             ax=None,
             plot_kws={},
             plot_seperate=True,
             show=True,
             legend=None,
             data_legend=None,
             xlabel='Frequency (MHz)',
             ylabel='Counts'):
        """Routine that plots the hfs of all the models,
        possibly on top of experimental data.

        Parameters
        ----------
        x: list of arrays
            Experimental x-data. If list of Nones, a suitable region around
            the peaks is chosen to plot the hfs.
        y: list of arrays
            Experimental y-data.
        yerr: list of arrays
            Experimental errors on y.
        plot_seperate: boolean, optional
            Controls if the underlying models are drawn as well, or only
            the sum. Defaults to True.
        no_of_points: int
            Number of points to use for the plot of the hfs if
            experimental data is given.
        ax: matplotlib axes object
            If provided, plots on this axis.
        show: boolean
            If True, the plot will be shown at the end.
        legend: string, optional
            If given, an entry in the legend will be made for the spectrum.
        data_legend: string, optional
            If given, an entry in the legend will be made for the experimental
            data.
        xlabel: string, optional
            If given, sets the xlabel to this string. Defaults to 'Frequency (MHz)'.
        ylabel: string, optional
            If given, sets the ylabel to this string. Defaults to 'Counts'.
        indicate: boolean, optional
            If set to True, dashed lines are drawn to indicate the location of the
            transitions, and the labels are attached. Defaults to False.
        model: boolean, optional
            If given, the region around the fitted line will be shaded, with
            the luminosity indicating the pmf of the Poisson
            distribution characterized by the value of the fit. Note that
            the argument *yerr* is ignored if *model* is True.
        normalized: Boolean
            If True, the data and fit are plotted normalized such that the highest
            data point is one.
        distance: float, optional
            Controls how many FWHM deviations are used to generate the plot.
            Defaults to 4.

        Returns
        -------
        fig, ax: matplotlib figure and axes"""
        if ax is None:
            fig, ax = plt.subplots(1, 1)
        else:
            fig = ax.get_figure()
        toReturn = fig, ax

        if x is not None and y is not None:
            try:
                ax.errorbar(x,
                            y,
                            yerr=[y - yerr['low'], yerr['high'] - y],
                            fmt='o',
                            label=data_legend)
            except:
                ax.errorbar(x, y, yerr=yerr, fmt='o', label=data_legend)

        plot_kws['background'] = False
        plot_copy = copy.deepcopy(plot_kws)
        plot_copy['model'] = False
        x_points = np.array([])
        line_counter = 1
        for m in self.models:
            plot_copy['legend'] = 'I=' + str(m.I)
            try:
                color = ax.lines[-1].get_color()
            except IndexError:
                color = next(ax._get_lines.prop_cycler)['color']
            m.plot(x=x, y=y, yerr=yerr, show=False, ax=ax, plot_kws=plot_copy)
            # plot_kws['indicate'] = False
            x_points = np.append(x_points, ax.lines[-1].get_xdata())
            if not plot_seperate:
                ax.lines.pop(-1)
            if x is not None:
                ax.lines.pop(-1 - plot_seperate)
            while not next(ax._get_lines.prop_cycler)['color'] == color:
                pass
            if plot_seperate:
                c = next(ax._get_lines.prop_cycler)['color']
                for l in ax.lines[line_counter:]:
                    l.set_color(c)
                while not next(ax._get_lines.prop_cycler)['color'] == c:
                    pass
            line_counter = len(ax.lines)
        x = np.sort(x_points)
        model = plot_kws.pop('model', False)
        if model:
            colormap = plot_kws.pop(
                'colormap',
                'bone_r',
            )
            min_loc = [s.locations.min() for s in self.models]
            max_loc = [s.locations.max() for s in self.models]
            range = (min(min_loc), max(max_loc))
            from scipy import optimize
            max_counts = np.ceil(-optimize.brute(lambda x: -self(x), (range, ),
                                                 full_output=True,
                                                 Ns=1000,
                                                 finish=optimize.fmin)[1])
            min_counts = [
                self.params[par_name].value for par_name in self.params
                if par_name.endswith('Background0')
            ][0]
            min_counts = np.floor(max(0, min_counts - 3 * min_counts**0.5))
            y = np.arange(min_counts, max_counts + 3 * max_counts**0.5 + 1)
            X, Y = np.meshgrid(x, y)
            from scipy import stats
            z = stats.poisson(self(X)).pmf(Y)

            z = z / z.sum(axis=0)
            ax.imshow(z,
                      extent=(x.min(), x.max(), y.min(), y.max()),
                      cmap=plt.get_cmap(colormap))
            line, = ax.plot(x, self(x), label=legend, lw=0.5)
        else:
            ax.plot(x, self(x))
        ax.legend(loc=0)

        # ax.set_xlabel(xlabel)
        # ax.set_ylabel(ylabel)

        if show:
            plt.show()
        return toReturn
Code example #36
File: holtwinters.py  Project: RUrlus/statsmodels
    def fit(self,
            smoothing_level=None,
            smoothing_slope=None,
            smoothing_seasonal=None,
            damping_slope=None,
            optimized=True,
            use_boxcox=False,
            remove_bias=False,
            use_basinhopping=False,
            start_params=None,
            initial_level=None,
            initial_slope=None,
            use_brute=True):
        """
        Fit the model

        Parameters
        ----------
        smoothing_level : float, optional
            The alpha value of the simple exponential smoothing, if the value
            is set then this value will be used as the value.
        smoothing_slope :  float, optional
            The beta value of the Holt's trend method, if the value is
            set then this value will be used as the value.
        smoothing_seasonal : float, optional
            The gamma value of the holt winters seasonal method, if the value
            is set then this value will be used as the value.
        damping_slope : float, optional
            The phi value of the damped method, if the value is
            set then this value will be used as the value.
        optimized : bool, optional
            Estimate model parameters by maximizing the log-likelihood
        use_boxcox : {True, False, 'log', float}, optional
            Should the Box-Cox transform be applied to the data first? If 'log'
            then apply the log. If float then use lambda equal to float.
        remove_bias : bool, optional
            Remove bias from forecast values and fitted values by enforcing
            that the average residual is equal to zero.
        use_basinhopping : bool, optional
            Using Basin Hopping optimizer to find optimal values
        start_params : array, optional
            Starting values to used when optimizing the fit.  If not provided,
            starting values are determined using a combination of grid search
            and reasonable values based on the initial values of the data
        initial_level : float, optional
            Value to use when initializing the fitted level.
        initial_slope : float, optional
            Value to use when initializing the fitted slope.
        use_brute : bool, optional
            Search for good starting values using a brute force (grid)
            optimizer. If False, a naive set of starting values is used.

        Returns
        -------
        results : HoltWintersResults class
            See statsmodels.tsa.holtwinters.HoltWintersResults

        Notes
        -----
        This is a full implementation of the holt winters exponential smoothing
        as per [1]. This includes all the unstable methods as well as the
        stable methods. The implementation of the library covers the
        functionality of the R library as much as possible whilst still
        being Pythonic.

        References
        ----------
        [1] Hyndman, Rob J., and George Athanasopoulos. Forecasting: principles
            and practice. OTexts, 2014.
        """
        # Variable renames to alpha,beta, etc as this helps with following the
        # mathematical notation in general
        alpha = float_like(smoothing_level, 'smoothing_level', True)
        beta = float_like(smoothing_slope, 'smoothing_slope', True)
        gamma = float_like(smoothing_seasonal, 'smoothing_seasonal', True)
        phi = float_like(damping_slope, 'damping_slope', True)
        l0 = self._l0 = float_like(initial_level, 'initial_level', True)
        b0 = self._b0 = float_like(initial_slope, 'initial_slope', True)
        if start_params is not None:
            start_params = array_like(start_params,
                                      'start_params',
                                      contiguous=True)
        data = self._data
        damped = self.damped
        seasoning = self.seasoning
        trending = self.trending
        trend = self.trend
        seasonal = self.seasonal
        m = self.seasonal_periods
        opt = None
        phi = phi if damped else 1.0
        if use_boxcox == 'log':
            lamda = 0.0
            y = boxcox(data, lamda)
        elif isinstance(use_boxcox, float):
            lamda = use_boxcox
            y = boxcox(data, lamda)
        elif use_boxcox:
            y, lamda = boxcox(data)
        else:
            lamda = None
            y = data.squeeze()
        self._y = y
        lvls = np.zeros(self.nobs)
        b = np.zeros(self.nobs)
        s = np.zeros(self.nobs + m - 1)
        p = np.zeros(6 + m)
        max_seen = np.finfo(np.double).max
        l0, b0, s0 = self.initial_values()

        xi = np.zeros_like(p, dtype=bool)
        if optimized:
            init_alpha = alpha if alpha is not None else 0.5 / max(m, 1)
            init_beta = beta if beta is not None else 0.1 * init_alpha if trending else beta
            init_gamma = None
            init_phi = phi if phi is not None else 0.99
            # Selection of functions to optimize for appropriate parameters
            if seasoning:
                init_gamma = gamma if gamma is not None else 0.05 * \
                                                             (1 - init_alpha)
                xi = np.array([
                    alpha is None, trending and beta is None, gamma is None,
                    initial_level is None, trending and initial_slope is None,
                    phi is None and damped
                ] + [True] * m)
                func = SMOOTHERS[(seasonal, trend)]
            elif trending:
                xi = np.array([
                    alpha is None, beta is None, False, initial_level is None,
                    initial_slope is None, phi is None and damped
                ] + [False] * m)
                func = SMOOTHERS[(None, trend)]
            else:
                xi = np.array([
                    alpha is None, False, False, initial_level is None, False,
                    False
                ] + [False] * m)
                func = SMOOTHERS[(None, None)]
            p[:] = [init_alpha, init_beta, init_gamma, l0, b0, init_phi] + s0
            if np.any(xi):
                # txi [alpha, beta, gamma, l0, b0, phi, s0,..,s_(m-1)]
                # Have a quick look in the region for a good starting place for alpha etc.
                # using guesstimates for the levels
                txi = xi & np.array([True, True, True, False, False, True] +
                                    [False] * m)
                txi = txi.astype(bool)
                bounds = np.array([(0.0, 1.0), (0.0, 1.0), (0.0, 1.0),
                                   (0.0, None), (0.0, None), (0.0, 1.0)] + [
                                       (None, None),
                                   ] * m)
                args = (txi.astype(np.uint8), p, y, lvls, b, s, m, self.nobs,
                        max_seen)
                if start_params is None and np.any(txi) and use_brute:
                    res = brute(func,
                                bounds[txi],
                                args,
                                Ns=20,
                                full_output=True,
                                finish=None)
                    p[txi], max_seen, _, _ = res
                else:
                    if start_params is not None:
                        if len(start_params) != xi.sum():
                            msg = 'start_params must have {0} values but ' \
                                  'has {1} instead'
                            nxi, nsp = len(xi), len(start_params)
                            raise ValueError(msg.format(nxi, nsp))
                        p[xi] = start_params
                    args = (xi.astype(np.uint8), p, y, lvls, b, s, m,
                            self.nobs, max_seen)
                    max_seen = func(np.ascontiguousarray(p[xi]), *args)
                # alpha, beta, gamma, l0, b0, phi = p[:6]
                # s0 = p[6:]
                # bounds = np.array([(0.0,1.0),(0.0,1.0),(0.0,1.0),(0.0,None),
                # (0.0,None),(0.8,1.0)] + [(None,None),]*m)
                args = (xi.astype(np.uint8), p, y, lvls, b, s, m, self.nobs,
                        max_seen)
                if use_basinhopping:
                    # Take a deeper look in the local minimum we are in to find the best
                    # solution to parameters, maybe hop around to try escape the local
                    # minimum we may be in.
                    res = basinhopping(func,
                                       p[xi],
                                       minimizer_kwargs={
                                           'args': args,
                                           'bounds': bounds[xi]
                                       },
                                       stepsize=0.01)
                    success = res.lowest_optimization_result.success
                else:
                    # Take a deeper look in the local minimum we are in to find the best
                    # solution to parameters
                    res = minimize(func, p[xi], args=args, bounds=bounds[xi])
                    success = res.success

                if not success:
                    from warnings import warn
                    from statsmodels.tools.sm_exceptions import ConvergenceWarning
                    warn("Optimization failed to converge. Check mle_retvals.",
                         ConvergenceWarning)
                p[xi] = res.x
                opt = res
            else:
                from warnings import warn
                from statsmodels.tools.sm_exceptions import EstimationWarning
                message = "Model has no free parameters to estimate. Set " \
                          "optimized=False to suppress this warning"
                warn(message, EstimationWarning)

            [alpha, beta, gamma, l0, b0, phi] = p[:6]
            s0 = p[6:]

        hwfit = self._predict(h=0,
                              smoothing_level=alpha,
                              smoothing_slope=beta,
                              smoothing_seasonal=gamma,
                              damping_slope=phi,
                              initial_level=l0,
                              initial_slope=b0,
                              initial_seasons=s0,
                              use_boxcox=use_boxcox,
                              remove_bias=remove_bias,
                              is_optimized=xi)
        hwfit._results.mle_retvals = opt
        return hwfit
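A hedged usage sketch for the fit method above, assuming the surrounding class is statsmodels' ExponentialSmoothing (the keyword names match the signature shown, but vary across statsmodels versions):

import numpy as np
from statsmodels.tsa.holtwinters import ExponentialSmoothing

data = np.array([10.2, 11.5, 12.1, 13.7, 14.2, 15.9, 16.4, 18.0])  # toy series
model = ExponentialSmoothing(data, trend='add')
# use_brute=True seeds the optimizer with the coarse grid search shown above
res = model.fit(optimized=True, use_brute=True)
print(res.params['smoothing_level'])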
Code example #37
    def brute_search(
        self,
        weights,
        metaParamNames=list(),
        objective=lambda x: x.loss,
        negative_objective=False,
        stochastic_objective=True,
        stochastic_samples=25,
        stochastic_precision=0.01,
    ):
        """
        Uses brute-force search to find optimal simulation hyperparameters
        Note:
            You want runs passed in the solver to have __no randomness__
            Solving a stochastic simulation will cause problems with solver

        Arguments
        ---------
        Weights: list<floats>
                 weights to be optimized on
        metaParamName: list<string>
                 list of names of arguments to be optimized on
                 the index respects the weights argument above
                 the strings should be the same as in a simulation run input
                 Weights and metaparamNames together would form the
                 algoParams dict in a normal simulation
        objective: function(Market) -> float
            objective function
            by default number of matches
            can be changed to loss, for example, by
                "objective=lambda x: x.loss"
        Returns
        -------
        np.array of weights where the function is minimized
            (where it is maximized if negative_objective=True)
        """
        def this_run(w):
            # note - sign here
            # TODO fix negative sign
            sign = 1 if negative_objective else -1
            if stochastic_objective:
                result = 0
                # If objective stochastic, make montecarlo draws & average
                for i in range(stochastic_samples):
                    result += sign * \
                            self.single_run(w,
                                            metaParamNames=metaParamNames,
                                            objective=objective)
                # Average montecarlo draws
                result = result / stochastic_samples
                # Tune precision for convergence
                result = int(
                    result / stochastic_precision) * stochastic_precision
                return result
            else:
                return sign * self.single_run(
                    w, metaParamNames=metaParamNames, objective=objective)

        # res = []
        # for i in range(10):
        #    res.append(this_run(weights))
        res = optimize.brute(this_run,
                             weights,
                             full_output=True,
                             disp=True,
                             finish=optimize.fmin)
        return res[0]
Code example #38
def findRotMaxRect(data_in,
                   flag_opt=False,
                   flag_parallel=False,
                   nbre_angle=10,
                   flag_out=None,
                   flag_enlarge_img=False,
                   limit_image_size=300):
    '''
    flag_opt     : True  -> only nbre_angle angles are tested between 90 and 180,
                            and an optimization descent is run on the best fit.
                   False -> 100 angles are tested from 90 to 180.
    flag_parallel: only valid when flag_opt=False; the 100 angles are run with multithreading
    flag_out     : angle and rectangle of the rotated image are output together with the rectangle of the original image
    flag_enlarge_img : the image used in the function is double the size of the original to ensure all features stay in when rotated
    limit_image_size : controls the number of pixels of the image used in the function.
                       This speeds up the code but can give approximate results if the shape is not simple
    '''

    #time_s = datetime.datetime.now()

    #make the image square
    #----------------
    nx_in, ny_in = data_in.shape
    if nx_in != ny_in:
        n = max([nx_in, ny_in])
        data_square = np.ones([n, n])
        xshift = (n - nx_in) // 2
        yshift = (n - ny_in) // 2
        if yshift == 0:
            data_square[xshift:(xshift + nx_in), :] = data_in[:, :]
        else:
            data_square[:, yshift:(yshift + ny_in)] = data_in[:, :]
    else:
        xshift = 0
        yshift = 0
        data_square = data_in

    #apply scale factor if image bigger than limit_image_size
    #----------------
    if data_square.shape[0] > limit_image_size:
        data_small = cv2.resize(data_square,
                                (limit_image_size, limit_image_size),
                                interpolation=0)
        scale_factor = 1. * data_square.shape[0] / data_small.shape[0]
    else:
        data_small = data_square
        scale_factor = 1

    # set the input data with an odd number of points in each dimension to make rotation easier
    #----------------
    nx, ny = data_small.shape
    nx_extra = -nx
    ny_extra = -ny
    if nx % 2 == 0:
        nx += 1
        nx_extra = 1
    if ny % 2 == 0:
        ny += 1
        ny_extra = 1
    # if a dimension was already odd, nx_extra stays at -nx, so the slice
    # below ([:-(-nx)] == [:nx]) copies that full axis unchanged
    data_odd = np.ones([
        data_small.shape[0] + max([0, nx_extra]),
        data_small.shape[1] + max([0, ny_extra])
    ])
    data_odd[:-nx_extra, :-ny_extra] = data_small
    nx, ny = data_odd.shape

    nx_odd, ny_odd = data_odd.shape

    if flag_enlarge_img:
        data = np.zeros([2 * data_odd.shape[0] + 1, 2 * data_odd.shape[1] + 1
                         ]) + 1
        nx, ny = data.shape
        # integer division, and +1 on the upper bounds so the nx_odd x ny_odd
        # block actually fits inside the enlarged frame
        data[nx // 2 - nx_odd // 2:nx // 2 + nx_odd // 2 + 1,
             ny // 2 - ny_odd // 2:ny // 2 + ny_odd // 2 + 1] = data_odd
    else:
        data = np.copy(data_odd)
        nx, ny = data.shape

    #print (datetime.datetime.now()-time_s).total_seconds()

    if flag_opt:
        myranges_brute = ([
            (90., 180.),
        ])
        coeff0 = np.array([
            0.,
        ])
        coeff1 = optimize.brute(residual,
                                myranges_brute,
                                args=(data, ),
                                Ns=nbre_angle,
                                finish=None)
        popt = optimize.fmin(residual,
                             coeff1,
                             args=(data, ),
                             xtol=5,
                             ftol=1.e-5,
                             disp=False)
        angle_selected = popt[0]

        #rotation_angle = np.linspace(0,360,100+1)[:-1]
        #mm = [residual(aa,data) for aa in rotation_angle]
        #plt.plot(rotation_angle,mm)
        #plt.show()
        #pdb.set_trace()

    else:
        rotation_angle = np.linspace(90, 180, 100 + 1)[:-1]
        args_here = []
        for angle in rotation_angle:
            args_here.append([angle, data])

        if flag_parallel:

            # set up a pool to run the parallel processing
            cpus = multiprocessing.cpu_count()
            pool = multiprocessing.Pool(processes=cpus)

            # then the map method of pool actually does the parallelisation

            results = pool.map(residual_star, args_here)

            pool.close()
            pool.join()

        else:
            results = []
            for arg in args_here:
                results.append(residual_star(arg))

        argmin = np.array(results).argmin()
        angle_selected = args_here[argmin][0]
    rectangle, M_rect_max, RotData = get_rectangle_coord(angle_selected,
                                                         data,
                                                         flag_out=True)
    #rectangle, M_rect_max  = get_rectangle_coord(angle_selected,data)

    #print (datetime.datetime.now()-time_s).total_seconds()

    #invert rectangle
    M_invert = cv2.invertAffineTransform(M_rect_max)
    rect_coord = [
        rectangle[:2], [rectangle[0], rectangle[3]], rectangle[2:],
        [rectangle[2], rectangle[1]]
    ]

    #ax = plt.subplot(111)
    #ax.imshow(RotData.T,origin='lower',interpolation='nearest')
    #patch = patches.Polygon(rect_coord, edgecolor='k', facecolor='None', linewidth=2)
    #ax.add_patch(patch)
    #plt.show()

    rect_coord_ori = []
    for coord in rect_coord:
        rect_coord_ori.append(
            np.dot(M_invert, [coord[0], (ny - 1) - coord[1], 1]))

    #transform to numpy coord of input image
    coord_out = []
    for coord in rect_coord_ori:
        coord_out.append([scale_factor * round(coord[0] - (nx // 2 - nx_odd // 2), 0) - xshift,
                          scale_factor * round((ny - 1) - coord[1] - (ny // 2 - ny_odd // 2), 0) - yshift])

    coord_out_rot = []
    coord_out_rot_h = []
    for coord in rect_coord:
        coord_out_rot.append([scale_factor * round(coord[0] - (nx // 2 - nx_odd // 2), 0) - xshift,
                              scale_factor * round(coord[1] - (ny // 2 - ny_odd // 2), 0) - yshift])
        coord_out_rot_h.append([scale_factor * round(coord[0] - (nx // 2 - nx_odd // 2), 0),
                                scale_factor * round(coord[1] - (ny // 2 - ny_odd // 2), 0)])

    #M = cv2.getRotationMatrix2D( ( (data_square.shape[0]-1)/2, (data_square.shape[1]-1)/2 ), angle_selected,1)
    #RotData = cv2.warpAffine(data_square,M,data_square.shape,flags=cv2.INTER_NEAREST,borderValue=1)
    #ax = plt.subplot(121)
    #ax.imshow(data_square.T,origin='lower',interpolation='nearest')
    #ax = plt.subplot(122)
    #ax.imshow(RotData.T,origin='lower',interpolation='nearest')
    #patch = patches.Polygon(coord_out_rot_h, edgecolor='k', facecolor='None', linewidth=2)
    #ax.add_patch(patch)
    #plt.show()

    #coord for data_in
    #----------------
    #print scale_factor, xshift, yshift
    #coord_out2 = []
    #for coord in coord_out:
    #    coord_out2.append([int(np.round(scale_factor*coord[0]-xshift,0)),int(np.round(scale_factor*coord[1]-yshift,0))])

    #print (datetime.datetime.now()-time_s).total_seconds()

    if flag_out is None:
        return coord_out
    elif flag_out == 'rotation':
        return coord_out, angle_selected, coord_out_rot
    else:
        print('bad def in findRotMaxRect input. stop')
        pdb.set_trace()
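
A hedged usage sketch for findRotMaxRect: the mask below is invented, the 0-inside/1-outside convention is inferred from the np.ones padding above, and the residual/get_rectangle_coord helpers the function relies on are assumed to be defined elsewhere in the module.

import numpy as np

mask = np.ones((200, 150))
mask[60:140, 40:110] = 0  # the feature to bound, marked with zeros
coord_in, angle, coord_rot = findRotMaxRect(mask, flag_opt=True,
                                            nbre_angle=10,
                                            flag_out='rotation')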
コード例 #39
0
# Optimize
# -------------------------------------------------------------------------

# some parameters
Nt = Time.size
tmax = np.max(Time)
sigma = 2 * np.log(Nt) / tmax
Lap_deltaG = laplace(delta_groundload, sigma, Time)

# Boundaries
Rb = (0.07, 0.1)
lmS = (2, 4)
Tungest = (10, 15)

bounds = [Rb, Tungest, lmS]
initialGuess = [0.09, 10, 3]

# Brute
resbrute = optimize.brute(optimizer,
                          bounds,
                          full_output=False,
                          finish=optimize.fmin)
print(resbrute)
'''
# basinhopping
minimizer_kwargs = {"method":"L-BFGS-B", "jac":False, "bounds": bounds}
results = dict()
results= optimize.basinhopping(optimizer, initialGuess,  minimizer_kwargs=minimizer_kwargs, niter = 10, niter_success = 2)
print(results)
'''
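
As a side note on the finish argument used above: finish=None returns the best raw grid point, while finish=optimize.fmin (the default) polishes it with a local simplex search. A toy comparison, with an invented objective:

import numpy as np
from scipy import optimize

f = lambda x: (x[0] - 0.123) ** 2 + (x[1] + 0.456) ** 2
rngs = ((-1, 1), (-1, 1))
coarse = optimize.brute(f, rngs, Ns=11, finish=None)           # nearest grid point
fine = optimize.brute(f, rngs, Ns=11, finish=optimize.fmin)    # locally polished
print(coarse, fine)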
コード例 #40
0
def Fit(x,y,Options):
    """
    General fiting function.

    Fits to a WLC model using the given parameters. If Extensible is set to 
    true, then fits an extensible model, subject to running nIters times, 
    or until the relative error between fits is less than rtol, whichever
    is first

    Args:
       x: 1D array of size N, independent
       y: what we want to fit to
       Options: FitInfo Object, giving the options for the fit
    Returns: 
       FitInfo, with updated parmaeters and standard deviations associated
       with the fit.
    """
    func = Options.Model
    # scale everything to avoid convergence problems
    xNormalization,yNormalization = Options.FitOptions.\
                                    GetNormalizationCoeffs(x,y)
    xScaled = x.copy()/xNormalization
    yScaled = y.copy()/yNormalization
    # get and scale the actual parameters (note this also scales the bounds!)
    Params = Options.ParamVals
    Params.NormalizeParams(xNormalization,yNormalization)
    # varyDict records our initial guesses: which parameters does the user want to fit?
    varyDict = Options.GetVaryingParamDict()
    fixed = Options.GetFixedParamDict()
    # get the bounds, convert to CurveFit's conventions
    boundsRaw = Options.DictToValues(Options.GetVaryingBoundsDict())
    # curve_fit wants a list of [[lower1,lower2,...],[upper1,upper2,...]]
    boundsCurvefitRaw = [BoundsObj.ToCurveFitConventions(*b) for b in boundsRaw]
    boundsCurvefitLower = [b[0] for b in boundsRaw]
    boundsCurvefitUpper = [b[1] for b in boundsRaw]
    boundsCurvefit = boundsCurvefitLower,boundsCurvefitUpper
    # figure out what is fixed and varying
    fixedNames = fixed.keys()
    varyNames = varyDict.keys()
    varyGuesses = varyDict.values()
    # force all parameters to be positive
    # number of evaluations should depend on the number of things we are fitting
    nEval = 1000*len(varyNames)
    mFittingFunc = GetFunctionCall(func,varyNames,fixed)
    # set up things for basin / brute force initialization
    toMin = GetMinimizingFunction(xScaled,yScaled,mFittingFunc)
    boundsBasin = [BoundsObj.ToMinimizeConventions(*b) for b in boundsRaw]
    initObj = Options.Initialization
    if (initObj.Type == Initialization.HOP):
        # Use the basin hopping mode
        obj = FitUtil.BasinHop(toMin,varyGuesses,boundsBasin,*initObj.Args,
                               **initObj.ParamDict)
        varyGuesses = obj.x
    elif (initObj.Type == Initialization.BRUTE):
        # check and make sure the bounds are infinity
        flatBounds = list(np.array(boundsBasin).flatten())
        assert (None not in flatBounds) and (np.inf not in flatBounds) , \
            "Brute force (grid) initialization method requires closed bounds"
        # use the brute force method
        x0,fval,grid,jout= brute(toMin,ranges=boundsBasin,disp=False,
                                 full_output=True,
                                 *initObj.Args,**initObj.ParamDict)
        Options.Initialization.SetInitializationInfo(x0=x0,
                                                     fval=fval,
                                                     grid=grid,
                                                     jout=jout)
        varyGuesses = x0
    # now, set up a slightly better-quality fit, based on the local minimum
    # that the basin-hopping or brute-force initialization found
    jacFunc = '3-point'
    fitOpt = dict(gtol=1e-15,
                  xtol=1e-15,
                  ftol=1e-15,
                  method='trf', # trf supports bounds, which is good!
                  jac=jacFunc,
                  # XXX kind of a kludge...
                  bounds=boundsCurvefit,
                  max_nfev=nEval,
                  verbose=0)
    InfiniteFlag = -max(yScaled)
    FinalFitFunc = lambda *args,**kwargs: SafeCall(mFittingFunc,InfiniteFlag,
                                                   *args,**kwargs)
    # note: we use p0 as the initial guess for the parameter values
    params,paramsStd,predicted = FitUtil.GenFit(xScaled,yScaled,
                                                FinalFitFunc,p0=varyGuesses,
                                                **fitOpt)
    # all done!
    # make a copy of the information object; we will return a new one
    finalInfo = copy.deepcopy(Options)
    # update the final parameter values
    finalVals = GetFullDictionary(varyNames,fixed,*params)
    # the fixed parameters have no stdev, since the fit does not vary them
    fixedStdDict = dict((name,None) for name in fixedNames)
    finalStdevs = GetFullDictionary(varyNames,fixedStdDict,
                                    *paramsStd)
    # set the values, their standard deviations, then denormalize everything
    finalInfo.ParamVals.SetParamValues(**finalVals)
    finalInfo.ParamVals.SetParamStdevs(**finalStdevs)
    finalInfo.ParamVals.DenormalizeParams(xNormalization,
                                          yNormalization)
    finalInfo.FitOptions.SetNormCoeffs(xNormalization,
                                       yNormalization)
    # update the actual values and parameters; update the prediction scale
    finalPrediction = finalInfo.FunctionToPredict(xScaled,**finalVals)*\
                      yNormalization
    return FitReturnInfo(finalInfo,finalPrediction)
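
The two-stage structure above (a brute/basin-hopping initialization feeding a bounded least-squares refinement) reduces to a minimal sketch; the Gaussian model and synthetic data here are invented for illustration:

import numpy as np
from scipy import optimize

rng = np.random.default_rng(0)
x = np.linspace(0, 10, 200)
y = 2.0 * np.exp(-(x - 4.0) ** 2) + 0.05 * rng.standard_normal(x.size)

def model(x, a, mu):
    return a * np.exp(-(x - mu) ** 2)

def sse(p):
    # scalar misfit for the grid stage
    return float(np.sum((model(x, *p) - y) ** 2))

# stage 1: brute force over closed bounds gives a robust starting point
p0 = optimize.brute(sse, ((0.1, 5.0), (0.0, 10.0)), Ns=15, finish=None)
# stage 2: refine with a bounded trust-region fit seeded by the grid result
popt, _ = optimize.curve_fit(model, x, y, p0=p0,
                             bounds=([0.1, 0.0], [5.0, 10.0]), method='trf')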
コード例 #41
0
def identify_clutch_window(
        times, accelerations, gear_shifts, engine_speeds_out,
        engine_speeds_out_hot, cold_start_speeds_delta,
        max_clutch_window_width):
    """
    Identifies clutching time window [s].

    :param times:
        Time vector [s].
    :type times: numpy.array

    :param accelerations:
        Acceleration vector [m/s2].
    :type accelerations: numpy.array

    :param gear_shifts:
        When there is a gear shifting [-].
    :type gear_shifts: numpy.array

    :param engine_speeds_out:
        Engine speed [RPM].
    :type engine_speeds_out: numpy.array

    :param engine_speeds_out_hot:
        Engine speed at hot condition [RPM].
    :type engine_speeds_out_hot: numpy.array

    :param cold_start_speeds_delta:
        Engine speed delta due to the cold start [RPM].
    :type cold_start_speeds_delta: numpy.array

    :param max_clutch_window_width:
        Maximum clutch window width [s].
    :type max_clutch_window_width: float

    :return:
        Clutching time window [s].
    :rtype: tuple
    """

    model = RANSACRegressor(
        base_estimator=LinearRegression(fit_intercept=False),
        random_state=0
    )

    phs = partial(calculate_clutch_phases, times, gear_shifts)

    delta = engine_speeds_out - engine_speeds_out_hot - cold_start_speeds_delta
    threshold = np.std(delta) * 2

    def error(v):
        clutch_phases = phs(v) & ((-threshold > delta) | (delta > threshold))
        y = delta[clutch_phases]
        # noinspection PyBroadException
        try:
            X = np.array([accelerations[clutch_phases]]).T
            return -model.fit(X, y).score(X, y)
        except:
            return np.inf

    dt = max_clutch_window_width / 2
    Ns = int(dt / max(times[1] - times[0], 0.5)) + 1
    return tuple(brute(error, ((0, -dt), (0, dt)), Ns=Ns, finish=None))
コード例 #42
0
def opt_ts(rslcs, method="weights", verbose=True):
	"""
	I optimize the timeshifts between the rslcs to minimize the wtv between them.
	Note that even if the wtvdiff is only about two curves, we cannot split this into optimizing
	AB AC AD in a row, as this would never calculate BC, and BC is not contained in AB + AC.
	
	:param rslcs: a list of rslc objects
	
	"""
	rslcsc = [rs.copy() for rs in rslcs] # We'll work on copies.
	
	# No need for reverse combis, as wtvdiff is symmetric.
	#couplelist = [couple for couple in [[rs1, rs2] for rs1 in rslcsc for rs2 in rslcsc] if couple[0] != couple[1]]
	
	indexes = np.arange(len(rslcsc))
	indlist = [c for c in [[i1, i2] for i1 in indexes for i2 in indexes] if c[1] > c[0]]
	couplelist = [[rslcsc[i1], rslcsc[i2]] for (i1, i2) in indlist]
	# So the elements in couplelist are the SAME as those from rslcsc
	
	inishifts = np.array([rs.timeshift for rs in rslcsc[1:]]) # We won't move the first curve.

	def errorfct(timeshifts):
		if timeshifts.shape == ():
			timeshifts = np.array([timeshifts])
		for (rs, timeshift) in zip(rslcsc[1:], timeshifts):
			rs.timeshift = timeshift
		
		tvs = np.array([wtvdiff(rs1, rs2, method=method) for (rs1, rs2) in couplelist])
		ret = np.sum(tvs)
		#if verbose:
		#	print timeshifts, ret
		return ret
	
	if verbose:
		print("Starting time shift optimization ...")
		print("Initial pars (shifts, not delays) : ", inishifts)
	
	# Some brute force exploration, like for the dispersion techniques ...
	
	res = spopt.brute(errorfct, bruteranges(5,3,inishifts), full_output = 0, finish=None)
	# This would finish by default with fmin ... we do not want that.
	if verbose:
		print("Brute 1 shifts : %s" % res)
		print("Brute 1 errorfct : %f" % errorfct(res))
	
	res = spopt.brute(errorfct, bruteranges(2.5,3,res), full_output = 0, finish=None)
	if verbose:
		print("Brute 2 shifts : %s" % res)
		print("Brute 2 errorfct : %f" % errorfct(res))
	
	res = spopt.brute(errorfct, bruteranges(1.25,3,res), full_output = 0, finish=None)
	if verbose:
		print("Brute 3 shifts : %s" % res)
		print("Brute 3 errorfct : %f" % errorfct(res))
	
	res = spopt.brute(errorfct, bruteranges(0.5,3,res), full_output = 0, finish=None)
	if verbose:
		print("Brute 4 shifts : %s" % res)
		print("Brute 4 errorfct : %f" % errorfct(res))
	
	
	minout = spopt.fmin_powell(errorfct, res, xtol=0.001, full_output=1, disp=verbose)
	#minout = spopt.fmin_bfgs(errorfct, inishifts, maxiter=None, full_output=1, disp=verbose, retall=0, callback=None)
		
	popt = minout[0]
	minwtv = errorfct(popt) # This sets popt, and the optimal ML and source.
	
	if verbose:
		print("Final shifts : %s" % popt)
		print("Final errorfct : %f" % minwtv)
	
	# We set the timeshifts of the originals :
	for (origrs, rs) in zip(rslcs[1:], rslcsc[1:]):
		origrs.timeshift = rs.timeshift
	
	return minwtv
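
bruteranges is defined elsewhere in the module; given how it is called above (a shrinking first argument, a constant 3, and the current shift estimates), a plausible reconstruction is a step/radius grid builder like the following hypothetical sketch:

import numpy as np

def bruteranges_sketch(step, radius, center):
    # hypothetical: one explicit grid per time shift, 2*radius+1 points
    # spaced `step` apart and centered on the current estimate, returned
    # as slice objects that optimize.brute consumes directly
    return [slice(c - radius * step, c + (radius + 0.5) * step, step)
            for c in np.atleast_1d(center)]

Each successive call above roughly halves the step, giving a coarse-to-fine grid search before the final fmin_powell polish.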
コード例 #43
0
    def doTrain(self, sess1, sess2, maxIters=15):
        tfModelDirPath = os.path.join(self.phrasesDir, self.flags.model)
        print "Model path is ", tfModelDirPath
        if not os.path.isdir(tfModelDirPath): os.mkdir(tfModelDirPath)
        tfModelPath = os.path.join(tfModelDirPath, 'compmodel.tf')
        if os.path.exists(tfModelPath):
            logging.info("Warmstart from %s", tfModelPath)
            print "Welcome to training"
        sess1.run(self.initopm, feed_dict={self.place: self.embeds})
        sess2.run(self.initope, feed_dict={self.place: self.embeds})
        print "Training Started"
        prev_labl = tf.random_uniform([self.numDatInst, 1])

        for xiter in xrange(maxIters):

            for miter in xrange(2):
                _, dlabel = sess1.run(
                    [self.trainopm, tf.transpose(self.labelm)],
                    feed_dict={
                        self.w1m: self.allw1,
                        self.w12m: self.allw12,
                        self.labelm: sess2.run(prev_labl)
                    })

            self.errs = sess2.run(self.errore,
                                  feed_dict={
                                      self.w1e: self.allw1,
                                      self.w12e: self.allw12,
                                      self.M1e: sess1.run(self.M1m),
                                      self.M2e: sess1.run(self.M2m)
                                  })

            print('iter=', xiter)

            allerrs = sess2.run(tf.squeeze(tf.transpose(
                self.errs)))  #error for all data instances, in list format
            v, ind = tf.nn.top_k(
                tf.transpose(self.errs), self.numDatInst,
                sorted=True)  #sorted error, ind has sorted indexs of error
            idex_nc = sess2.run(tf.squeeze(ind))  #indexs in list format

            for i in idex_nc:
                print(self.allylit[i], ' ', self.vocabArray[self.allw1[
                    i]].word, ' ', self.vocabArray[self.allw12[i]].word)
            emin = tf.reduce_min(allerrs)  #for scaling error
            emax = tf.reduce_max(allerrs)
            self.errs = tf.expand_dims((2. * allerrs - tf.ones_like(allerrs) *
                                        (emin + emax)) * 40.0 / (emax - emin),
                                       1)
            self.errsnumpy = sess2.run(self.errs)
            ranges = [
                -100., 100., .1
            ]  #range to perform brute force search of root of shifted sigmoid
            k = optimize.brute(self.f, (ranges, ),
                               finish=None)  #actual root finding
            #k = bisect(self.f,100.0,5000.0)
            print "X is "
            print k
            adjerre = tf.sub(self.errs,
                             tf.cast(tf.constant(k * 1.0), tf.float32))
            prev_labl = tf.map_fn(
                self.sigmoid_01,
                adjerre)  #final label in this step, sigmoid of adjusted errors
コード例 #44
0
def optimize():
    """
    This function optimizes the DC offsets of the quantum controller to minimize the leakage at 7GHz
    :return: Prints the ideal values of the DC offsets and correction matrix variables
    """

    # Connects to the quantum machine through the network
    qmManager = QuantumMachinesManager()

    # Initial guess for the best offsets
    # Initial guess for the correction variables (nominally corvars = [0, 1])
    offsets = [-0.02505083, -0.03288917]
    corvars = [0.50478125, 0.74776809]
    DC_I = offsets[0]
    DC_Q = offsets[1]
    correction = calc_cmat(corvars[0], corvars[1])

    # Search parameters: brute-force ranges, number of brute-force steps, and the max number of fmin iterations
    # OFFSETS
    nstepbruteoffset  = 10                          # Num of steps in the initial brute force stage (N^2 evaluations)
    rangebruteoffset  = [(-0.1, 0.1), (-0.1, 0.1)]  # Range to search in the initial brute force stage
    maxiterfminoffset = 100                         # Maximum number of iterations in the fmin stage

    # CORRECTION VARIABLES
    nstepbrutecorvars = 20                         # Num of steps in the initial brute force stage (N^2 evaluations)
    rangebrutecorvars = [(0.4, 0.6), (0.6, 1)]  # Range to search in the initial brute force stage
    maxiterfmincorvars = 100                       # Maximum number of iterations in the fmin stage

    # Try to initialize the spectrum analyzer
    try:
        # Initializes the instrument
        inst = setinstrument(7e9, 25e5)

    except Exception as e:
        print("An error has occurred trying to initialize the instrument.\nThe Error:\n", e)
        exit()

    try:
        # The program that will run on the quantum machine
        with program() as prog:
            with infinite_loop_():
                play('pulse1', 'qe1')

        # The configuration of the quantum program, this is a dictionary, see documentation
        config = setconf(DC_I, DC_Q, correction)

        # Open quantum machine from configuration and force execute it
        qm1 = qmManager.open_qm(config)
        job = qm1.execute(prog, forceExecution=True)

        offsets = brute(power, rangebruteoffset, args=(corvars, qm1, inst, 'offset'), Ns=nstepbruteoffset, finish=None)

        print('\n    Initial guess for the offsets [DC_I, DC_Q]: ', offsets, "\n\n")
        # Using fmin function to find the best offsets to minimize the leakage
        xopt = fmin(power, offsets, (corvars, qm1, inst, 'offset'), maxiter=maxiterfminoffset)

    # If there's an error trying to use the "power" function
    except Exception as e:
        print("An error has occurred in the 'power' function. \nThe Error:\n", e)
        inst.close()  # Closes the spectrum analyzer
        exit()

    # Redefine the offsets to the values we found
    offsets = xopt
    print("\nOptimal offsets [DC_I, DC_Q]: " + str(offsets) + "\n\n")

    # Define the spectrum analyzer frequency to the left spike frequency
    inst.frequency(7e9 - 25e6)
    try:
        corvars = brute(powerdiffargs, rangebrutecorvars, args=(offsets, qm1, inst), Ns=nstepbrutecorvars,
                        finish=None)
        correction = calc_cmat(corvars[0], corvars[1])
        print(
            "\n    Initial guess from brute force [th, k]: " + str(corvars) + "\n\n")

        xopt = fmin(powerdiffargs, corvars, (offsets, qm1, inst),
                    maxiter=maxiterfmincorvars)

    except Exception as e:
        print("An error has occurred trying to find optimal correction matrix.\nThe Error:\n", e)
        exit()

    corvars = xopt
    print("\nOptimal correction variables [theta, k]: " + str(corvars))

    inst.close()  # Closes the instrument

    print("""
    ---------------------------------------------------------------------------
    Final results:
        * Offsets [DC_I, DC_Q]: {}
        * Correction variables: {}
    ---------------------------------------------------------------------------
    """.format(offsets, corvars))
コード例 #45
0
fig.savefig('ch6-examaple-two-dim.pdf')

# ## Brute force search for initial point

# In[42]:


def f(X):
    x, y = X
    return (4 * np.sin(np.pi * x) +
            6 * np.sin(np.pi * y)) + (x - 1)**2 + (y - 1)**2


# In[43]:

x_start = optimize.brute(f, (slice(-3, 5, 0.5), slice(-3, 5, 0.5)),
                         finish=None)

# In[44]:

x_start

# In[45]:

f(x_start)

# In[46]:

x_opt = optimize.fmin_bfgs(f, x_start)

# In[47]:
コード例 #46
0
def costfun_method(distances_to_anchors, anchor_positions):
    anchor_positions = np.array(anchor_positions)
    tag_pos = lsq_method(distances_to_anchors, anchor_positions)
    anc_z_ls_mean = np.mean(np.array([i[2] for i in anchor_positions]))
    new_z = (np.array([i[2] for i in anchor_positions]) -
             anc_z_ls_mean).reshape(4, 1)
    new_anc_pos = np.concatenate(
        (np.delete(anchor_positions, 2, axis=1), new_z), axis=1)
    new_disto_anc = np.sqrt(
        abs(distances_to_anchors[:]**2 - (tag_pos[0] - new_anc_pos[:, 0])**2 -
            (tag_pos[1] - new_anc_pos[:, 1])**2))
    new_z = new_z.reshape(4, )

    a = (np.sum(new_disto_anc[:]**2) -
         3 * np.sum(new_z[:]**2)) / len(anchor_positions)
    b = (np.sum((new_disto_anc[:]**2) *
                (new_z[:])) - np.sum(new_z[:]**3)) / len(anchor_positions)
    # print('a,b: ',a,b)
    cost = lambda z: np.sum(((z - new_z[:])**4 - 2 * (
        ((new_disto_anc[:]) *
         (z - new_z[:]))**2) + new_disto_anc[:]**4)) / len(anchor_positions)

    function = lambda z: z**3 - a * z + b
    derivative = lambda z: 3 * z**2 - a

    def newton(function,
               derivative,
               x0,
               tolerance,
               number_of_max_iterations=100):
        x1 = 0
        if (abs(x0 - x1) <= tolerance and abs((x0 - x1) / x0) <= tolerance):
            return x0
        k = 1
        while (k <= number_of_max_iterations):
            x1 = x0 - (function(x0) / derivative(x0))
            if (abs(x0 - x1) <= tolerance and abs(
                (x0 - x1) / x0) <= tolerance):
                return x1
            x0 = x1
            k = k + 1
            if (k > number_of_max_iterations):
                print("ERROR: Exceeded max number of iterations", a, b)
        return x1

    newton_z_from_postive = newton(function, derivative, 100, 0.01)
    # newton_z_from_negative = newton(function, derivative, -100, 0.01)

    # def find_newton_global(newton_z_from_postive, newton_z_from_negative):
    #     if cost(newton_z_from_postive) < cost(newton_z_from_negative):
    #         return newton_z_from_postive
    #     elif cost(newton_z_from_negative) < cost(newton_z_from_postive):
    #         return newton_z_from_negative

    # newton_z = find_newton_global(newton_z_from_postive, newton_z_from_negative)
    # print('from_postive, from_negative: ', newton_z_from_postive, newton_z_from_negative)
    # print('The approximate value of Height is: ' +str(newton_z))

    newton_z = newton_z_from_postive

    ranges = (slice(0, 100, 1), )
    resbrute = optimize.brute(cost,
                              ranges,
                              full_output=True,
                              finish=optimize.fmin)
    # print('resbrute: ', resbrute[0] + anc_z_ls_mean)

    new_tag_min = np.concatenate((np.delete(np.array(tag_pos),
                                            2), [newton_z] + anc_z_ls_mean))
    return new_tag_min, newton_z + anc_z_ls_mean, a, b, float(
        resbrute[0]) + anc_z_ls_mean, anc_z_ls_mean
コード例 #47
0
# Use the algorithms in scipy.optimize to find the minimum
def f(x):
    return x**2 + 10 * np.sin(x)


x = np.arange(-10, 10, 0.1)
plt.plot(x, f(x))
plt.show()

print(optimize.fmin_bfgs(f, 0))
# optimize.fmin_bfgs(f,3,disp=0)

# Find the global minimum without any prior guess
grid = (-10, 10, 0.1)
xmin_global = optimize.brute(f, (grid, ))
print(xmin_global)

# Find a local minimum
xmin_local = optimize.fminbound(f, 0, 10)
print(xmin_local)

#print(dir(norm))  # inspect which function attributes norm provides

# Solving systems of nonlinear equations with optimize.fsolve
# fsolve(func,x0)

# f1(u1,u2,u3)=0
# f2(u1,u2,u3)=0
# f3(u1,u2,u3)=0
#
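
To make the fsolve sketch above concrete, here is a hedged, self-contained example with an invented three-equation system in the spirit of the comment:

import numpy as np
from scipy.optimize import fsolve

def system(u):
    u1, u2, u3 = u
    # f1(u1,u2,u3)=0, f2(u1,u2,u3)=0, f3(u1,u2,u3)=0
    return [u1 + u2 + u3 - 6.0,
            u1 * u2 - 2.0,
            u3 - u1 ** 2]

root = fsolve(system, x0=[1.0, 1.0, 1.0])
print(root, np.allclose(system(root), 0.0))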
コード例 #48
0
def mode(y, bw=0.1):
    k = gaussian_kde(y.stack().dropna(), bw)
    m = brute(lambda z: -k(z), ((-20, 10), ))
    return m[0]
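
A hedged usage sketch for mode (y.stack() implies a pandas DataFrame; gaussian_kde and brute are assumed imported from scipy.stats and scipy.optimize, as the snippet implies):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.default_rng(0).normal(2.0, 1.0, (500, 3)))
print(mode(df))  # roughly 2.0: the KDE peak located by brute over (-20, 10)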
コード例 #49
0
    #
    # options['Maturity'] = datetime.datetime.fromtimestamp(floor(options['Maturity'] / 1e9))
    mats = sorted(set(options['Maturity']))
    options = options.set_index('Strike')
    for i, mat in enumerate(mats):
        options[options['Maturity'] == mat][['Call', 'Model']].\
            plot(style=['b-', 'ro'], title='%s' % str(mat)[:10])
        plt.ylabel('option value')
        plt.savefig('./M76_calibration_3_%s.pdf' % i)


if __name__ == '__main__':
    #
    # Calibration
    #
    i = 0  # counter initialization
    min_RMSE = 100  # minimal RMSE initialization
    p0 = sop.brute(M76_error_function_FFT,
                   ((0.075, 0.201, 0.025), (0.10, 0.401, 0.1),
                    (-0.5, 0.01, 0.1), (0.10, 0.301, 0.1)),
                   finish=None)

    # p0 = [0.15, 0.2, -0.3, 0.2]
    opt = sop.fmin(M76_error_function_FFT,
                   p0,
                   maxiter=500,
                   maxfun=750,
                   xtol=0.000001,
                   ftol=0.000001)
    generate_plot(opt, options)
コード例 #50
0
ファイル: phasecorr.py プロジェクト: slamajakub/visnav-py
    def findstate(self, imgfile, outfile, min_options={}, **kwargs):
        self.debug_filebase = outfile        
        min_options = dict(min_options)
        
        if self._image_file != imgfile or self._image is None:
            self._image = self.load_target_image(imgfile)
            self._image_file = imgfile

        self._target_image = np.float32(self._image)
        
        # consider if useful at all
        if kwargs.pop('centroid_init', False):
            try:
                CentroidAlgo.update_sc_pos(
                        self.system_model, self._image)
                ok = True
            except PositioningException as e:
                if not e.args or e.args[0] != 'No asteroid found':
                    print(str(e))
                print('|', end='', flush=True)
                ok = False
            if not ok:
                return False
        
        self.extra_values = []
        
        #hwsize = kwargs.get('hwin_size', 4)
        #tmp = cv2.createHanningWindow((hwsize, hwsize), cv2.CV_32F)
        #sd = int((self._cam.height - hwsize)/2)
        #self._hannw = cv2.copyMakeBorder(tmp,
        #        sd, sd, sd, sd, cv2.BORDER_CONSTANT, 0)
        
        self._hannw = None
        self.start_time = datetime.now()
        method = min_options.pop('method', False)
        init_vals = np.array(list(p.nvalue for n, p in
                                  self.system_model.get_params()))
        
        if method=='simplex':
            options={'maxiter':100, 'xtol':2e-2, 'ftol':5e-2}
            options.update(min_options)
            res = optimize.minimize(lambda x: self.optfun(*x), init_vals,
                                    method='Nelder-Mead', options=options)
            x = res.x
        elif method=='powell':
            options = {'maxiter':100, 'xtol':2e-2, 'ftol':5e-2,
                       'direc':np.identity(len(init_vals))*0.01}
            options.update(min_options)
            res = optimize.minimize(lambda x: self.optfun(*x), init_vals,
                                    method='Powell', options=options)
            x = res.x
        elif method=='cobyla':
            options = {'maxiter':100, 'rhobeg': 0.1}
            options.update(min_options)
            res = optimize.minimize(lambda x: self.optfun(*x), init_vals,
                                    method='COBYLA', options=options)
            x = res.x
        elif method=='cg':
            options = {'maxiter':1000, 'eps':0.01, 'gtol':1e-4}
            options.update(min_options)
            res = optimize.minimize(lambda x: self.optfun(*x), init_vals,
                                    method='CG', options=options)
            x = res.x
        elif method=='bfgs':
            options = {'maxiter':1000, 'eps':0.01, 'gtol':1e-4}
            options.update(min_options)
            res = optimize.minimize(lambda x: self.optfun(*x), init_vals,
                                    method='BFGS', options=options)
            x = res.x
        elif method=='anneal':
            options = {
                'niter':350, 'T':1.25, 'stepsize':0.3,
                'minimizer_kwargs':{
#                    'method':'Nelder-Mead',
#                    'options':{'maxiter':25, 'xtol':2e-2, 'ftol':1e-1}
                    'method':'COBYLA',
                    'options':{'maxiter':25, 'rhobeg': 0.1},
            }}
            options.update(min_options)
            res = optimize.basinhopping(lambda x: self.optfun(*x),
                                        init_vals, **options)
            x = res.x
        elif method=='brute':
            min_opts = dict(min_options)
            max_iter = min_opts.pop('max_iter', 1000)
            init = list((-0.5, 0.5) for n, p in self.system_model.get_params())
            options = {'Ns':math.floor(math.pow(max_iter, 1/len(init))),
                       'finish':None}
            options.update(min_opts)
            optfun = self.optfun if len(init)==1 else lambda x: self.optfun(*x)
            x = res = optimize.brute(optfun, init, **options)
            x = (x,) if len(init)==1 else x
                
        elif method=='two-step-brute':
            min_opts = dict(min_options)
            
            ## PHASE I
            ## >> 
            first_opts = dict(min_opts.pop('first'))

            im_scale = first_opts.pop('scale', self.im_def_scale)
            self.set_image_zoom_and_resolution(im_scale=im_scale)

            max_iter = first_opts.pop('max_iter', 50)
            init = list((-0.5, 0.5) for n, p in self.system_model.get_params())
            optfun = self.optfun if len(init)==1 else lambda x: self.optfun(*x)            
            options = {'Ns':math.floor(math.pow(max_iter, 1/len(init))),
                       'finish':None}
            options.update(first_opts)
            x = res = optimize.brute(optfun, init, **options)
            x = (x,) if len(init)==1 else x
            self.iter_count = -1
            self.optfun(*x) # set sc params to result values
            ## <<
            ## PHASE I
            
            QCoreApplication.processEvents()
            img = self.render()
            cv2.imwrite(self.debug_filebase+'.png', img)

            if DEBUG:
                print('Phase I: (%.3f, %.3f, %.3f)\n'%(
                    self.system_model.x_off.value,
                    self.system_model.y_off.value,
                    -self.system_model.z_off.value,
                ))
            
            ## PHASE II
            ## >>
            second_opts = dict(min_opts.pop('second'))
            margin = second_opts.pop('margin', 50)
            distance_margin = second_opts.pop('distance_margin', 0.2)
            max_width = second_opts.pop('max_width', -self.system_model.view_width)
            
            # calculate min_dist and max_dist
            min_dist = (-self.system_model.z_off.value) * (1-distance_margin)
            max_dist = (-self.system_model.z_off.value) * (1+distance_margin)

            # calculate im_xoff, im_xoff, im_width, im_height
            render_result = self.render()
            xo, yo, w, h, sc = self._get_bounds(render_result, margin, max_width)
            
            if DEBUG:
                print('min_d, max_d, xo, yo, w, h, sc: %.3f, %.3f, %.0f, %.0f, %.0f, %.0f, %.3f\n'%(
                    min_dist, max_dist, xo, yo, w, h, sc
                ))
            
            if True:
                self.debug_filebase = outfile+'r2'
                
                # limit distance range
                self.system_model.z_off.range = (-max_dist, -min_dist)
                
                # set cropping & zooming
                self.set_image_zoom_and_resolution(
                                            im_xoff=xo, im_yoff=yo,
                                            im_width=w, im_height=h, im_scale=sc)
                self._target_image = np.float32(self._image)
                img = self.render()
                cv2.imwrite(self.debug_filebase + '.png', img)

                # set optimization params
                max_iter = second_opts.pop('max_iter', 18)
                init = list((-0.5, 0.5) for n, p in self.system_model.get_params())
                optfun = self.optfun if len(init)==1 else lambda x: self.optfun(*x)
                options = {'Ns':math.floor(math.pow(max_iter, 1/len(init))),
                           'finish':None}
                options.update(second_opts)
            
                # optimize
                x = res = optimize.brute(optfun, init, **options)
                x = (x,) if len(init)==1 else x
            
        self.iter_count = -1
        self.optfun(*x)
        outfile = imgfile[0:-4]+'r3'+'.png'
        img = self.render()
        cv2.imwrite(outfile, img)

        if method=='two-step-brute':
            self.system_model.z_off.range = (-self.system_model.max_distance, -self.system_model.min_med_distance)
            if DEBUG:
                print('Phase II: (%s, %s, %s)\n'%(
                    self.system_model.x_off.value,
                    self.system_model.y_off.value,
                    -self.system_model.z_off.value,
                ))
        
        if not BATCH_MODE or DEBUG:
            print('%s'%res)
            print('seconds: %s'%(datetime.now()-self.start_time))
            
        if self.im_def_scale != self.im_scale:
            self.set_image_zoom_and_resolution(im_scale=self.im_def_scale)
        
        return True
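
The 'two-step-brute' branch above reduces to a generic pattern worth noting: a coarse global grid over the normalized parameter box, then a tighter grid around the phase-one optimum. A toy sketch, with an invented objective standing in for optfun:

import numpy as np
from scipy import optimize

def toy_optfun(x):
    return float(np.sin(3 * x[0]) + (x[0] - 0.2) ** 2 + (x[1] + 0.4) ** 2)

# phase I: coarse scan of the normalized (-0.5, 0.5) box
init = [(-0.5, 0.5), (-0.5, 0.5)]
x1 = optimize.brute(toy_optfun, init, Ns=7, finish=None)

# phase II: finer scan in a small box around the phase-I optimum
refined = [(v - 0.1, v + 0.1) for v in x1]
x2 = optimize.brute(toy_optfun, refined, Ns=7, finish=None)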
コード例 #51
0
ファイル: scipy_test.py プロジェクト: zbcstudy/python
from scipy import optimize
import numpy as np
import matplotlib.pyplot as plt


def f(x):
    return x ** 2 + 10 * np.sin(x)


if __name__ == '__main__':
    # Plot the objective function
    plt.figure(figsize=(10, 5))
    x = np.arange(-10, 10, 0.1)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title("optimize")
    plt.plot(x, f(x), 'r-', label=r'$f(x)=x^2 + 10\sin(x)$')
    # function value at the lowest point of the curve
    a = f(-1.3)
    optimize.fmin_bfgs(f, 0)
    grid = (-10, 10, 0.1)
    print(optimize.brute(f, (grid,)))  # 全局最小值
    plt.annotate('min', xy=(-1.3, a), xytext=(3, 40), arrowprops=dict(facecolor='black', shrink=0.05))
    plt.legend()
    plt.show()

コード例 #52
0
def main():
    sess = tf.Session()
    sess.__enter__()

    snapshot = joblib.load(latent_policy_pkl)
    latent_policy = snapshot["policy"]
    ntasks = latent_policy.task_space.shape[0]
    tasks = np.eye(ntasks)
    latents = [
        latent_policy.get_latent(tasks[t])[1]["mean"] for t in range(ntasks)
    ]
    print("Latents:\n\t", "\n\t".join(map(str, latents)))

    inner_env = SimpleSequenceReacherEnv(sequence=GOAL_SEQUENCE,
                                         control_method="position_control",
                                         completion_bonus=0.,
                                         randomize_start_jpos=False,
                                         action_scale=0.04,
                                         distance_threshold=0.05)

    env = DiscreteEmbeddedPolicyEnv(inner_env,
                                    latent_policy,
                                    latents=latents,
                                    skip_steps=SKIP_STEPS,
                                    deterministic=True)

    if SEARCH_METHOD == "brute":

        def f(x):
            env.reset()
            reward = 0.
            for i in range(ITERATIONS):
                obs, r, done, info = env.step(int(x[i]))
                reward += r
            print(x, "\tr:", reward)
            return -reward  # optimizers minimize by default

        print("Brute-forcing", ntasks**ITERATIONS, "combinations...")
        ranges = (slice(0, ntasks, 1), ) * ITERATIONS
        result = brute(f, ranges, disp=True, finish=None)
    elif SEARCH_METHOD == "greedy":
        result = greedy(env, ntasks)
    elif SEARCH_METHOD == "ucs":
        result = ucs(env, ntasks)
    else:
        raise NotImplementedError

    print("Result:", result)

    markers = []
    for i, g in enumerate(GOAL_SEQUENCE):
        markers.append(
            dict(pos=g,
                 size=0.01 * np.ones(3),
                 label="Goal {}".format(i + 1),
                 rgba=np.array([1., 0.8, 0., 1.])))
    for i, g in enumerate(TASKS):
        markers.append(
            dict(pos=g,
                 size=0.01 * np.ones(3),
                 label="Task {}".format(i + 1),
                 rgba=np.array([0.5, 0.5, 0.5, 0.8])))

    print("Collecting %i rollouts..." % SAVE_N_ROLLOUTS)
    rollouts = []
    for _ in tqdm(range(SAVE_N_ROLLOUTS)):
        env.reset()
        reward = 0.
        seq_info = {
            "latents": [],
            "latent_indices": [],
            "observations": [],
            "actions": [],
            "infos": [],
            "rewards": [],
            "dones": []
        }
        for i in range(ITERATIONS):
            obs, r, done, info = env.step(int(result[i]))
            seq_info["latents"] += info["latents"]
            seq_info["latent_indices"] += info["latent_indices"]
            seq_info["observations"] += info["observations"]
            seq_info["actions"] += info["actions"]
            seq_info["infos"] += info["infos"]
            seq_info["rewards"] += info["rewards"]
            seq_info["dones"] += info["dones"]
            reward += r
        # print(result, "\tr:", reward)
        rollouts.append(copy.deepcopy(seq_info))

    pickle.dump(rollouts, open("rollout_search_sequencer.pkl", "wb"))

    while True:
        env.reset()
        reward = 0.
        for i in range(ITERATIONS):
            obs, r, done, info = env.step(int(result[i]),
                                          animate=True,
                                          markers=markers)
            reward = r
        print(result, "\tr:", reward)
コード例 #53
0
plt.legend(loc=4)
plt.savefig("/home/mxh909/Desktop/magee_resolution_images/Resolution-calculated_and_theoretical_poro.pdf",bbox_inches='tight')
plt.show()



#### Try grid search for backstripping parameters ####
from scipy.optimize import brute 
def misfit2(par, log_data=log_data): 
    # Fit theoretical model 
    theoretical_poro = par[0] * np.exp(-par[1]*(log_data['DEPTH'][2145:12100]/1000))    
    mse = np.sum(np.power(theoretical_poro - log_data['Raymer_Hunt_Gardner_porosity_sonic'][2145:12100], 2))
    return mse 

# First slice in ranges is phi0, second is c. 
x0, fval, grid, jout = brute(func=misfit2, ranges=(slice(0.3,0.75,0.01),slice(0.25,0.65, 0.01)), 
                              Ns=20, finish=None, full_output=True)

# The extent ordering of the grid is confusing... but I'm fairly sure this is right. 
plt.imshow(jout, extent=[0.75, 0.3, 0.25, 0.65])
plt.xlabel(r'$\phi_0$', fontsize=16)
plt.ylabel('C')
plt.title('MSE of decompaction parameters')
plt.colorbar()
plt.savefig('/home/mxh909/Desktop/magee_resolution_images/Resolution-decomp_grid_search.pdf',bbox_inches='tight')

#########################################
############### Load files ##############
#########################################
## Read in horizons exported from kingdom. 
def read_horizon_files(file):
    assert type(file) == str, "File not a string"
コード例 #54
0
def identify_clutch_window(times, accelerations, gear_shifts,
                           engine_speeds_out, engine_speeds_out_hot,
                           cold_start_speeds_delta, max_clutch_window_width,
                           velocities, gear_box_speeds_in, gears,
                           stop_velocity):
    """
    Identifies clutching time window [s].

    :param times:
        Time vector [s].
    :type times: numpy.array

    :param accelerations:
        Acceleration vector [m/s2].
    :type accelerations: numpy.array

    :param gear_shifts:
        When there is a gear shifting [-].
    :type gear_shifts: numpy.array

    :param engine_speeds_out:
        Engine speed [RPM].
    :type engine_speeds_out: numpy.array

    :param engine_speeds_out_hot:
        Engine speed at hot condition [RPM].
    :type engine_speeds_out_hot: numpy.array

    :param cold_start_speeds_delta:
        Engine speed delta due to the cold start [RPM].
    :type cold_start_speeds_delta: numpy.array

    :param max_clutch_window_width:
        Maximum clutch window width [s].
    :type max_clutch_window_width: float

    :param velocities:
        Velocity vector [km/h].
    :type velocities: numpy.array

    :param gear_box_speeds_in:
        Gear box speed vector [RPM].
    :type gear_box_speeds_in: numpy.array

    :param gears:
        Gear vector [-].
    :type gears: numpy.array

    :param stop_velocity:
        Maximum velocity to consider the vehicle stopped [km/h].
    :type stop_velocity: float

    :return:
        Clutching time window [s].
    :rtype: tuple
    """

    if not gear_shifts.any():
        return 0.0, 0.0

    delta = engine_speeds_out - engine_speeds_out_hot - cold_start_speeds_delta

    X = np.column_stack((accelerations, velocities, gear_box_speeds_in, gears))

    calculate_c_p = functools.partial(calculate_clutch_phases, times,
                                      velocities, gears, gear_shifts,
                                      stop_velocity)

    def _error(v):
        dn, up = v
        if up - dn > max_clutch_window_width:
            return np.inf
        clutch_phases = calculate_c_p(v)
        model = calibrate_clutch_prediction_model(times, clutch_phases,
                                                  accelerations, delta,
                                                  velocities,
                                                  gear_box_speeds_in, gears)
        res = np.mean(np.abs(delta - model.model(times, clutch_phases, X)))
        return np.float32(res)

    dt = max_clutch_window_width
    Ns = int(dt / max(np.min(np.diff(times)), 0.5)) + 1
    return tuple(sci_opt.brute(_error, ((-dt, 0), (dt, 0)), Ns=Ns,
                               finish=None))
コード例 #55
0
ファイル: holtwinters.py プロジェクト: kasunsp/pinalpha_mvp
    def fit(self,
            smoothing_level=None,
            smoothing_slope=None,
            smoothing_seasonal=None,
            damping_slope=None,
            optimized=True,
            use_boxcox=False,
            remove_bias=False,
            use_basinhopping=False):
        """
        fit Holt Winter's Exponential Smoothing

        Parameters
        ----------
        smoothing_level : float, optional
            The alpha value of the simple exponential smoothing, if the value is
            set then this value will be used as the value.
        smoothing_slope :  float, optional
            The beta value of the holts trend method, if the value is
            set then this value will be used as the value.
        smoothing_seasonal : float, optional
            The gamma value of the holt winters seasonal method, if the value is
            set then this value will be used as the value.
        damping_slope : float, optional
            The phi value of the damped method, if the value is
            set then this value will be used as the value.
        optimized : bool, optional
            Should the values that have not been set above be optimized 
            automatically?
        use_boxcox : {True, False, 'log', float}, optional
            Should the boxcox transform be applied to the data first? If 'log' 
            then apply the log. If float then use lambda equal to float.
        remove_bias : bool, optional
            Should the bias be removed from the forecast values and fitted values 
            before being returned? Does this by enforcing average residuals equal 
            to zero.
        use_basinhopping : bool, optional
            Should the optimizer try harder using basinhopping to find optimal 
            values?

        Returns
        -------
        results : HoltWintersResults class
            See statsmodels.tsa.holtwinters.HoltWintersResults

        Notes
        -----
        This is a full implementation of the holt winters exponential smoothing as
        per [1]. This includes all the unstable methods as well as the stable methods.
        The implementation of the library covers the functionality of the R 
        library as much as possible whilst still being pythonic.

        References
        ----------
        [1] Hyndman, Rob J., and George Athanasopoulos. Forecasting: principles and practice. OTexts, 2014.
        """
        # Rename variables to alpha, beta, etc., as this helps with following
        # the mathematical notation in general
        alpha = smoothing_level
        beta = smoothing_slope
        gamma = smoothing_seasonal
        phi = damping_slope

        data = self.endog
        damped = self.damped
        seasoning = self.seasoning
        trending = self.trending
        trend = self.trend
        seasonal = self.seasonal
        m = self.seasonal_periods
        opt = None
        phi = phi if damped else 1.0
        if use_boxcox == 'log':
            lamda = 0.0
            y = boxcox(data, lamda)
        elif isinstance(use_boxcox, float):
            lamda = use_boxcox
            y = boxcox(data, lamda)
        elif use_boxcox:
            y, lamda = boxcox(data)
        else:
            lamda = None
            y = data.squeeze()
        if np.ndim(y) != 1:
            raise NotImplementedError('Only 1 dimensional data supported')
        l = np.zeros((self.nobs, ))
        b = np.zeros((self.nobs, ))
        s = np.zeros((self.nobs + m - 1, ))
        p = np.zeros(6 + m)
        max_seen = np.finfo(np.double).max
        if seasoning:
            l0 = y[np.arange(self.nobs) % m == 0].mean()
            b0 = ((y[m:m + m] - y[:m]) / m).mean() if trending else None
            s0 = list(y[:m] / l0) if seasonal == 'mul' else list(y[:m] - l0)
        elif trending:
            l0 = y[0]
            b0 = y[1] / y[0] if trend == 'mul' else y[1] - y[0]
            s0 = []
        else:
            l0 = y[0]
            b0 = None
            s0 = []
        if optimized:
            init_alpha = alpha if alpha is not None else 0.5 / max(m, 1)
            init_beta = beta if beta is not None else 0.1 * init_alpha if trending else beta
            init_gamma = None
            init_phi = phi if phi is not None else 0.99
            # Selection of functions to optimize for the appropriate parameters
            func_dict = {
                ('mul', 'add'): _holt_win_add_mul_dam,
                ('mul', 'mul'): _holt_win_mul_mul_dam,
                ('mul', None): _holt_win__mul,
                ('add', 'add'): _holt_win_add_add_dam,
                ('add', 'mul'): _holt_win_mul_add_dam,
                ('add', None): _holt_win__add,
                (None, 'add'): _holt_add_dam,
                (None, 'mul'): _holt_mul_dam,
                (None, None): _holt__
            }
            if seasoning:
                init_gamma = gamma if gamma is not None else 0.05 * \
                    (1 - init_alpha)
                xi = np.array([
                    alpha is None, beta is None, gamma is None, True, trending,
                    phi is None and damped
                ] + [True] * m)
                func = func_dict[(seasonal, trend)]
            elif trending:
                xi = np.array([
                    alpha is None, beta is None, False, True, True, phi is None
                    and damped
                ] + [False] * m)
                func = func_dict[(None, trend)]
            else:
                xi = np.array(
                    [alpha is None, False, False, True, False, False] +
                    [False] * m)
                func = func_dict[(None, None)]
            p[:] = [init_alpha, init_beta, init_gamma, l0, b0, init_phi] + s0

            # txi [alpha, beta, gamma, l0, b0, phi, s0,..,s_(m-1)]
            # Have a quick look in the region for a good starting place for alpha etc.
            # using guestimates for the levels
            txi = xi & np.array([True, True, True, False, False, True] +
                                [False] * m)
            bounds = np.array([(0.0, 1.0), (0.0, 1.0), (0.0, 1.0), (0.0, None),
                               (0.0, None), (0.0, 1.0)] + [
                                   (None, None),
                               ] * m)
            res = brute(func,
                        bounds[txi],
                        (txi, p, y, l, b, s, m, self.nobs, max_seen),
                        Ns=20,
                        full_output=True,
                        finish=None)
            (p[txi], max_seen, grid, Jout) = res
            [alpha, beta, gamma, l0, b0, phi] = p[:6]
            s0 = p[6:]
            #bounds = np.array([(0.0,1.0),(0.0,1.0),(0.0,1.0),(0.0,None),(0.0,None),(0.8,1.0)] + [(None,None),]*m)
            if use_basinhopping:
                # Take a deeper look in the local minimum we are in to find the
                # best parameter solution, maybe hopping around to try to escape
                # the local minimum we may be in.
                res = basinhopping(func,
                                   p[xi],
                                   minimizer_kwargs={
                                       'args': (xi, p, y, l, b, s, m,
                                                self.nobs, max_seen),
                                       'bounds':
                                       bounds[xi]
                                   },
                                   stepsize=0.01)
            else:
                # Take a deeper look in the local minimum we are in to find the best
                # solution to parameters
                res = minimize(func,
                               p[xi],
                               args=(xi, p, y, l, b, s, m, self.nobs,
                                     max_seen),
                               bounds=bounds[xi])
            p[xi] = res.x
            [alpha, beta, gamma, l0, b0, phi] = p[:6]
            s0 = p[6:]
            opt = res
        hwfit = self._predict(h=0,
                              smoothing_level=alpha,
                              smoothing_slope=beta,
                              smoothing_seasonal=gamma,
                              damping_slope=phi,
                              initial_level=l0,
                              initial_slope=b0,
                              initial_seasons=s0,
                              use_boxcox=use_boxcox,
                              lamda=lamda,
                              remove_bias=remove_bias)
        hwfit._results.mle_retvals = opt
        return hwfit
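
The masked-parameter trick above (txi selects which of the smoothing parameters brute may vary, while the rest stay pinned in p) generalizes; a minimal sketch with an invented objective:

import numpy as np
from scipy.optimize import brute

p = np.array([0.5, 0.1, 0.05, 0.99])         # full parameter vector
mask = np.array([True, False, True, False])  # optimize only p[0] and p[2]

def func(free, mask, p):
    q = p.copy()
    q[mask] = free  # splice the free parameters back into the full vector
    return (q[0] - 0.3) ** 2 + (q[2] - 0.02) ** 2

bounds = np.array([(0.0, 1.0)] * 4)
best = brute(func, bounds[mask], args=(mask, p), Ns=20, finish=None)
p[mask] = best  # p[0] ~ 0.3, p[2] ~ 0.02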
コード例 #56
0
ファイル: GSASIIindex.py プロジェクト: isaxs/pyGSAS
def findMV(peaks, controls, ssopt, Inst, dlg):
    def Val2Vec(vec, Vref, values):
        Vec = []
        i = 0
        for j, r in enumerate(Vref):
            if r:
                if values.size > 1:
                    Vec.append(max(0.0, min(1.0, values[i])))
                else:
                    Vec.append(max(0.0, min(1.0, values)))
                i += 1
            else:
                Vec.append(vec[j])
        return np.array(Vec)

    def ZSSfunc(values,
                peaks,
                dmin,
                Inst,
                SGData,
                SSGData,
                vec,
                Vref,
                maxH,
                A,
                wave,
                Z,
                dlg=None):
        Vec = Val2Vec(vec, Vref, values)
        HKL = G2pwd.getHKLMpeak(dmin, Inst, SGData, SSGData, Vec, maxH, A)
        Peaks = np.array(IndexSSPeaks(peaks, HKL)[1]).T
        Qo = 1. / Peaks[-2]**2
        Qc = G2lat.calc_rDsqZSS(Peaks[4:8], A, Vec, Z, Peaks[0], wave)
        chi = np.sum((Qo - Qc)**2)
        if dlg:
            dlg.Pulse()
        return chi

    if 'C' in Inst['Type'][0]:
        wave = G2mth.getWave(Inst)
    else:
        difC = Inst['difC'][1]
    SGData = G2spc.SpcGroup(controls[13])[1]
    SSGData = G2spc.SSpcGroup(SGData, ssopt['ssSymb'])[1]
    A = G2lat.cell2A(controls[6:12])
    Z = controls[1]
    Vref = [True if x in ssopt['ssSymb'] else False for x in ['a', 'b', 'g']]
    values = []
    ranges = []
    for v, r in zip(ssopt['ModVec'], Vref):
        if r:
            ranges += [
                slice(.02, .98, .05),
            ]
            values += [
                v,
            ]
    dmin = getDmin(peaks) - 0.005
    Peaks = np.copy(np.array(peaks).T)
    result = so.brute(ZSSfunc,
                      ranges,
                      finish=so.fmin_cg,
                      args=(peaks, dmin, Inst, SGData, SSGData,
                            ssopt['ModVec'], Vref, ssopt['maxH'], A, wave, Z,
                            dlg))
    return Val2Vec(ssopt['ModVec'], Vref, result)
コード例 #57
0
ファイル: API.py プロジェクト: miluuu/Appolit
    def policy_improvement_for_this_state(self, model, state, t, iteration=0):
        '''Policy improvement step for one particular state at time t (policy stored as dictionary)'''
        const1 = min(state.storage, model.max_discharge)
        const2 = min(model.R_max - state.storage, model.max_charge)
        const3 = min(state.energy, state.demand)

        if self.optimizer_choice == 0:
            # Set initial guess
            initial_guess = model.initial_guess_for_policy_improvement(state)
            res = self.scipy_policy_improvement_for_this_state(
                model, state, t, initial_guess, initial_guess, initial_guess,
                iteration)
            solution = res[0]
            VF_solution = res[1]
            contr_solution = res[2]
###########################
#use gridsearch
        elif self.optimizer_choice == 1:
            #TODO: Use finish function?
            params = (state.storage, state.energy, state.price, state.demand,
                      t)

            def infeasibility_indicator(x, *params):
                x0, x1, x2, x3, x4, x5 = x
                stor, ener, price, dem, t = params
                feas = model.is_feasible(self,
                                         md.Decision(x0, x1, x2, x3, x4, x5),
                                         md.State(stor, ener, price, dem))
                if feas != 0:
                    if feas[0] == 1 and abs(feas[1]) < 0.01:
                        return 0.1
                    return 1
                return 0

            #NOTE: The objective functions are not the same as for the other solvers, as they include the constraints!
            def objective(x, *params):
                x0, x1, x2, x3, x4, x5 = x
                stor, ener, price, dem, t = params
                state = md.State(stor, ener, price, dem)
                decision = md.Decision(x0, x1, x2, x3, x4, x5)
                input = [
                    model.transition(self, decision, state), state.energy,
                    state.price
                ]
                norm = np.linalg.norm(input)
                normalized_input = np.array(input) / norm  # plain lists don't support '/'
                return (infeasibility_indicator(x, *params) * 1e6 -
                        model.contribution(self, decision, state) -
                        norm * self.VF_approximation[t].predict(
                            np.array([normalized_input])))

            def contr_objective(x, *params):
                x0, x1, x2, x3, x4, x5 = x
                stor, ener, price, dem, t = params
                state = md.State(stor, ener, price, dem)
                decision = md.Decision(x0, x1, x2, x3, x4, x5)
                return (infeasibility_indicator(x, *params) * 1e6 -
                        model.contribution(self, decision, state))

            #TODO: Adjust to scaling
            def VF_objective(x, *params):
                x0, x1, x2, x3, x4, x5 = x
                stor, ener, price, dem, t = params
                state = md.State(stor, ener, price, dem)
                decision = md.Decision(x0, x1, x2, x3, x4, x5)
                input = [
                    model.transition(self, decision, state), state.energy,
                    state.price
                ]
                norm = np.linalg.norm(input)
                normalized_input = np.array(input) / norm
                return (infeasibility_indicator(x, *params) * 1e6 -
                        norm * self.VF_approximation[t].predict(
                            np.array([normalized_input])))

            #TODO: different stepsize?
            stepsize = 0.1 * max(const1, const2, const3)
            print("const1:", const1, ", const2:", const2, ", const3:", const3)
            rranges = (slice(0, const3 + stepsize,
                             stepsize), slice(0, const1 + stepsize, stepsize),
                       slice(0, state.demand + stepsize,
                             stepsize), slice(0, const2 + stepsize, stepsize),
                       slice(0, const2 + stepsize,
                             stepsize), slice(0, const1 + stepsize, stepsize))
            #    pt = outputfcts.progress_timer(description = 'Progress', n_iter = 1)
            solution = brute(objective, rranges, args=params, finish=None)
            VF_solution = brute(VF_objective,
                                rranges,
                                args=params,
                                finish=None)
            contr_solution = brute(contr_objective,
                                   rranges,
                                   args=params,
                                   finish=None)

            res = self.scipy_policy_improvement_for_this_state(
                model, state, t, solution, VF_solution, contr_solution,
                iteration)
            solution = res[0]
            VF_solution = res[1]
            contr_solution = res[2]

        #    pt.update()
        #    pt.finish()
################################
#solve maximization problem with Artelys Knitro Solver (student free trial version)
        else:
            solution = policy_improvement.maximize(
                model, self, state,
                lambda decision: self.VF_approximation[t].predict(
                    np.array([[
                        model.transition(
                            self, md.convert_array_to_decision(decision), state
                        ), state.energy, state.price
                    ]])), t)  #GPy version
            VF_solution = policy_improvement_VF_only.maximize(
                model, state,
                lambda decision: self.VF_approximation[t].predict(
                    np.array([[
                        model.transition(
                            self, md.convert_array_to_decision(decision), state
                        ), state.energy, state.price
                    ]])), t)  #GPy version
            contr_solution = policy_improvement_contr_only.maximize(
                model, self, state, t)  #GPy version

        print("Iteration", iteration, " at time", t, solution)
        print("State: st, p, e, d ", state.storage, state.price, state.energy,
              state.demand)
        print(
            "Post-decision storage after", t, ":",
            model.transition(self, md.convert_array_to_decision(solution),
                             state))
        print(
            "Solution is feasible:",
            model.is_feasible(self, md.convert_array_to_decision(solution),
                              state))
        input1 = [
            model.transition(self, md.convert_array_to_decision(solution),
                             state), state.energy, state.price
        ]
        norm1 = np.linalg.norm(input1)
        normalized_input1 = np.array(input1) / norm1
        input2 = [
            model.transition(self, md.convert_array_to_decision(VF_solution),
                             state), state.energy, state.price
        ]
        norm2 = np.linalg.norm(input2)
        normalized_input2 = np.array(input2) / norm2
        print("VF_approximation at solution:",
              norm1 * self.VF_approximation[t].predict(
                  np.array([normalized_input1])))  #GPy version
        print(
            "Contribution at solution:",
            model.contribution(self, md.convert_array_to_decision(solution),
                               state))
        print(
            "Optimal contribution obtained at ", contr_solution,
            "with contribution value:",
            model.contribution(self,
                               md.convert_array_to_decision(contr_solution),
                               state))
        print(
            "Optimal VF obtained at ", VF_solution, "with VF value:", norm2 *
            self.VF_approximation[t].predict(np.array([normalized_input2])))
        return md.convert_array_to_decision(solution)
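The grid-search branch above handles feasibility by adding a large penalty (1e6) to every infeasible trial decision, so infeasible grid points can never win the brute-force scan. A minimal standalone sketch of that penalty pattern, with a hypothetical linear constraint in place of model.is_feasible:

from scipy.optimize import brute

def is_feasible(x):
    # Hypothetical feasibility test standing in for model.is_feasible.
    return x[0] + x[1] <= 1.0

def penalized_objective(x):
    penalty = 0.0 if is_feasible(x) else 1e6  # infeasible points lose
    return penalty + (x[0] - 0.8) ** 2 + (x[1] - 0.8) ** 2

rranges = (slice(0, 1.05, 0.05), slice(0, 1.05, 0.05))
best = brute(penalized_objective, rranges, finish=None)
print(best)  # lands on the feasible boundary, near (0.5, 0.5)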
Code example #58
def htsensoropt_main(args):
    """
    Main entry for the htsensoropt module
    """
    """
    get input parameters
    """
    # seq abundance in initial concentration
    # {'seq0':float,'seq1':float, ... ,'seqk':float}
    iniab_Dicpickle=args.ini_ab
    iniab_Dic=pickle.load(open(iniab_Dicpickle,'rb'))
    #
    # read count table in Dic format
    # each node in the dict points to a DataFrame
    # {'seq0':DF,'seq1':DF, ..., 'seqk':DF, ...}
    # DF structure:
    # columns = ligand1R1 ligand1R2 ligand2R1 ligand2R2 ... (experiment condition)
    # rows = bins of sorting
    # values = read count
    sensor_read_Dicpickle=args.ctab
    sensor_read_Dic=pickle.load(open(sensor_read_Dicpickle,'rb'))
    sensor_Lst=sorted(sensor_read_Dic.keys())  # dict.keys() has no .sort() in Python 3
    #shuffle(sensor_Lst)
    #
    # DataFrame of total read counts for each library (condition i, bin j)
    # columns = ligand1R1 ligand1R2 ligand2R1 ligand2R2 ... (experiment condition)
    # rows = bins of sorting
    # values = read count
    total_read_DFpickle=args.total
    total_read_DF=pickle.load(open(total_read_DFpickle,'rb'))
    #
    # Sub configure file to define the experimental setting
    exp_DF=pd.read_csv(filepath_or_buffer=args.exp_con,sep='\t',index_col='Bin')
    exp_Lst=exp_DF.columns.tolist()
    bins_Lst=exp_DF.index.tolist()
    #
    # Sub configure file to define the cell number sorted into each bin in the original experiment
    cell_DF=pd.read_csv(filepath_or_buffer=args.cell_con,sep='\t',index_col='Bin')
    #
    # Sub configure file to define the Pij (occupation of each bin) for each sorting experiment
    # columns = ligand1R1 ligand1R2 ligand2R1 ligand2R2 ... (experiment condition)
    # rows = bins of sorting
    # values = occupation of each bin in its relevant experiment
    Bin_occ_DF=pd.read_csv(filepath_or_buffer=args.bin_occ_con,sep='\t',index_col='Bin')
    #
    # Sub configure file to define the boundaries between bins for each sorting experiment
    # columns = ligand1R1 ligand1R2 ligand2R1 ligand2R2 ... (experiment condition)
    # rows = bins of sorting
    # values = [LogA0,LogA1]
    Bin_bou_DF=pd.read_csv(filepath_or_buffer=args.bin_bou_con,sep='\t',index_col='Bin')
    Bin_bou_Dic=Bin_bou_DF.to_dict()
    for exp in exp_Lst:
        for bins in bins_Lst:
            temp=Bin_bou_DF.loc[bins,exp].split(',')
            Bin_bou_Dic[exp][bins]=[float(temp[0]),float(temp[1])]
    Bin_bou_DF=pd.DataFrame(Bin_bou_Dic)
    #
    #search range of Log10u and sigma during optimization
    rrange=args.rrange.split(',')
    range_Log10u=[]
    range_sigma=[]
    if len(rrange)!=4:
        logging.error('incorrect rrange setting: expected four comma-separated values!')
        sys.exit(-1)
    else:
        range_Log10u=[float(rrange[0]),float(rrange[1])]
        range_sigma=[float(rrange[2]),float(rrange[3])]
    #
    # output directory
    list_files=args.output_prefix.split('/')
    output_dir=''
    for i in range(len(list_files)-1):
        output_dir=output_dir+list_files[i]+'/'
    os.system('mkdir -p %s' %(output_dir))
    os.system('mkdir -p %sheatmap/' %(output_dir))
    os.system('mkdir -p %scell_count/'%(output_dir))
    # search calculation intensity
    # number of trial for each parameter in a constructed grid space within the search range
    search_number=args.search
    #
    # construct the 2D grid space
    rrange = ((range_Log10u[0], range_Log10u[1]), (range_sigma[0], range_sigma[1]))
    #
    # cell count threshold to exclude sensor mutant with total cell count below in one sorting experiment
    cellth=args.cell_th
    os.system('cat /dev/null > %seliminated_cell_exp.txt'%(output_dir))
    eliminated_list=open('%seliminated_cell_exp.txt'%(output_dir),'w')
    """
    Search for the peak at the grid 2D space of Log10u and sigma
    """
    # dict to store the optimization process
    sensor_Log10uDic={}
    sensor_sigmaDic={}
    sensor_negLog10PDic={}
    for sensor in sensor_Lst:
        Psensor=iniab_Dic[sensor]
        sensor_Log10uDic[sensor]={}
        sensor_sigmaDic[sensor]={}
        sensor_negLog10PDic[sensor]={}
        cellcountDic={}
        for exp in exp_Lst:
            sensor_Log10uDic[sensor][exp]=0.0
            sensor_sigmaDic[sensor][exp]=0.0
            sensor_negLog10PDic[sensor][exp]=0.0
            cellcountDic[exp]={}
            DF_dic={}
            for bins in bins_Lst:
                cell_count=cell_DF.loc[bins,exp]
                Readj=total_read_DF.loc[bins,exp]
                # check whether Readj>>Cellj
                if Readj>10*cell_count:
                    correction_factor=float(cell_count)/float(Readj)
                else:
                    correction_factor=1.0
                DF_dic[bins]={}
                DF_dic[bins]['Readj']=int(total_read_DF.loc[bins,exp]*correction_factor)
                DF_dic[bins]['Readjsensor']=int(sensor_read_Dic[sensor].loc[bins,exp]*correction_factor)
                cellcountDic[exp][bins]=DF_dic[bins]['Readjsensor']
                DF_dic[bins]['Pj']=Bin_occ_DF.loc[bins,exp]
                DF_dic[bins]['BinBoundary_Lst']=Bin_bou_DF.loc[bins,exp]
            DF=pd.DataFrame(DF_dic).T
            # cell count across all bins > a given threshold
            if np.sum(DF['Readjsensor'])>cellth:
                paras=(DF, Psensor)
                res = optimize.brute(ObjectiveF, rrange, args = paras, Ns = search_number, full_output = True, finish = optimize.fmin)
                # reject those results beyond the search range we defined due to the finish = optimize.fmin step
                if (res[0][0]<range_Log10u[0]) or (res[0][0]>range_Log10u[1]) or (res[0][1]<range_sigma[0]) or (res[0][1]>range_sigma[1]):
                    res = optimize.brute(ObjectiveF, rrange, args = paras, Ns = search_number*3, full_output = True, finish = None)
                sensor_Log10uDic[sensor][exp]=res[0][0]
                sensor_sigmaDic[sensor][exp]=res[0][1]
                sensor_negLog10PDic[sensor][exp]=res[1]
                print ('%s: %s result: %s %s'%(sensor,exp,res[0],res[1]))
                # res[0]: best (Log10u, sigma); res[1]: best -sumlnP; res[2]: grid space array; res[3]: -sumlnP landscape array
                X= res[2][0]
                Y= res[2][1]
                Z= res[3]
                # to improve the resolution of the minimum -Log10P region in the heatmap, we cap each value at min(10*globalMin, value)
                Zmin=Z.min()
                Z[Z >10*Zmin] = 10*Zmin
                # store the optimization process of each condition as a 2D heatmap
                #plt.contourf(X,Y,Z)
                #plt.colorbar()
                #plt.xlabel('Log10u')
                #plt.ylabel('sigma')
                #plt.savefig('%sheatmap/%s_%s_opt.png'%(output_dir,sensor,exp))
                #plt.clf()
            else:
                sensor_Log10uDic[sensor][exp]=np.NaN
                sensor_sigmaDic[sensor][exp]=np.NaN
                sensor_negLog10PDic[sensor][exp]=np.NaN
                print('%s in %s has total cell count below threshold'%(sensor,exp))
                print('%s_%s'%(sensor,exp),file=eliminated_list)
        # write in the cell count of each bin of one particular sensor
        cellcountDF=pd.DataFrame(cellcountDic)
        cellcountDF.to_csv('%scell_count/%s.csv'%(output_dir,sensor),sep='\t')
    # pandas sort the df automatically during construction
    sensor_Log10uDF=pd.DataFrame(sensor_Log10uDic).T
    sensor_sigmaDF=pd.DataFrame(sensor_sigmaDic).T
    sensor_negLog10PDF=pd.DataFrame(sensor_negLog10PDic).T
    sensor_Log10uDF.index.name='Sensor'
    sensor_sigmaDF.index.name='Sensor'
    sensor_negLog10PDF.index.name='Sensor'
    # write to file
    # save some Python objects as pickle files
    sensor_Log10uDF.to_csv('%ssensor_Log10u.csv'%(output_dir),sep='\t')
    sensor_sigmaDF.to_csv('%ssensor_sigma.csv'%(output_dir),sep='\t')
    sensor_negLog10PDF.to_csv('%ssensor_negLog10P.csv'%(output_dir),sep='\t')
    eliminated_list.close()
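Because finish=optimize.fmin polishes the grid optimum without bounds, the refined (Log10u, sigma) can escape the search box, which is why the code above re-runs brute on a three-times denser grid with finish=None. A minimal sketch of that fallback, with a hypothetical objective whose true minimum lies outside the box:

from scipy import optimize

def neg_loglik(x):
    u, sigma = x
    return (u - 0.2) ** 2 + (sigma - 1.5) ** 2  # hypothetical -log P surface

rrange = ((0.0, 1.0), (0.5, 1.2))  # sigma box deliberately excludes 1.5
res = optimize.brute(neg_loglik, rrange, Ns=20, full_output=True,
                     finish=optimize.fmin)
u, sigma = res[0]
if not (0.0 <= u <= 1.0 and 0.5 <= sigma <= 1.2):
    # the polished point escaped the box: redo a pure, denser grid scan
    res = optimize.brute(neg_loglik, rrange, Ns=60, full_output=True,
                         finish=None)
print(res[0], res[1])  # best in-box parameters and objective value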
Code example #59
def design(n,
           spacing,
           shift,
           fI,
           fC=False,
           r=None,
           r_def=(1, 1, 2),
           reim=None,
           cvar='amp',
           error=0.01,
           name=None,
           full_output=False,
           finish=False,
           save=True,
           path='filters',
           verb=2,
           plot=1):
    r"""Digital linear filter (DLF) design

    This routine can be used to design digital linear filters for the Hankel
    or Fourier transform, or for any linear transform ([Ghos70]_). For this,
    the included theoretical transform pairs or user-provided ones can be
    used. Alternatively, one can use the EM modeller empymod to obtain the
    responses to an arbitrary 1D model as a numerical transform pair.

    This filter designing tool uses the direct matrix inversion method as
    described in [Kong07]_ and is based on scripts by [Key12]_. The tool is an
    add-on to the electromagnetic modeller empymod [Wert17]_. Fruitful
    discussions with Evert Slob and Kerry Key improved the add-on
    substantially.

    Example notebooks of its usage can be found in the documentation-gallery,
    https://empymod.emsig.xyz/en/stable/gallery

    Parameters
    ----------
    n : int
        Filter length.

    spacing: float or tuple (start, stop, num)
        Spacing between filter points. If tuple, it corresponds to the input
        for np.linspace with endpoint=True.

    shift: float or tuple (start, stop, num)
        Shift of base from zero. If tuple, it corresponds to the input for
        np.linspace with endpoint=True.

    fI, fC : transform pairs
        Theoretical or numerical transform pair(s) for the inversion (I) and
        for the check of goodness (fC). fC is optional. If not provided, fI is
        used for both fI and fC.

    r : array, optional
        Right-hand side evaluation points for the check of goodness (fC).
        Defaults to r = np.logspace(0, 5, 1000), which is a lot of evaluation
        points and, depending on the transform pair, a far wider range of r
        than needed.

    r_def : tuple (add_left, add_right, factor), optional
        Definition of the right-hand side evaluation points r of the inversion.
        r is derived from the base values, default is (1, 1, 2).

        - rmin = log10(1/max(base)) - add_left
        - rmax = log10(1/min(base)) + add_right
        - r = logspace(rmin, rmax, factor*n)

    reim : np.real or np.imag, optional
        Which part of complex transform pairs is used for the inversion.
        Defaults to np.real.

    cvar : string {'amp', 'r'}, optional
        If 'amp', the inversion minimizes the amplitude. If 'r', the inversion
        maximizes the right-hand side evaluation point r. Default is 'amp'.

    error : float, optional
        Up to which relative error the transformation is considered good in the
        evaluation of the goodness. Default is 0.01 (1 %).

    name : str, optional
        Name of the filter. Defaults to dlf_+str(n).

    full_output : bool, optional
        If True, returns best filter and output from scipy.optimize.brute; else
        only filter. Default is False.

    finish : None, True, or callable, optional
        If callable, it is passed through to scipy.optimize.brute: the
        minimization function used to polish the best result from the
        brute-force approach. Default is False. You can simply provide True in
        order to use scipy.optimize.fmin_powell(). Leave it at False or None
        if you are only interested in the actually provided
        spacing/shift-values.

    save : bool, optional
        If True, the best filter is saved to plain text files in ./filters/.
        It can be loaded with fdesign.load_filter(name). If `full_output` is
        True, the inversion output is stored too. You can add '.gz' to `name`,
        which will then save the full inversion output in a compressed file
        instead of plain text.

    path : string, optional
        Absolute or relative path where output will be saved if `save=True`.
        Default is 'filters'.

    verb : {0, 1, 2}, optional
        Level of verbosity, default is 2:

        - 0: Print nothing.
        - 1: Print warnings.
        - 2: Print additional time, progress, and result

    plot : {0, 1, 2, 3}, optional
        Level of plot-verbosity, default is 1:

        - 0: Plot nothing.
        - 1: Plot brute-force result
        - 2: Plot additional theoretical transform pairs, and best inv.
        - 3: Plot additional inversion result
          (can result in lots of plots depending on spacing and shift)
          If you are using a notebook, use %matplotlib notebook to have
          all inversion results appear in the same plot.

    Returns
    -------
    filter : empymod.filter.DigitalFilter instance
        Best filter for the input parameters.
    full : tuple
        Output from scipy.optimize.brute with full_output=True. (Returned when
        `full_output` is True.)

    """

    # === 1.  LET'S START ============
    t0 = printstartfinish(verb)

    # Check plot with matplotlib (soft dependency)
    if plot > 0 and not plt:
        plot = 0
        if verb > 0:
            print(plt_msg)

    # Ensure fI, fC are lists
    def check_f(f):
        if hasattr(f, 'name'):  # put into list if single tp
            f = [
                f,
            ]
        else:  # ensure list (works for lists, tuples, arrays)
            f = list(f)
        return f

    if not fC:  # copy fI if fC not provided
        fC = dc(fI)
    fI = check_f(fI)
    if fI[0].name == 'j2':
        raise ValueError("j2 (jointly j0 and j1) is only implemented for "
                         "fC, not for fI!")
    fC = check_f(fC)

    # Check default input values
    if finish and not callable(finish):
        finish = fmin_powell
    if name is None:
        name = 'dlf_' + str(n)
    if r is None:
        r = np.logspace(0, 5, 1000)
    if reim not in [np.real, np.imag]:
        reim = np.real

    # Get spacing and shift slices, cast r
    ispacing = _ls2ar(spacing, 'spacing')
    ishift = _ls2ar(shift, 'shift')
    r = np.atleast_1d(r)

    # Initialize log-dict to keep track in brute-force minimization-function.
    log = {
        'cnt1': -1,   # counter
        'cnt2': -1,   # %-counter
        'totnr': np.arange(*ispacing).size * np.arange(*ishift).size,  # total iterations
        'time': t0,   # timer
        'warn-r': 0,  # warning for short r
    }

    # === 2.  THEORETICAL MODEL rhs ============

    # Calculate rhs
    for i, f in enumerate(fC):
        fC[i].rhs = f.rhs(r)

    # Plot
    if plot > 1:
        _call_qc_transform_pairs(n, ispacing, ishift, fI, fC, r, r_def, reim)

    # === 3. RUN BRUTE FORCE OVER THE GRID ============
    full = brute(_get_min_val, (ispacing, ishift),
                 full_output=True,
                 args=(n, fI, fC, r, r_def, error, reim, cvar, verb, plot,
                       log),
                 finish=finish)

    # Add cvar-information to full: 0 for 'amp', 1 for 'r'
    if cvar == 'r':
        full += (1, )
    else:
        full += (0, )

    # Finish the printed output from brute/fmin, depending on whether finish was used
    if verb > 1:
        print("")
        if callable(finish):
            print("")

    # Get best filter (full[0] contains spacing/shift of the best result).
    dlf = _calculate_filter(n, full[0][0], full[0][1], fI, r_def, reim, name)

    # If verbose, print result
    if verb > 1:
        print_result(dlf, full)

    # === 4.  FINISHED ============
    printstartfinish(verb, t0)

    # If plot, show result
    if plot > 0:
        print("* QC: Overview of brute-force inversion:")
        plot_result(dlf, full, False)
        if plot > 1:
            print("* QC: Inversion result of best filter (minimum amplitude):")
            _get_min_val(full[0], n, fI, fC, r, r_def, error, reim, cvar, 0,
                         plot + 1, log)

    # Save if desired
    if save:
        if full_output:
            save_filter(name, dlf, full, path=path)
        else:
            save_filter(name, dlf, path=path)

    # Output, depending on full_output
    if full_output:
        return dlf, full
    else:
        return dlf
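A hedged usage sketch of design() follows; the parameter values are purely illustrative, and it assumes the theoretical Hankel transform pairs fdesign.j0_1/j1_1 that ship with the same module:

import numpy as np
from empymod.scripts import fdesign

dlf, full = fdesign.design(
    n=201,                                   # filter length
    spacing=(0.02, 0.06, 10),                # grid of spacings (np.linspace input)
    shift=(-1.0, 1.0, 10),                   # grid of shifts
    fI=(fdesign.j0_1(5), fdesign.j1_1(5)),   # transform pairs for the inversion
    r=np.logspace(0, 3, 100),                # evaluation points for the check
    full_output=True,                        # also return the brute() output
    verb=1, plot=0, save=False)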
Code example #60
import scipy.optimize as opt


def Test():
    # Sp is a two-parameter objective defined elsewhere in the original file.
    rranges = (slice(0, 1, 0.01), slice(0, 0.5, 0.01))
    res = opt.brute(Sp, rranges, full_output=True, finish=opt.fmin)
    return res
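Since Sp is not defined in the snippet, the following hedged, self-contained variant substitutes a hypothetical two-parameter objective so the pattern can be run as-is:

import scipy.optimize as opt

def Sp(x):
    # Hypothetical bowl-shaped objective standing in for the original Sp.
    return (x[0] - 0.4) ** 2 + (x[1] - 0.2) ** 2

rranges = (slice(0, 1, 0.01), slice(0, 0.5, 0.01))
x0, fval, grid, Jout = opt.brute(Sp, rranges, full_output=True,
                                 finish=opt.fmin)
print(x0, fval)  # approximately [0.4, 0.2] and 0.0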