Example #1
    def __call__(self, sat_pos, args=(), **kwds):
        """
        Return the definite integral of *self.fun*(pos, *args*[0],
        ..., *args*[-1]) for the line of sight from *stn_pos* to
        *sat_pos* (a :class:`PyPosition`) where pos is a
        :class:`PyPosition` on the line of sight (and note the
        integration bounds on h defined in __init__). The remaining
        *kwds* are passed to the quadrature routine (:py:func:`quad`).
        """
        diff = NP.array(sat_pos.xyz) - NP.array(self.stn_pos.xyz)
        S_los = NP.linalg.norm(diff) / 1e3

        def pos(s):
            """
            Return the ECEF vector a distance *s* along the line-of-sight (in
            [km]).
            """
            return PyPosition(*(NP.array(self.stn_pos.xyz) + (s / S_los) * diff))

        # determine integration bounds
        # distance along the line of sight at which the geodetic height
        # is self.height1
        s1 = minimize_scalar(lambda l: (pos(l).height / 1e3 - self.height1) ** 2, bounds=[0, S_los], method="Bounded").x
        # distance along the line of sight at which the geodetic height
        # is self.height2
        s2 = minimize_scalar(lambda l: (pos(l).height / 1e3 - self.height2) ** 2, bounds=[0, S_los], method="Bounded").x

        def wrapper(s, *args):
            return self.fun(pos(s), *args)

        return quad(wrapper, s1, s2, args=args, **kwds)[0]
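The two bounded minimize_scalar calls above solve h(s) = height for s by minimizing a squared difference over [0, S_los]. A minimal self-contained sketch of that pattern, with a toy height profile standing in for pos(s).height:

from scipy.optimize import minimize_scalar

def h(s):
    # toy monotone height profile in km (assumption; stands in for pos(s).height / 1e3)
    return 0.5 * s

target = 100.0  # km
res = minimize_scalar(lambda s: (h(s) - target) ** 2, bounds=(0.0, 1000.0), method="Bounded")
print(res.x)  # ~200.0, the s at which h(s) == target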
Example #2
def _fit_amplitude_scipy(counts, background, model, optimizer='Brent'):
    """Fit amplitude using scipy.optimize.

    Parameters
    ----------
    counts : `~numpy.ndarray`
        Slice of count map.
    background : `~numpy.ndarray`
        Slice of background map.
    model : `~numpy.ndarray`
        Model template to fit.
    optimizer : str
        Which `~scipy.optimize.minimize_scalar` method to use. Default is 'Brent'.

    Returns
    -------
    amplitude : float
        Fitted flux amplitude.
    niter : int
        Number of function evaluations needed for the fit.
    """
    from scipy.optimize import minimize_scalar
    args = (counts, background, model)
    amplitude_min, amplitude_max = _amplitude_bounds_cython(counts, background, model)
    try:
        result = minimize_scalar(f_cash, bracket=(amplitude_min, amplitude_max),
                                 args=args, method=optimizer, tol=10)
        return result.x, result.nfev
    except ValueError:
        result = minimize_scalar(f_cash, args=args, method=optimizer, tol=0.1)
        return result.x, result.nfev
Example #3
    def fit(self, data_set, target_set):
        labels = numpy.unique(target_set)
        self.f0 = minimize_scalar(lambda gamma: self.__logistic_loss_function(target_set, gamma)).x
        current_data_set, current_target_set = data_set, target_set

        for m in range(self.n_trees):
            if self.bagging < 1.:
                other_data_set, current_data_set, other_target_set, current_target_set =\
                    train_test_split(data_set, target_set, test_size=self.bagging)
            if current_target_set.shape[0] == 0:
                return None

            feature_indices = numpy.array([i for i in range(data_set.shape[1])])
            rsm_bag_data_set = current_data_set

            if self.rsm < 1.:
                split = ShuffleSplit(data_set.shape[1], test_size=self.rsm, n_iter=1, random_state=numpy.random.randint(0, 100000))
                for o_f, c_f in split:
                    feature_indices = c_f
                    feature_indices = numpy.sort(feature_indices)
                rsm_bag_data_set = current_data_set[:, feature_indices]

            rm = - self.__gradient_loss_function(current_target_set, self.__decision_func(current_data_set))
            cart_tree = CartTree(self.min_leaf_size)
            cart_tree.fit(rsm_bag_data_set, rm)
            gamma_m = minimize_scalar(lambda gamma:
                                      self.__logistic_loss_function(current_target_set, self.__sigmoid(
                                          self.__decision_func(current_data_set) +
                                          gamma*cart_tree.predict(rsm_bag_data_set))), method='bounded', bounds=(-100, 100))
            self.composition.append((self.regularization_coef*gamma_m.x, cart_tree, feature_indices))
Example #4
 def run(self):
     self.pybert.status = "Optimizing Rx..."
     max_iter = self.pybert.max_iter
     res = minimize_scalar(do_opt_rx, bounds=(0, gMaxCTLEPeak), args=(self.pybert,),
                           method='Bounded', options={'disp': gDebugOptimize, 'maxiter': max_iter})
     self.pybert.status = "Ready."
Example #5
def agent_response(tmin, tmax):
	# maximize the respective strategies
	res0 = minimize_scalar(lambda e: -U_0(e), method='bounded', bounds=(0,1), tol=1e-12)
	assert(res0.success)

	res1 = minimize_scalar(lambda e: -U_1(e,tmax), method='bounded', bounds=(0,1), tol=1e-12)
	assert(res1.success)

	res2 = minimize_scalar(lambda e: -U_2(e,tmin,tmax), method='bounded', bounds=(0,1), tol=1e-12)
	assert(res2.success)

	s = np.argmin([res0.fun, res1.fun, res2.fun])
	e = [res0.x, res1.x, res2.x][s]

	return (s,e)
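agent_response maximizes each utility by minimizing its negative over [0, 1]. The same negate-to-maximize pattern in isolation, with a toy utility:

from scipy.optimize import minimize_scalar

U = lambda e: e * (1 - e)  # toy utility, maximized at e = 0.5
res = minimize_scalar(lambda e: -U(e), method='bounded', bounds=(0, 1))
print(res.x, -res.fun)  # argmax ~0.5, maximum ~0.25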
Example #6
def chernoff_information_pmf(p, q):
    """
    Compute the Chernoff information.

    Parameters
    ----------
    p : np.ndarray
        The first pmf.
    q : np.ndarray
        The second pmf.

    Returns
    -------
    ci : float
        The Chernoff information.
    """
    def func(alpha):
        return np.log2((p**alpha * q**(1-alpha)).sum())

    res = minimize_scalar(fun=func, bounds=(0, 1), method='bounded')

    if not -1e-8 <= res.x <= 1 + 1e-8:  # pragma: no cover
        msg = "Appropriate optima could not be found."
        raise OptimizationException(msg)

    ci = -func(res.x)
    # sometimes things are very slightly negative due to optimization fuzziness.
    # since this can throw off some inequalities, we set to zero in this case.
    if ci < 0:  # pragma: no cover
        ci = 0

    return ci
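A usage sketch with two toy pmfs (assumes numpy as np and the module-level minimize_scalar import used above; identical pmfs give a Chernoff information of 0):

import numpy as np

p = np.array([0.5, 0.5])
q = np.array([0.9, 0.1])
print(chernoff_information_pmf(p, q))  # small positive value in bits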
Example #7
    def invert_dfg_qpm_to_signal_wl(self, pump_wl_nm, poling_period_mks, 
                                    max_signal_wl_nm = 2000 ):
        r"""Calculate the signal wavelength phasematched in QPM by the given
            poing period for the specified pump wavelength.
        
        Parameters
        ----------
        pump_wl_nm : float
             Wavelength of pump field, bandwidth assumed to be 0 [nm]
        poling_period_mks : float
             Period length of the QPM grating

        Returns
        -------
        Signal wavelength [nm] : float
            
        """

        def err_fn(wl_s):
            return (self.calculate_poling_period(pump_wl_nm, wl_s, None, silent = True)[0] - poling_period_mks )**2
        
        res = optimize.minimize_scalar(err_fn, bounds = [pump_wl_nm*1.001, max_signal_wl_nm],
                                 method = 'bounded')
        
        return res.x
Example #8
def get_rv(vel, corr, Npix=None, **fit_kws):
    """
    Get the best radial velocity, with errors.
    This will only work if the ccf was made with the maximum likelihood method!
    Uses the formula given in Zucker (2003) MNRAS, 342, 4  for the rv error.

    :param vel:   The velocities
    :param corr:  The ccf values. Should be the same size as vel
    :param Npix:  The number of pixels used in the CCF.
    :return: rv, rv_err, ccf(rv)
    """
    vel = np.atleast_1d(vel)
    corr = np.atleast_1d(corr)
    sorter = np.argsort(vel)
    fcn = spline(vel[sorter], corr[sorter])
    fcn_prime = fcn.derivative(1)
    fcn_2prime = fcn.derivative(2)

    guess = vel[np.argmax(corr)]

    def errfcn(v):
        ll = 1e6*fcn_prime(v)**2
        return ll

    out = minimize_scalar(errfcn, bounds=(guess-2, guess+2), method='bounded')
    rv = out.x
    if Npix is None:
        Npix = vel.size
    
    rv_var = -(Npix * fcn_2prime(rv) * (fcn(rv) / (1 - fcn(rv) ** 2))) ** (-1)
    return rv, np.sqrt(rv_var), fcn(rv)
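A usage sketch with a synthetic Gaussian CCF, assuming spline refers to scipy.interpolate.InterpolatedUnivariateSpline and that numpy and minimize_scalar are imported at module level:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as spline
from scipy.optimize import minimize_scalar

vel = np.linspace(-50, 50, 201)
corr = 0.9 * np.exp(-0.5 * (vel - 3.2) ** 2 / 5.0 ** 2)  # CCF peaked at 3.2 km/s
rv, rv_err, peak = get_rv(vel, corr)
print(rv, rv_err, peak)  # rv close to 3.2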
Example #9
def draw_outline(ax,
                 fmt={'color':'k','linestyle':'solid',},
                 ):

    def rootf(theta1, phi1):
        return abs(mu(phi1, theta1))-muc

    xx = []
    yy = []

    #print "outline:"
    for phi in np.linspace(pi/2, 3*pi/2, 100):
        #print "phi:", phi
        thetamin = 0.0
        thetamax = pi/2

        res = minimize_scalar(rootf, bounds=(thetamin, thetamax), args=(phi,), method='bounded')
        #print "minim:", res.x, " ", res.fun

        theta2 = res.x

        #ax.plot([y(phi, theta2)], [z(phi, theta2)], "b.")
        xx.append( y(phi, theta2))
        yy.append( z(phi, theta2))

    ax.plot(xx, yy, "r-")

    return ax
Example #10
def _minimize_cum_dist_diff(fx, fy, xbins, ybins, tol=0.0001):
    """
    find the value which minimizes the difference between two function given a sampling of
    each function: xbins, ybins.
    """
    
    from scipy import optimize, interpolate
    
    #define function to minimize
    def F(x, y):
        return np.fabs(fy(y)-fx(x))

    x_y = np.zeros(len(ybins)) #empty array to store result of minimization
    
    x0 = np.median(xbins) #initial guess for x in minimization
    
    bounds = [np.min(xbins),np.max(xbins)] #bounds to search for minimization over
    
    #keep the y value fixed and search for the x value
    for i,y in enumerate(ybins):
        f = lambda x: F(x, y) #simplify minimization function
        x_y[i] = optimize.minimize_scalar(f, bounds = bounds, method='bounded').x
    
    y_x = interpolate.interp1d(x_y, ybins, bounds_error=False, fill_value=0.0)
    x_y = interpolate.interp1d(ybins, x_y, bounds_error=False, fill_value=0.0)
    
    return x_y, y_x
Example #11
def AngleSolver(B, guess, lb, ub, order=2, useFmin=True, tol=1e-9):
  """
Solve for theta to fit row vector B input to MagFit(theta)

Uses scipy.optimize.fminbound(...) if useFmin is True; else use
.minimize_scalar(...,method='bounded')
*** N.B. the former, fminbound, is a wrapper for the latter

Return:

  theta that minimizes |B - MagFit(theta,order)| (magnitude)

Arguments:

  B        row vector to fit with theta via MagFit(theta)
  guess    ignored
  lb,ub    lower and upper bounds, respectively
  order    which mtxlist[order] matrix to use
  useFmin  True to use fminbound
  tol      tolerance
"""

  ######################################################################
  def cost(theta):
    """Cost function to be minimized by AngleSolver via varying theta;
       returns distance betwween input 3-vectors B and MagFit(theta)."""
    e = B - MagFit(theta,order=order)
    return e.dot(e)

  ### Use fminbound if useFmin is True (default) ...
  if useFmin: return so.fminbound(cost, lb, ub, xtol=tol)

  ### ... else use functionally identical minimize_scalar(...,method='bounded')

  return so.minimize_scalar(cost, bounds=(lb, ub,), method='bounded', tol=tol).x
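A quick check of the docstring's claim that fminbound wraps the bounded method, using a toy cost rather than the MagFit problem:

import scipy.optimize as so

cost = lambda t: (t - 1.3) ** 2
x1 = so.fminbound(cost, 0.0, 2.0, xtol=1e-9)
x2 = so.minimize_scalar(cost, bounds=(0.0, 2.0), method='bounded', tol=1e-9).x
print(x1, x2)  # both ~1.3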
Example #12
def compute_xpclr(dat):

    # values of dist cannot be 0. Equiv to ~10 bp

    def fx(s_coef):
        return calculate_cl_romberg(s_coef, dat)

    res = minimize_scalar(fx, method="brent",
                          options={"xtol": 0.0005})

    # maximum Likelihood selec coef and recombination pos
    # removed the np.round on this line, as if rec dist up or down can go
    # beyond boundary
    maxli_sc = res.x

    # likelihood of above sc
    maximum_li = res.fun

    if maxli_sc > 0:
        null_model_li = calculate_cl_romberg(0, dat)
    else:
        null_model_li = res.fun

    if null_model_li < maximum_li:
        print("Convergence failed.", res, null_model_li)
        return np.repeat(np.nan, 3)

    # return maxL, L of null model, maxL sel coef and maxL rp
    # if null model is maxL model, sc=0, and rp is meaningless
    return -maximum_li, -null_model_li, maxli_sc
Example #13
 def exactLineSearch(f,x,S):
     """
     :param Function f: An object of the Function class. F is called with numpy array of shape (m,).
     :param array x: A numpy array of shape (m,), containing floats. x = currentValues in OptimizeBase.
     :param array S: A numpy array of shape (m,), containing floats. S is the newton direction.
     :returns alpha s.t. fi(alpha)=f(x+alpha*S) is minimized.
     :rtype: float
     :raises TypeError: If the input parameters are of the wrong data type, if
     the size of S, or x, is not the same as the number of arguments of f
     or if S or x is not a one dimensional array.
     """
     if(not isinstance(f,Function)):
         raise TypeError('f is not a Function object')
     if(not isinstance(S,np.ndarray)):
         raise TypeError('S is not a numpy array')
     if(not issubclass(S.dtype.type,float)):
         raise TypeError('S does not contain floats')
     if(not S.ndim == 1):
         raise TypeError('S must be one dimensional')
     if(not S.size == f._numArgs):
         raise TypeError('S must have the same size as the number of \
         arguments of f')
     if(not isinstance(x,np.ndarray)):
         raise TypeError('x is not a numpy array')
     if(not issubclass(x.dtype.type,float)):
         raise TypeError('x does not contain floats')
     if(not x.ndim == 1):
         raise TypeError('x must be one dimensional')
     if(not x.size == f._numArgs):
         raise TypeError('x must have the same size as the number of \
         arguments of f')
     def fi(alpha):
         return f(x+alpha*S)
     return minimize_scalar(fi).x
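The same exact line search with a plain callable in place of the Function class (a sketch, so the type checks above are skipped):

import numpy as np
from scipy.optimize import minimize_scalar

f = lambda v: (v[0] - 1) ** 2 + (v[1] + 2) ** 2
x = np.array([0.0, 0.0])
S = np.array([1.0, -2.0])  # search direction
alpha = minimize_scalar(lambda a: f(x + a * S)).x
print(alpha, x + alpha * S)  # alpha ~1.0, landing on the minimum at (1, -2)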
Example #14
    def compute_min_property(self, prop):
        ephemeride = self.ephem_object

        def compute_min(time):
            ephemeride.compute(time)
            return getattr(ephemeride, prop)

        if ephemeride:
            now = ephem.now()
            # 100 year from now bounds
            bounds = [ephem.Date(now - ephem.hour * 24 * 365 * 100),
                      ephem.Date(now + ephem.hour * 24 * 365 * 100)]
            result = optimize.minimize_scalar(
                compute_min,
                method='Bounded',
                bounds=bounds
            )
            if result['message'] == 'Solution found.':
                ephemeride.compute(result['x'])
                data = {
                    'ephemeride': ephemeride,
                    'date': ephem.Date(result['x']).datetime()
                }
                data[prop] = getattr(ephemeride, prop)
                return data
            else:
                return None
        else:
            return None
Example #15
def maxEff(Eg, Ts, Tc, Ps, fs, X):
    uLim = uE(Ts) / sp.e
    if (Eg.size > 1) & all(Eg[i] <= Eg[i + 1] for i in range(Eg.size - 1)):
        # check to see that all Eg values are in order from high to low
        # if not, it is a bad set of bandgaps and the function returns null
        return 0
    else:
        myJ = np.zeros(Eg.size)
        myV = []
        preFactor = np.zeros(Eg.size)

        for i in range(Eg.size):
            if i == 0:
                Emax = uLim
            else:
                Emax = Eg[i - 1]
            preFactor[i] = X * fs * N(Eg[i], Emax, Ts, 0) + (1 - X * fs) * N(Eg[i], Emax, Tc, 0)
            # solve for best chemical potential of each bandgap
            myV.append(
                optimize.minimize_scalar(
                    V, bounds=(0, Eg[i]), args=(preFactor[i], Eg[i], Tc, Ps), method="bounded", options={"disp": True}
                )
            )

        if all(myV[i].success for i in range(Eg.size)):
            # calculate efficiency
            for i in range(Eg.size):
                myJ[i] = sp.e * (preFactor[i] - N(Eg[i], uE(Tc) / sp.e, Tc, myV[i].x))
            minJ = myJ.min()  # current limited by lowest-producing cell
            totalV = sum(myV[i].x for i in range(Eg.size))  # voltages add
            myEff = minJ * totalV / Ps
            return -myEff  # negative because we are using minimize_scalar
        else:
            return float("NaN")
Example #16
    def test_minimize_scalar_custom(self):
        # This function comes from the documentation example.
        def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,
                maxiter=100, callback=None, **options):
            bestx = (bracket[1] + bracket[0]) / 2.0
            besty = fun(bestx)
            funcalls = 1
            niter = 0
            improved = True
            stop = False

            while improved and not stop and niter < maxiter:
                improved = False
                niter += 1
                for testx in [bestx - stepsize, bestx + stepsize]:
                    testy = fun(testx, *args)
                    funcalls += 1
                    if testy < besty:
                        besty = testy
                        bestx = testx
                        improved = True
                if callback is not None:
                    callback(bestx)
                if maxfev is not None and funcalls >= maxfev:
                    stop = True
                    break

            return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
                                           nfev=funcalls, success=(niter > 1))

        res = optimize.minimize_scalar(self.fun, bracket=(0, 4), method=custmin,
                                       options=dict(stepsize=0.05))
        assert_allclose(res.x, self.solution, atol=1e-6)
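Standalone usage of the custom-method hook, assuming custmin is defined as above, on a simple quadratic:

from scipy import optimize

res = optimize.minimize_scalar(lambda x: (x - 2.0) ** 2, bracket=(0, 4),
                               method=custmin, options=dict(stepsize=0.05))
print(res.x)  # within one step size of 2.0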
Example #17
    def train_epoch(self, input_train, target_train):
        train_epoch = self.methods.train_epoch
        prediction_error = self.methods.prediction_error

        params = [param for param, _ in self.init_train_updates()]
        param_defaults = [param.get_value() for param in params]

        def setup_new_step(new_step):
            for param_default, param in zip(param_defaults, params):
                param.set_value(param_default)

            self.variables.step.set_value(asfloat(new_step))
            train_epoch(input_train, target_train)
            # Train epoch returns neural network error that was before
            # training epoch step, that's why we need to compute
            # it second time.
            error = prediction_error(input_train, target_train)

            return np.where(np.isnan(error), np.inf, error)

        options = {'xtol': self.tol}
        if self.search_method == 'brent':
            options['maxiter'] = self.maxiter

        res = minimize_scalar(
            setup_new_step,
            tol=self.tol,
            method=self.search_method,
            options=options,
        )

        return setup_new_step(res.x)
Example #18
def GaussNewton(x0, fun, funJack, args, maxIter=10, eps=10e-7, verbose=1):
    x = np.array(x0, dtype=np.float32)

    oldCost = -1
    for i in range(maxIter):
        r = fun(x, *args)
        cost = np.sum(r**2)

        if verbose > 0:
            print "Cost at iteration " + str(i) + ": " + str(cost)

        if (cost < eps or abs(cost - oldCost) < eps):
            break
        oldCost = cost

        J = funJack(x, *args)
        grad = np.dot(J.T, r)
        H = np.dot(J.T, J)
        direction = np.linalg.solve(H, grad)

        # optimize the step length along the search direction
        lineSearchRes = optimize.minimize_scalar(LineSearchFun, args=(x, direction, fun, args))
        # step length
        alpha = lineSearchRes["x"]

        x = x + alpha * direction
        
    if verbose > 0:
        print("Gauss Newton finished after " + str(i + 1) + " iterations")
        r = fun(x, *args)
        cost = np.sum(r**2)
        print("cost = " + str(cost))
        print("x = " + str(x))

    return x
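LineSearchFun is not shown in this snippet; a sketch consistent with its call signature (alpha first, then the args tuple passed above) is the squared residual norm along the search direction:

import numpy as np

def LineSearchFun(alpha, x, direction, fun, args):
    # cost along the line x + alpha * direction (assumption, not the original code)
    r = fun(x + alpha * direction, *args)
    return np.sum(r ** 2)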
Example #19
    def findna(self, y, h0, h1, p):
        """ this is for parallel process of finding next period a1 for each a0 given y, h0 and h1 """
        [r, w, b, tr, tw, tb, Tr, qh, qr] = p
        aa, hh, beta, psi, gamma = self.aa, self.hh, self.beta, self.psi, self.gamma

        m0 = (aa + self.ltv*qh[y]*hh[h1] >= 0).argmax() # LTV constraint
        # print (aa + self.ltv*qh[y]*hh[h1] > 0)
        # print m0
        # m0 = 0
        for i in range(self.aN):    # l = 0, 1, ..., 50
            # Find a bracket within which optimal a' lies
            m = max(0, m0)  # Rch91v.g uses m = max(0, m0-1)
            m0, a0, b0, c0 = self.GetBracket(y, h0, h1, i, m, p)
            # a0, b0, c0 = self.GetBracket2(y, h0, h1, i, p)
            # print m0, a0, b0, c0
            # Find optimal a' using Golden Section Search
            # print 'a=',self.aa[i],'bracket=','(',a0,',',c0,')'
            if a0 == b0:
                self.at[h1,i] = self.aL
            elif b0 == c0:
                self.at[h1,i] = self.aH
            else:
                # print a0, b0, c0
                def objfn(a1): # Define objective function for optimal a'
                    return -self.findv(y, h0, h1, aa[i], a1, p)
                result = minimize_scalar(objfn, bracket=(a0,b0,c0), method='Golden')
                self.at[h1,i] = result.x
            # Compute consumption, rent and house
            self.casht[h1,i] = self.budget(y,h0,h1,aa[i],self.at[h1,i],p)
            self.ct[h1,i] = (self.casht[h1,i]+qr[y]*(hh[h0]+gamma))/(1+psi)
            self.rt[h1,i] = (self.casht[h1,i]*psi-qr[y]*(hh[h0]+gamma))/((1+psi)*qr[y])
            self.vt[h1,i] = self.util(self.ct[h1,i],self.rt[h1,i]+hh[h0]) + beta*self.vtilde[y+1][h1](self.at[h1,i])
Example #20
def get_uncertainty_scalefactor(df):
    """
    Find the factor by which to multiply the 1-sigma measurement uncertainties
    so that they agree with the literature values 68% of the time.

    :param df: A pandas DataFrame with corrected temperatures, such as output by correct_measured_temperature
    :return: The scaling factor. Multiply df['T_uperr'] and df['T_lowerr'] by this to get more realistic uncertainties.
    """

    def get_zscore(x, y, xerr, yerr, f=1.0):
        delta = x - y
        sigma = np.sqrt(f * xerr ** 2 + yerr ** 2)
        return delta / sigma

    def min_func(f, x, y, xerr, yerr):
        zscores = get_zscore(x, y, xerr, yerr, f)
        return (len(zscores[zscores ** 2 > 1]) / float(len(zscores)) - 0.32) ** 2

    df['T_err'] = np.minimum(df['T_uperr'], df['T_lowerr'])  # Be conservative and use the smaller error.

    fitresult = minimize_scalar(min_func, bounds=[0, 10], method='bounded', args=(df['Corrected Temperature'],
                                                                                  df['Tactual'],
                                                                                  df['T_err'],
                                                                                  df['Tact_err']))

    logging.info('Uncertainty scale factor = {:.2g}'.format(fitresult.x))

    return fitresult.x
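A usage sketch with a toy DataFrame whose quoted errors are underestimated (column names as the function assumes; requires the module-level numpy/pandas/logging/minimize_scalar imports):

import numpy as np
import pandas as pd

n = 1000
truth = np.random.uniform(4000, 7000, n)
df = pd.DataFrame({'Corrected Temperature': truth + np.random.normal(0, 150, n),
                   'Tactual': truth,
                   'T_uperr': np.full(n, 100.0),
                   'T_lowerr': np.full(n, 100.0),
                   'Tact_err': np.zeros(n)})
print(get_uncertainty_scalefactor(df))  # ~2.25, i.e. (150/100)**2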
Example #21
    def _condition_matched_galprop(self, sorted_haloprop, galprop_cumprob, 
        ibin, randoms, tol):

        def compute_pearson_difference(r):
            new_randoms = galprop_cumprob + r*randoms
            idx_sorted = np.argsort(new_randoms)
            galprop = (
                self.one_point_lookup_table[ibin](galprop_cumprob[idx_sorted]))
            return abs(pearsonr(galprop, sorted_haloprop)[0]-abs(self.correlation_strength[ibin]))

        if hasattr(self, 'correlation_strength'):
            result = minimize_scalar(compute_pearson_difference, tol=tol)
            new_randoms = galprop_cumprob + result.x*randoms
            idx_sorted = np.argsort(new_randoms)
            galprop = (
                self.one_point_lookup_table[ibin](galprop_cumprob[idx_sorted]))
        else:
            # Zero scatter case
            idx_sorted = np.argsort(galprop_cumprob)
            galprop = (
                self.one_point_lookup_table[ibin](galprop_cumprob[idx_sorted]))

        if self.correlation_strength[ibin] < 0:
            return galprop[::-1]
        else:
            return galprop
Example #22
	def fit(self, X, C, y, regions, kernelType, reml=True, maxiter=100):
	
		#construct a list of kernel names (one for each region) 
		if (kernelType == 'adapt'): kernelNames = self.buildKernelAdapt(X, C, y, regions, reml, maxiter)
		else: kernelNames = [kernelType] * len(regions)			
		
		#perform optimization
		kernelObj, hyp_kernels, sig2e, fixedEffects = self.optimize(X, C, y, kernelNames, regions, reml, maxiter)
		
		#compute posterior distribution
		Ktraintrain = kernelObj.getTrainKernel(hyp_kernels)
		post = self.infExact_scipy_post(Ktraintrain, C, y, sig2e, fixedEffects)
		
		#fix intercept if phenotype is binary
		if (len(np.unique(y)) == 2):			
			controls = (y<y.mean())
			cases = ~controls
			meanVec = C.dot(fixedEffects)
			mu, var = self.getPosteriorMeanAndVar(np.diag(Ktraintrain), Ktraintrain, post, meanVec)										
			fixedEffects[0] -= optimize.minimize_scalar(self.getNegLL, args=(mu, np.sqrt(sig2e+var), controls, cases), method='brent').x				
		
		#construct trainObj
		trainObj = dict([])
		trainObj['sig2e'] = sig2e
		trainObj['hyp_kernels'] = hyp_kernels
		trainObj['fixedEffects'] = fixedEffects		
		trainObj['kernelNames'] = kernelNames
		
		return trainObj
Example #23
def opt_nlists_rate_all_cluster_inseg_only_msa_drate_fix_drate_times_b(ratesList, segRateDict, multiAlignAllSeg, tree, piProb, qMat, cList):
    """
    optimization for estimating (iRate, dRate) in all segments when dRate*tau is fixed
    """
    res = minimize_scalar(nllk_nlists_rate_all_cluster_inseg_only_msa_drate_fix_drate_times_b, args=(ratesList, segRateDict, multiAlignAllSeg, tree, piProb, qMat, cList), tol=1.e-4)
    delta = res.x
    print(delta)
    treeNew = deepcopy(tree)
    treeNew.scale_edges(1/delta)
    segIds = segRateDict.keys()
    rateAll = segRateDict.values()
    rateUniqueSet = set(rateAll)
    rateUnique = list(rateUniqueSet)
    segRateDictNew = deepcopy(segRateDict)
    ratesListNew = deepcopy(ratesList)
    for rate in rateUnique:
        iRateVec = []
        dRate = rate[1] * delta
        segIdsInOneCluster = [segId for segId in segIds if segRateDict[segId] == rate]
        for segId in segIdsInOneCluster:
            multiAlign = multiAlignAllSeg[segId]
            seqNames = multiAlign.keys()
            mLen = len(list(multiAlign.values())[0])
            pc0 = pc0_from_dRate_and_tree(dRate, seqNames, treeNew, qMat, piProb, cList)
            iRateNew = mle_irate_given_drate(dRate, treeNew, pc0, mLen)
            iRateVec.append(iRateNew)
        iRate = sum(iRateVec) / len(iRateVec)
        rateNew = (iRate, dRate)
        ratesListNew[ratesListNew.index(rate)] = rateNew
        for k, v in segRateDictNew.items():
            if v == rate:
                segRateDictNew[k] = rateNew
            else:
                segRateDictNew[k] = v
    return ratesListNew, segRateDictNew, treeNew
Example #24
 def __init__(self, simConfig, telConfig, atmosConfig, sciConfig, mask):
     scienceCam.__init__(self, simConfig, telConfig, atmosConfig, sciConfig, mask)
     self.normMask = self.mask / numpy.sqrt(numpy.sum(numpy.abs(self.mask)**2))
     self.fibreSize = opt.minimize_scalar(self.refCouplingLoss, bracket=[1.0, self.simConfig.simSize]).x
     self.refStrehl = 1.0 - self.refCouplingLoss(self.fibreSize)
     self.fibre_efield = self.fibreEfield(self.fibreSize)
     print("Coupling efficiency: {0:.3f}".format(self.refStrehl))
Example #25
def optimize(sy,ey):
    boundary = [sy*365*24*60*60,ey*365*24*60*60]
    
    match = so.minimize_scalar(getPAdiff,method="bounded",bounds=boundary)
    
    print "Match found at",match.x
    print "PAdiff is",getPAdiff(match.x),"degrees"
Example #26
def nyquist_bandwidth(func, bandwidth_guess, threshold=1e-3):
    """ Get the support of a one-dimensional function in Fourier space given a
        guesstimate location of the bandwidth.
        
        Parameters
            func - The function to determine the bandwidth of
            bandwidth_guess - An approximation to the bandwidth of the
                function. Better as an upper bound (i.e. make it too large) as this
                function uses Brent-method root-finding to find the bandwidth.
            threshold - The threshold for determining the location of the
                bandwidth, given as a fraction of the maximum peak of the Fourier
                function. Optional, defaults to 1e-3.
        
        Returns:
            the support of the function
    """
    result = optimize.minimize_scalar(
        lambda x: -abs(func(x)),
        bounds=(0, bandwidth_guess),
        method='bounded')
    if result:
        maxloc = result['x']
        threshold = -result['fun'] * threshold
        upper_bound = optimize.brentq(
            lambda x: (abs(func(x)) + abs(func(-x))) - threshold,
            maxloc, bandwidth_guess)
    return upper_bound
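A usage sketch with a Gaussian, where the bandwidth at the default 1e-3 threshold has a closed form to check against:

import numpy as np

gauss = lambda x: np.exp(-0.5 * x ** 2)
print(nyquist_bandwidth(gauss, 10.0))  # ~3.90, i.e. sqrt(2 * log(2e3))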
Example #27
    def flash(self):
        index=[i for i in self.pvt]
        z=[self.pvt[i]['comp'] for i in index]
        n=len(index)
        x0=[ln(self.phases[0]['comp'][i]) for i in index]+[ln(self.phases[1]['comp'][i]) for i in index]
        def fun(x):
            comp_l={index[i]:exp(x[n+i]) for i in range(n)}
            comp_v={index[i]:exp(x[i]) for i in range(n)}
            fug_v=peng_robinson.fug_minimum_gibbs(self.pvt,self.T,self.P,1.,comp=comp_v,phase='light')
            fug_l=peng_robinson.fug_minimum_gibbs(self.pvt,self.T,self.P,1.,comp=comp_l,phase='heavy')
            F=[fug_v[i]+ln(comp_v[i])-fug_l[i]-ln(comp_l[i]) for i in index]
            F+=[(z[i]-exp(x[i]))/(exp(x[n+i])-exp(x[i]))-(z[n-1]-exp(x[n-1]))/(exp(x[-1])-exp(x[n-1])) for i in range(n-1)]
            return np.dot(F,F)

        cons=({'type': 'ineq',
               'fun': lambda x: np.array([-c for c in x]),
               'jac': lambda x: np.array([[-1. if c1 == c2 else 0 for c1 in range(len(x))] for c2 in range(len(x))])},
              {'type': 'eq',
               'fun': lambda x: np.array([1-sum([exp(c) for c in x[:n]]),1-sum([exp(c) for c in x[n:]])]),
               'jac': lambda x: np.array([[-exp(c) if c in x[:n] else 0 for c in x],[-exp(c) if c in x[n:] else 0 for c in x]])
               })
        res=minimize(fun,x0,method='SLSQP',constraints=cons)
        comp_v = [exp(c) for c in res.x[:n]]
        comp_l = [exp(c) for c in res.x[n:]]

        def func(frac):
            return ((1 - frac) ** 2) * np.dot(comp_l, comp_l) + 2 * frac * (1 - frac) * np.dot(comp_l, comp_v) +\
                   (frac ** 2) * np.dot(comp_v, comp_v) - np.dot(z, z) \
                   - 2 * (1 - frac) * np.dot(z, comp_l) - 2 * frac * np.dot(z, comp_v)
        bounds = [0., 1.]
        fraction = minimize_scalar(func, bounds=bounds, method='Bounded').x
        comp_v={index[i]:comp_v[i] for i in range(n)}
        comp_l={index[i]:comp_l[i] for i in range(n)}
        self.phases = [{'frac': fraction, 'comp': comp_v}, {'frac': 1 - fraction, 'comp': comp_l}]
Example #28
def _tilt(x, y):
    """
    Determine the tilt of the smallest rectangular bounding box that will fit
    around the set of position samples

    Parameters
    ----------
    x, y : array-like
        Arrays containing the position samples.

    Returns
    -------
    scalar
        Tilt angle

    """
    def _bbox_area(tilt):
        rot_x, rot_y = _rotate(x, y, -tilt)
        dx, dy = (numpy.ptp(rot_x[~numpy.isnan(rot_x)]),
                  numpy.ptp(rot_y[~numpy.isnan(rot_y)]))
        return dx * dy

    pi_4 = .25 * numpy.pi
    tilt = optimize.minimize_scalar(_bbox_area, bounds=(-pi_4, pi_4),
                                    method='bounded').x

    return tilt
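_rotate is not shown here; a standard 2-D rotation consistent with the _bbox_area call would be:

import numpy

def _rotate(x, y, angle):
    # rotate the position samples by `angle` radians about the origin
    # (assumption, consistent with _bbox_area calling _rotate(x, y, -tilt))
    c, s = numpy.cos(angle), numpy.sin(angle)
    return c * numpy.asarray(x) - s * numpy.asarray(y), s * numpy.asarray(x) + c * numpy.asarray(y)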
Example #29
def calculate_MAP_Indel_length_Zipfian_factor(indel_lengths, prior=None):
    ''' calculate the MAP Zipfian constant <indel_zipf> that determines the distribution of
    indel lengths. Assume a Gaussian prior. Input is a list of <indel_lengths> for a particular
    column.
    The probability distribution of indel lengths is assumed to follow the Zipfian distribution
    (Compare Fletcher,W. and Yang,Z. (2009) INDELible: a flexible simulator of biological sequence evolution. Mol Biol Evol, 26, 1879–1888.
    In their publication, <indel_zipf> is denoted as <a>, the gap lengths as <u>.)

    The MAP is calculated numerically, as there did not seem to be a nice analytical solution.
    '''

    if prior is None:
        prior = {'mu': 1.821, 'sigma_squared': 1}

    if len(indel_lengths) == 0:
        return prior['mu']

    else:
        # The posterior is the likelihood * prior * scaling.
        # As we are only interested in the maximum, we leave out all factors in
        # the following equation for the posterior:
        posterior = lambda indel_zipf: - (scipy.special.zeta(indel_zipf, 1) ** - len(indel_lengths)) * np.prod(
            [lIndel ** -indel_zipf for lIndel in indel_lengths]) * math.exp(-0.5 * ((indel_zipf - prior['mu']) ** 2) / prior['sigma_squared'])
        res = minimize_scalar(posterior, method='brent')
        return res.x
Example #30
 def __init__(self,a,b):
     if a.ref != b.ref:
         raise AttributeError("Coordinate conversion not implemented yet")
     if not a.isFlying or not b.isFlying:
         raise AttributeError("Both vessels must be orbiting.")
     
     
     self.figure = figure(figsize=(8,8))
     self.axis = self.figure.gca(projection="rectilinear")#,aspect='equal')
     
     
     t = time.time() # Temp
     ts = a.orbit.synodicPeriod(b.orbit)
     
     boundary = [t,t+ts]
     
     X = linspace(boundary[0],boundary[1],1000) #arange(boundary[0],boundary[1],100)
     Y = []
     for i in X:
         Y.append(a.distanceTo(i,b))
     
     self.axis.plot(X,Y)
     d=lambda x: a.distanceTo(x,b)
     closest = so.minimize_scalar(d,method="bounded",bounds=boundary)
     
     self.axis.scatter(closest.x,closest.fun)
     if closest.fun < b.SoI:
         self.axis.plot(boundary,[b.SoI,b.SoI],label="%s SoI"%b.name)
         
         d=lambda x: a.distanceTo(x,b) - b.SoI
         encounter = so.brentq(d,t,closest.x)
         escape = so.brentq(d,closest.x,t+ts)
         
         self.axis.scatter(encounter,b.SoI)
         self.axis.scatter(escape,b.SoI)
Example #31
    def fit(self, log_gamma=None, bounds=(-8.0, 8.0), tol=1e-8, maxiter=500):
        r"""Find the triple :math:`(\beta, \sigma^2, \tau^2)` maximizing REML.

        This method sets the attributes `beta`, `sigma_sq`, `tau_sq`, `gamma`,
        `log_gamma`, `h_sq`, and `h_sq_standard_error` as described in the
        top-level class documentation.

        If `log_gamma` is provided, :meth:`fit` finds the REML solution
        with :math:`\log{\gamma}` constrained to this value. In this case,
        `h_sq_standard_error` is ``None`` since `h_sq` is not estimated.

        Otherwise, :meth:`fit` searches for the value of :math:`\log{\gamma}`
        that minimizes :meth:`compute_neg_log_reml`, and also sets the attribute
        `optimize_result` of type `scipy.optimize.OptimizeResult
        <https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html>`__.

        Parameters
        ----------
        log_gamma: :obj:`float`, optional
            If provided, the solution is constrained to have this value of
            :math:`\log{\gamma}`.
        bounds: :obj:`float`, :obj:`float`
            Lower and upper bounds for :math:`\log{\gamma}`.
        tol: :obj:`float`
            Absolute tolerance for optimizing :math:`\log{\gamma}`.
        maxiter: :obj:`float`
            Maximum number of iterations for optimizing :math:`\log{\gamma}`.
        """
        if self._fitted:
            self._reset()

        fit_log_gamma = True if log_gamma is None else False

        if fit_log_gamma:
            from scipy.optimize import minimize_scalar

            self.optimize_result = minimize_scalar(self.compute_neg_log_reml,
                                                   method='bounded',
                                                   bounds=bounds,
                                                   options={
                                                       'xatol': tol,
                                                       'maxiter': maxiter
                                                   })

            if self.optimize_result.success:
                if self.optimize_result.x - bounds[0] < 0.001:
                    raise Exception(
                        "failed to fit log_gamma: optimum within 0.001 of lower bound."
                    )
                elif bounds[1] - self.optimize_result.x < 0.001:
                    raise Exception(
                        "failed to fit log_gamma: optimum within 0.001 of upper bound."
                    )
                else:
                    self.log_gamma = self.optimize_result.x
            else:
                raise Exception(
                    f'failed to fit log_gamma:\n  {self.optimize_result}')
        else:
            self.log_gamma = log_gamma

        _, self.beta, self.sigma_sq, self.tau_sq = self.compute_neg_log_reml(
            self.log_gamma, return_parameters=True)

        self.gamma = np.exp(self.log_gamma)
        self.h_sq = self.sigma_sq / (self.sigma_sq + self.tau_sq)

        self._residual_sq = self.sigma_sq * self._dof
        self._d_alt = self._d
        self._ydy_alt = self._ydy
        self._xdy_alt[1:] = self._xdy
        self._xdx_alt[1:, 1:] = self._xdx

        if fit_log_gamma:
            self.h_sq_standard_error = self._estimate_h_sq_standard_error()

        self._fitted = True
Example #32
wav = wavfile.read(in_filename)[1]
out_filename = in_filename.rpartition('.')[0] + '.txt'
out_figure = in_filename.rpartition('.')[0] + '.svg'

print("\nProcessing results outputted to file: " + out_filename + "\n")
out = open(out_filename, 'wt')

for t in range(0, wav.size - WINDOW - N, INTERVAL):
    signal = wav[t:t + WINDOW + N].astype(float)
    matsignal = np.empty([N, WINDOW])
    for l in range(WINDOW):
        matsignal[:, l] = signal[l:l + N]
    fourierSparsity = getSparsity(TINY, signal, n=N, matsignal=matsignal)
    result = optimize.minimize_scalar(getSparsity,
                                      args=(signal, N, matsignal),
                                      method='bounded',
                                      bounds=(TINY, 1. - TINY),
                                      options={'maxiter': MAXITER})
    if fourierSparsity < result.fun:
        sparsity = fourierSparsity
        r = TINY
        nonlin = 0.
    else:
        sparsity = result.fun
        r = result.x
        nonlin = np.log(fourierSparsity / sparsity)
    sparsity /= np.average(np.fabs(signal))
    out.write('{} {} {}\n'.format(sparsity, r, nonlin))
    print('{:4.1f}%:  {:5.1f}  {:6.4f}  {:6.4f}'.format(
        100. * t / wav.size, sparsity, r, nonlin))
Example #33
            for i in range(1, mx):
                # calculate residual
                R[i,j] = (u_new[i-1,j] + u_old[i+1,j] - \
                     2*(1+lambdasqr)*u_old[i,j] + \
                     lambdasqr*(u_new[i,j-1]+u_old[i,j+1]) )/(2*(1+lambdasqr))
                #
                u_new[i, j] = u_old[i, j] + omega * R[i, j]

        err = np.max(np.abs(u_new - u_old))
        u_old[:] = u_new[:]
        count = count + 1

    return count


res = minimize_scalar(num_iterations, bracket=(1, 2), tol=1e-3)

print(res)
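Unlike the bounded examples above, the bracket form hands Brent's method starting points rather than hard limits; a minimal sketch of that calling convention:

from scipy.optimize import minimize_scalar

f = lambda w: (w - 1.7) ** 2  # stand-in for num_iterations(omega)
res = minimize_scalar(f, bracket=(1, 2), tol=1e-3)
print(res.x)  # ~1.7; the bracket seeds the search but is not a hard bound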

#for n, omega in enumerate(omegas):
#    # set up the solution variables
#    u_old = np.zeros((x.size,y.size))   # u at current time step
#    u_new = np.zeros((x.size,y.size))   # u at next time step
#
#    # intialise the boundary conditions, for both timesteps
#    u_old[1:-1,0] = fB(x[1:-1])
#    u_old[1:-1,-1] = fT(x[1:-1])
#    u_old[0,1:-1] = fL(y[1:-1])
#    u_old[-1,1:-1] = fR(y[1:-1])
#    u_new[:]=u_old[:]
#    count = 1
Example #34
# read csv
df = pd.read_csv('../Data/UMB_daily_average.csv')
T = df['T']
I = df['I']
D = df['D']
ps = df['ps3']

# parameters
Kc, Vcmax, ca, q, Jmax, z1, z2, R, g1, kxmax, a, L = 460, 31, 400, 0.3, 48, 0.9, 0.9999, 8.314, 50, 7, 1.6, 2
c, p50 = 10, -3

j = 0
for i in range(len(T)):
    Ti, Ii, Di, psi = T[i], I[i], D[i], ps[i]
    pxmin = pxminf(psi, p50)
    pxmax = optimize.minimize_scalar(pxf,
                                     bounds=(pxmin, psi),
                                     method='bounded',
                                     args=(Ti, Ii, Di, psi, Kc, Vcmax, ca, q,
                                           Jmax, z1, z2, R, g1, c, kxmax, p50,
                                           a, L))
    try:
        px = optimize.brentq(pxf,
                             pxmin,
                             pxmax.x,
                             args=(Ti, Ii, Di, psi, Kc, Vcmax, ca, q, Jmax, z1,
                                   z2, R, g1, c, kxmax, p50, a, L))
    except ValueError:
        j += 1
print('{} out of {} days'.format(j, len(T)))
Example #35
    def __init__(self, cacheOptTimes=False, staticOptTimes=False, **specs):
        
        #initialize the prototype survey
        SurveySimulation.__init__(self, **specs)

        assert isinstance(staticOptTimes, bool), 'staticOptTimes must be boolean.'
        self.staticOptTimes = staticOptTimes
        self._outspec['staticOptTimes'] = self.staticOptTimes

        assert isinstance(cacheOptTimes, bool), 'cacheOptTimes must be boolean.'
        self._outspec['cacheOptTimes'] = cacheOptTimes


        #some global defs
        self.detmode = list(filter(lambda mode: mode['detectionMode'] == True, self.OpticalSystem.observingModes))[0]
        self.ohTimeTot = self.Observatory.settlingTime + self.detmode['syst']['ohTime']
        self.maxTime = self.TimeKeeping.missionLife*self.TimeKeeping.missionPortion

        self.constraints = {'type':'ineq',
                            'fun': lambda x: self.maxTime.to(u.d).value - np.sum(x[x*u.d > 0.1*u.s]) - 
                                             np.sum(x*u.d > 0.1*u.s).astype(float)*self.ohTimeTot.to(u.d).value,
                            'jac':lambda x: np.ones(len(x))*-1.}

        self.t0 = None
        if cacheOptTimes:
            #Generate cache Name########################################################################
            cachefname = self.cachefname + 't0'
            
            if os.path.isfile(cachefname):
                self.vprint("Loading cached t0 from %s"%cachefname)
                with open(cachefname, 'rb') as f:
                    self.t0 = pickle.load(f)
                sInds = np.arange(self.TargetList.nStars)
                fZ = np.array([self.ZodiacalLight.fZ0.value]*len(sInds))*self.ZodiacalLight.fZ0.unit
                self.scomp0 = -self.objfun(self.t0.to(u.d).value,sInds,fZ)


        if self.t0 is None:
            #find nominal background counts for all targets in list
            _, Cbs, Csps = self.OpticalSystem.Cp_Cb_Csp(self.TargetList, range(self.TargetList.nStars),  
                    self.ZodiacalLight.fZ0, self.ZodiacalLight.fEZ0, 25.0, self.WAint, self.detmode)

            #find baseline solution with dMagLim-based integration times
            self.vprint('Finding baseline fixed-time optimal target set.')
            t0 = self.OpticalSystem.calc_intTime(self.TargetList, range(self.TargetList.nStars),  
                    self.ZodiacalLight.fZ0, self.ZodiacalLight.fEZ0, self.dMagint, self.WAint, self.detmode)
            comp0 = self.Completeness.comp_per_intTime(t0, self.TargetList, range(self.TargetList.nStars), 
                    self.ZodiacalLight.fZ0, self.ZodiacalLight.fEZ0, self.WAint, self.detmode, C_b=Cbs, C_sp=Csps)

            
            solver = pywraplp.Solver('SolveIntegerProblem',pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
            xs = [ solver.IntVar(0.0,1.0, 'x'+str(j)) for j in range(len(comp0)) ]

            #constraint is x_i*t_i < maxtime
            constraint = solver.Constraint(-solver.infinity(),self.maxTime.to(u.day).value)
            for j,x in enumerate(xs):
                constraint.SetCoefficient(x, t0[j].to(u.day).value + self.ohTimeTot.to(u.day).value)

            #objective is max x_i*comp_i
            objective = solver.Objective()
            for j,x in enumerate(xs):
                objective.SetCoefficient(x, comp0[j])
            objective.SetMaximization()

            cpres = solver.Solve()
            x0 = np.array([x.solution_value() for x in xs])
            self.scomp0 = np.sum(comp0*x0)
            self.t0 = t0

            #now find the optimal eps baseline and use whichever gives you the highest starting completeness
            self.vprint('Finding baseline fixed-eps optimal target set.')
            def totCompfeps(eps):
                compstars,tstars,x = self.inttimesfeps(eps, Cbs.to('1/d').value, Csps.to('1/d').value)
                return -np.sum(compstars*x)
            epsres = minimize_scalar(totCompfeps,method='bounded',bounds = [0,1],options = {'disp':True})
            comp_epsmax,t_epsmax,x_epsmax = self.inttimesfeps(epsres['x'],Cbs.to('1/d').value, Csps.to('1/d').value)
            if np.sum(comp_epsmax*x_epsmax) > self.scomp0:
                x0 = x_epsmax
                self.scomp0 = np.sum(comp_epsmax*x_epsmax) 
                self.t0 = t_epsmax*u.day

            #now optimize the solution
            self.vprint('Optimizing baseline integration times.')
            sInds = np.arange(self.TargetList.nStars)
            fZ = np.array([self.ZodiacalLight.fZ0.value]*len(sInds))*self.ZodiacalLight.fZ0.unit
            bounds = [(0,self.maxTime.to(u.d).value) for i in range(len(sInds))]
            initguess = x0*self.t0.to(u.d).value
            ires = minimize(self.objfun, initguess, jac=self.objfun_deriv, args=(sInds,fZ), 
                    constraints=self.constraints, method='SLSQP', bounds=bounds, options={'maxiter':100,'ftol':1e-4})

            assert ires['success'], "Initial time optimization failed."

            self.t0 = ires['x']*u.d
            self.scomp0 = -ires['fun']

            if cacheOptTimes:
                with open(cachefname,'wb') as f:
                    pickle.dump(self.t0, f)
                self.vprint("Saved cached optimized t0 to %s"%cachefname)
Example #36
    def eclipses_since(self, when_utc=None, limit_date=None):
        """
        An iterator that yields all eclipses start and end times between
        when_utc and limit_date.

        The next eclipse with an end strictly after when_utc will be returned,
        possibly the current eclipse.
        The last eclipse returned starts before limit_date, but it can end
        strictly after limit_date.
        Non-circular orbits are not supported and will raise NotImplementedError.
        """
        def _get_illumination(t):
            my_start = start + dt.timedelta(seconds=t)
            result = get_satellite_minus_penumbra_verticals(
                self.get_only_position(my_start), my_start)
            return result

        if when_utc is None:
            when_utc = dt.datetime.utcnow()

        orbital_period_s = self.period * 60
        # A third of the orbit period is used as the base window of the search.
        # This window ensures the function get_satellite_minus_penumbra_verticals
        # will not have more than one local minimum (one in the illuminated phase and
        # the other in penumbra).
        base_search_window_s = orbital_period_s / 3
        start = when_utc

        while limit_date is None or start < limit_date:

            # a minimum negative value is approximately the middle point of the eclipse
            minimum_illumination = minimize_scalar(
                _get_illumination,
                bounds=(0, base_search_window_s),
                method="bounded",
                options={"xatol": 1e-2},
            )
            eclipse_center_candidate_delta_s = minimum_illumination.x

            # If found a minimum that is not illuminated, there is an eclipse here
            if _get_illumination(eclipse_center_candidate_delta_s) < 0:
                # The small time interval to search zeros around the center
                # is estimated with the expected eclipse duration (which is
                # generally an underestimate, hence the 1.5 coefficient).
                # Also a minimum of 180 seconds was added because
                # in some cases the estimation is 0 even though there is an eclipse.
                eclipse_duration_estimation_s = self.get_eclipse_duration(
                    start) * 60
                zero_search_window_s = max(180,
                                           1.5 * eclipse_duration_estimation_s)

                # Search now both zeros to get the start and end of the eclipse
                eclipse_start_delta_s = brentq(
                    _get_illumination,
                    eclipse_center_candidate_delta_s - zero_search_window_s,
                    eclipse_center_candidate_delta_s,
                    xtol=1e-2,
                    full_output=False,
                )
                eclipse_end_delta_s = brentq(
                    _get_illumination,
                    eclipse_center_candidate_delta_s,
                    eclipse_center_candidate_delta_s + zero_search_window_s,
                    xtol=1e-2,
                    full_output=False,
                )
                eclipse_start = start + dt.timedelta(
                    seconds=eclipse_start_delta_s)
                eclipse_end = start + dt.timedelta(seconds=eclipse_end_delta_s)
                yield eclipse_start, eclipse_end
                start = eclipse_end + dt.timedelta(
                    seconds=base_search_window_s)
            else:
                start += dt.timedelta(seconds=base_search_window_s)
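The center-then-roots pattern used by eclipses_since, in isolation: find a minimum of a signed function with a bounded search, then bracket the zero crossing on each side with brentq:

import numpy as np
from scipy.optimize import brentq, minimize_scalar

f = np.cos  # negative on (pi/2, 3*pi/2), like the illumination function during an eclipse
center = minimize_scalar(f, bounds=(0, 2 * np.pi), method="bounded").x
if f(center) < 0:
    start = brentq(f, center - np.pi, center)  # ~pi/2
    end = brentq(f, center, center + np.pi)    # ~3*pi/2
    print(start, center, end)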
Example #37
                          full_output=True)
    if out.converged == True:
        bow_3.append(bow_res)
        bow_1.append(bow_res * np.tan(theta))

#Compute Eq. (22)
ntheta = 100  #number of points to construct curve
theta_vec = np.linspace(0, np.pi / 2, ntheta)
tau_1 = []
tau_3 = []
tau_1.append(0)
tau_3.append(1 - a_z)
u_opt = []
for theta in theta_vec[1:-1]:
    res = minimize_scalar(objective,
                          method='Bounded',
                          bounds=(np.arcsin(a_z), np.pi / 2),
                          args=(theta,))
    I, err = dblquad(integrand,
                     -1,
                     1,
                     lambda phi: 0,
                     lambda phi: 2 * np.pi,
                     args=(res.x, theta))
    u_opt.append(res.x)
    tau_3.append(np.cos(theta) * 2 * np.pi / I)
    tau_1.append(np.sin(theta) * 2 * np.pi / I)
    print('Finished angle %s of %s' %
          (np.where(theta_vec == theta)[0][0], ntheta - 2))


#Quantum state boundary
Example #38
def correct(profiles):

    menc_fxns   = []
    rdist_names = []
    interp_fxns = []
    ifxn = 0

    r = arange(10,300)

    for profile in profiles:

        base,mod = (profile,None) if ',' not in profile else profile.split(',')

        if   base == 'nfw' :
            menc_fxns += [ lambda rc,t: (log(1.+rc/rs)-1./(rs/rc+1.)) / (log(1.+C) - 1./(1/C + 1)) ]
            rdist_names += ['NFW']
        elif base == 'sis' :
            menc_fxns += [ lambda rc,t: rc/RVIR ]
            rdist_names += ['SIS']
        elif base == 'hern':
            menc_fxns += [ lambda rc,t: ( (1./C+1) / (rs/rc+1) )**2 ]
            rdist_names += ['Hernquist']
        elif base == 'ein':
            C200  = 4.9
            ALPHA = 0.24
            rho = lambda rr: exp(-2./ALPHA * (C200*rr)**ALPHA)
            menc = array([ quad(lambda rr: rho(rr)*rr**2, 0,rend)[0] for rend in r/RVIR ])
            menc /= menc[-1]
            menc = concatenate([[0],menc,[1]])
            interp_fxns += [ interp1d(concatenate([[0],r,[300]]),menc) ] #,fill_value=[0,1],bounds_error=False) ]
            menc_fxns += [ lambda rc,t,i=ifxn: interp_fxns[i](rc) ]
            ifxn += 1
            rdist_names += ['Einasto']
        else:  # we'll try to read menc profile from a data file

            datfn = RDIST_DATDIR+'nenc3d/'+base+'-menc.dat'

            if 'tidal' not in base:
                rr,mm = loadtxt(datfn,unpack=True)
                rr *= 300 if rr[-1]/100 < 1 else 1
                interp_fxns += [ interp1d(rr,mm/mm[-1]) ]
                menc_fxns += [ lambda rc,t,i=ifxn: interp_fxns[i](rc) ]
                ifxn += 1
            else:
                rr,mm = loadtxt(datfn,unpack=True,usecols=(0,3)) # assume they're all severely tidally stripped
                rr *= 300 if rr[-1]/100 < 1 else 1
                interp_fxns += [ interp1d(rr,mm/mm[-1]) ]
                menc_fxns += [ lambda rc,t,i=ifxn: interp_fxns[i](rc) ]
                ifxn += 1
                """
                rts,rnts, mts,mnts = loadtxt(datfn,unpack=True,usecols=(0,1,3,4))
                rts  *= 300 if rts[-1] /100 < 1 else 1
                rnts *= 300 if rnts[-1]/100 < 1 else 1
                interp_fxns += [ interp1d(rts ,mts /mts [-1]) ]  # tidally stripped
                interp_fxns += [ interp1d(rnts,mnts/mnts[-1]) ]  # not tidally stripped
                menc_fxns += [ lambda rc,t,i=ifxn,j=(ifxn+1): (interp_fxns[i] if 'tidal' in t else interp_fxns[j])(rc) ]
                ifxn += 2
                """

            rdist_names += [ base[:].upper() ]

        # read in modification to profile, if provided
        if mod != None:
            datfn = RDIST_DATDIR+'nenc3d/'+mod+'-mod.dat'
            print('reading modification from',datfn)
            rr,mm = loadtxt(datfn,unpack=True)
            rr *= 300 if rr[-1]/100 < 1 else 1
            interp_fxns += [ menc_fxns[len(menc_fxns)-1] ]
            interp_fxns += [ interp1d(rr,mm/mm[-1]) ]
            menc_fxns[-1] = lambda rc,t,i=ifxn: interp_fxns[i](rc,t)*interp_fxns[i+1](rc)
            ifxn += 2

            rdist_names[-1] += '+'+mod

        # solve for radius that encloses half the satellites
        result = minimize_scalar(lambda rr: abs(menc_fxns[-1](rr,1)-0.5), bounds=(10,300), method='Bounded')
        print('for',rdist_names[-1],'r1/2 =',result.x,'kpc')



    # -------------------------------------------------------------------------
    # correction calculation

    crs_tot = [[],[],[]]

    for dwarf_name,dwarf in dwarfs.items():

        if 'classical' in dwarf['type']:  continue

        rc = dmax(dwarf['mv'],MLIMIT_SDSS)
        rc = RVIR if rc > RVIR else rc

        print('{0:<20}  {1:>7} kpc'.format(dwarf_name,round(rc,2)),end=' ')

        for im,maglim in enumerate(['full', MLIMIT_DES, MLIMIT_LSST1]):
            rout = dmax(dwarf['mv'],maglim)
            rout = RVIR if rout > RVIR else rout
            print('| {0:>7} kpc '.format(round(rout,2)),end='')
            crs_tot[im] += [ [ menc(rout,dwarf['type'])/menc(rc,dwarf['type']) for menc in menc_fxns ] ]
            #crs_tot[im] += [ [ crfxn(rc,dwarf['type'])/crfxn(rout,dwarf['type']) for crfxn in crfxns ] ]
            print(' '.join(['{0:<7}'.format(round(cr,3)) for cr in crs_tot[im][-1]]),end=' ')
        print()


    # and print the results
    for iml,maglim in enumerate(['full','DES', 'LSST']):

        print()
        print('MAGLIM',maglim,end=' ')

        if maglim == 'full':  C_OMEGA = AREA_SKY  / AREA_DR8
        if maglim == 'DES' :  C_OMEGA = AREA_DES  / AREA_DR8
        if maglim == 'LSST':  C_OMEGA = AREA_LSST / AREA_DR8

        for ird,name in enumerate(rdist_names):
            crs = [crs_tot[iml][i][ird] for i in range(len(crs_tot[0]))]
            print(name+':',int(round(sum(crs)*C_OMEGA,2)),'total satellites (not including classicals)') # + NCLASSICAL

    return crs_tot, menc_fxns, rdist_names # crs_tot[maglims][dwarfs][profiles]
Example #39
    def optimal_t_compressed(self,
                             seq_pair,
                             multiplicity,
                             profiles=False,
                             tol=1e-10):
        """
        Find the optimal distance between the two sequences represented as state_pairs
        or as pair of profiles

        Parameters
        ----------

         seq_pair : state_pair, tuple
            Compressed representation of sequences along a branch, either
            as tuple of state pairs or as tuple of profiles.

         multiplicity : array
            Number of times each state pair in seq_pair appears (if profile==False)

            Number of times an alignment pattern is observed (if profiles==True)

         profiles : bool, default False
            The standard branch length optimization assumes fixed sequences at
            either end of the branch. With profiles==True, optimization is performed
            while summing over all possible states of the nodes at either end of the
            branch. Note that the meaning/format of seq_pair and multiplicity
            depend on the value of :profiles:.

        """
        def _neg_prob(t, seq_pair, multiplicity):
            """
            Probability to observe a child given the parent state, transition
            matrix, and the time of evolution (branch length).

            Parameters
            ----------

             t : double
                Branch length (time between sequences)

             seq_pair : tuple of profiles
                parent and child sequences

             multiplicity : vector containing the number of times each alignment pattern is observed

            Returns
            -------

             prob : double
                Negative log-probability that the two given sequences
                are separated by the time t.
            """
            if profiles:
                res = -1.0 * self.prob_t_profiles(
                    seq_pair, multiplicity, t**2, return_log=True)
                return res + np.exp(t**4 / 10000)
            else:
                return -1.0 * self.prob_t_compressed(
                    seq_pair, multiplicity, t**2, return_log=True)

        try:
            from scipy.optimize import minimize_scalar
            opt = minimize_scalar(_neg_prob,
                                  bounds=[
                                      -np.sqrt(ttconf.MAX_BRANCH_LENGTH),
                                      np.sqrt(ttconf.MAX_BRANCH_LENGTH)
                                  ],
                                  args=(seq_pair, multiplicity),
                                  tol=tol)
            new_len = opt["x"]**2
            if 'success' not in opt:
                opt['success'] = True
                self.logger(
                    "WARNING: the optimization result does not contain a 'success' flag:"
                    + str(opt),
                    4,
                    warn=True)
        except ImportError:
            import scipy
            print('legacy scipy', scipy.__version__)
            from scipy.optimize import fminbound
            new_len = fminbound(_neg_prob,
                                -np.sqrt(ttconf.MAX_BRANCH_LENGTH),
                                np.sqrt(ttconf.MAX_BRANCH_LENGTH),
                                args=(seq_pair, multiplicity))
            new_len = new_len**2
            opt = {'success': True}

        if new_len > .9 * ttconf.MAX_BRANCH_LENGTH:
            self.logger(
                "WARNING: GTR.optimal_t_compressed -- The branch length seems to be very long!",
                4,
                warn=True)

        if opt["success"] != True:
            # return hamming distance: number of state pairs where state differs/all pairs
            new_len = np.sum(multiplicity[
                seq_pair[:, 1] != seq_pair[:, 0]]) / np.sum(multiplicity)

        return new_len
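
The sign of the optimization variable is irrelevant above: the branch length is recovered as opt["x"]**2, so searching over the symmetric bounds ±sqrt(MAX_BRANCH_LENGTH) enforces nonnegativity without a constraint. A minimal sketch of this square-transform trick with a toy objective (neg_log_prob stands in for the GTR likelihood):

import numpy as np
from scipy.optimize import minimize_scalar

def neg_log_prob(t):
    return (t - 0.3) ** 2  # toy objective with its optimum at t = 0.3

t_max = 10.0  # stand-in for ttconf.MAX_BRANCH_LENGTH
opt = minimize_scalar(lambda u: neg_log_prob(u ** 2),
                      bounds=(-np.sqrt(t_max), np.sqrt(t_max)),
                      method='bounded')
print('optimal branch length:', opt.x ** 2)  # ~0.3, nonnegative by construction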
Example #40
0
alpha = 1
n_iter = 5
from scipy.optimize import minimize_scalar
for i in range(n_iter):
    ground.df['Clearsky GHI pvlib'] *= alpha
    ground.calc_all_window_metrics(1, 10, col1='GHI', col2='Clearsky GHI pvlib', 
                                   ratio_label='ratio', abs_ratio_diff_label='abs_diff_ratio', overwrite=True)
    ground2 = cs_detection.ClearskyDetection(ground.df)
    ground2.trim_dates('10-01-2010', '11-01-2010')
    pred = clf.predict(ground2.df[feature_cols].values)
    clear_vals = ground2.df[pred]['GHI']
    clear_model = ground2.df[pred]['Clearsky GHI pvlib']
    prev_alpha = alpha
    def rmse(alpha):
        return np.sqrt(np.mean((clear_vals - alpha*clear_model)**2))
    alpha = minimize_scalar(rmse).x
    if np.isclose(alpha, prev_alpha):
        break
    
pred = clf.predict(test.df[feature_cols].values)
vis = visualize.Visualizer()
vis.add_line_ser(test.df['GHI'], 'GHI')
vis.add_line_ser(test.df['Clearsky GHI pvlib'], 'GHI_cs')
vis.add_circle_ser(test.df[(test.df['sky_status pvlib'] == 0) & (pred)]['GHI'], 'ML clear only')
vis.add_circle_ser(test.df[(test.df['sky_status pvlib'] == 1) & (~pred)]['GHI'], 'PVLib clear only')
vis.add_circle_ser(test.df[(test.df['sky_status pvlib'] == 1) & (pred)]['GHI'], 'ML+PVLib clear only')
vis.show()
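
For a fixed set of predicted clear points, the RMSE-minimizing scale factor in the loop above also has a closed form (least squares through the origin), which makes a useful sanity check. A sketch with synthetic stand-ins for clear_vals and clear_model:

import numpy as np
from scipy.optimize import minimize_scalar

rng = np.random.default_rng(0)
clear_model = rng.uniform(200, 1000, size=500)            # toy model GHI
clear_vals = 0.97 * clear_model + rng.normal(0, 10, 500)  # toy measured GHI

alpha_numeric = minimize_scalar(
    lambda a: np.sqrt(np.mean((clear_vals - a * clear_model) ** 2))).x
# argmin_a sum((y - a*m)**2) = sum(y*m) / sum(m**2)
alpha_closed = np.sum(clear_vals * clear_model) / np.sum(clear_model ** 2)
print(alpha_numeric, alpha_closed)  # agree to optimizer tolerance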
# ## Statistical Clearsky

# Only making ground predictions using PVLib clearsky model and statistical model.  NSRDB model won't be available to ground measurements.


Example #41
0
import numpy as np
from scipy.optimize import minimize_scalar


def error(w0, w1, x, y_actual):
    y_pred = w0 + w1 * x
    #mse = ((y_actual-y_pred)**2).mean()
    mse = sum((y_actual - y_pred)**2)
    return mse


w0 = 50
x = np.array([1, 2, 3])
y = np.array([52, 54, 56])

res = minimize_scalar(lambda w1: error(w0, w1, x, y), bounds=(-5, 5), method='bounded')

print(res.x)
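
Since w0 is held fixed, the summed squared error is a quadratic in w1 and its minimizer has the closed form sum(x*(y - w0)) / sum(x**2), a quick check on the bounded search above:

w1_closed = np.sum(x * (y - w0)) / np.sum(x ** 2)
print(w1_closed)  # 2.0, matching res.x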

import numpy as np
from scipy import optimize
x0 = np.array([0.1, 0.1])
fun = lambda x: 0.5 * np.exp(-x[0] * (1 - x[1]))

res = optimize.minimize(fun, x0, method='Nelder-Mead')  # SLSQP, BFGS
print(res)

#min   sum((ydata - f(xdata, params))**2, axis=0)

# x = [eaton_coeff, dtn] to be found using optimization
def bowles_t3_root(tau_3, theta, a_z):
    res = minimize_scalar(bowles_objective,
                          method='Bounded',
                          bounds=(-1, 1),
                          args=(theta, a_z, tau_3))
    return -res.fun - 1
Example #43
0
    def __init__(self,
                 cacheOptTimes=False,
                 staticOptTimes=False,
                 selectionMetric='maxC',
                 Izod='current',
                 maxiter=60,
                 ftol=1e-3,
                 **specs):  #fZminObs=False,

        #initialize the prototype survey
        SurveySimulation.__init__(self, **specs)

        #Calculate fZmax
        self.valfZmax, self.absTimefZmax = self.ZodiacalLight.calcfZmax(
            np.arange(self.TargetList.nStars), self.Observatory,
            self.TargetList, self.TimeKeeping,
            list(
                filter(lambda mode: mode['detectionMode'] == True,
                       self.OpticalSystem.observingModes))[0], self.cachefname)

        assert isinstance(staticOptTimes,
                          bool), 'staticOptTimes must be boolean.'
        self.staticOptTimes = staticOptTimes
        self._outspec['staticOptTimes'] = self.staticOptTimes

        assert isinstance(cacheOptTimes,
                          bool), 'cacheOptTimes must be boolean.'
        self._outspec['cacheOptTimes'] = cacheOptTimes

        assert selectionMetric in [
            'maxC',
            'Izod-Izodmin',
            'Izod-Izodmax',
            '(Izod-Izodmin)/(Izodmax-Izodmin)',
            '(Izod-Izodmin)/(Izodmax-Izodmin)/CIzod',  #(Izod-Izodmin)/(Izodmax-Izodmin)/CIzodmin is simply this but with Izod='fZmin'
            'TauIzod/CIzod',  #TauIzodmin/CIzodmin is simply this but with Izod='fZmin'
            'random',
            'priorityObs'
        ], 'selectionMetric not valid input'  # Informs what selection metric to use
        self.selectionMetric = selectionMetric
        self._outspec['selectionMetric'] = self.selectionMetric

        assert Izod in [
            'fZmin', 'fZ0', 'fZmax', 'current'
        ], 'Izod not valid input'  # Informs what Izod to optimize integration times for [fZmin, fZmin+45d, fZ0, fZmax, current]
        self.Izod = Izod
        self._outspec['Izod'] = self.Izod

        assert isinstance(
            maxiter, int
        ), 'maxiter is not an int'  # maximum number of iterations to optimize integration times for
        assert maxiter >= 1, 'maxiter must be positive real'
        self.maxiter = maxiter
        self._outspec['maxiter'] = self.maxiter

        assert isinstance(
            ftol, float
        ), 'ftol must be a float'  # tolerance to place on optimization
        assert ftol > 0, 'ftol must be positive real'
        self.ftol = ftol
        self._outspec['ftol'] = self.ftol

        #some global defs
        self.detmode = list(
            filter(lambda mode: mode['detectionMode'] == True,
                   self.OpticalSystem.observingModes))[0]
        self.ohTimeTot = self.Observatory.settlingTime + self.detmode['syst'][
            'ohTime']  # total overhead time per observation
        self.maxTime = self.TimeKeeping.missionLife * self.TimeKeeping.missionPortion  # total mission time

        self.constraints = {
            'type':
            'ineq',
            'fun':
            lambda x: self.maxTime.to(u.d).value - np.sum(x[
                x * u.d > 0.1 * u.s]) -  #maxTime less sum of intTimes
            np.sum(x * u.d > 0.1 * u.s).astype(float) * self.ohTimeTot.to(u.d).
            value,  # sum of True -> goes to 1 x OHTime
            'jac':
            lambda x: np.ones(len(x)) * -1.
        }

        self.t0 = None
        if cacheOptTimes:
            #Generate cache Name########################################################################
            cachefname = self.cachefname + 't0'

            if os.path.isfile(cachefname):
                self.vprint("Loading cached t0 from %s" % cachefname)
                with open(cachefname, 'rb') as f:
                    try:
                        self.t0 = pickle.load(f)
                    except UnicodeDecodeError:
                        self.t0 = pickle.load(f, encoding='latin1')
                sInds = np.arange(self.TargetList.nStars)
                fZ = np.array([self.ZodiacalLight.fZ0.value] *
                              len(sInds)) * self.ZodiacalLight.fZ0.unit
                self.scomp0 = -self.objfun(self.t0.to('day').value, sInds, fZ)

        if self.t0 is None:
            #1. find nominal background counts for all targets in list
            dMagint = 25.0  # this works fine for WFIRST
            _, Cbs, Csps = self.OpticalSystem.Cp_Cb_Csp(
                self.TargetList, np.arange(self.TargetList.nStars),
                self.ZodiacalLight.fZ0, self.ZodiacalLight.fEZ0, dMagint,
                self.WAint, self.detmode)

            #find baseline solution with dMagLim-based integration times
            #3.
            t0 = self.OpticalSystem.calc_intTime(
                self.TargetList, np.arange(self.TargetList.nStars),
                self.ZodiacalLight.fZ0, self.ZodiacalLight.fEZ0, self.dMagint,
                self.WAint, self.detmode)
            #4.
            comp0 = self.Completeness.comp_per_intTime(
                t0,
                self.TargetList,
                np.arange(self.TargetList.nStars),
                self.ZodiacalLight.fZ0,
                self.ZodiacalLight.fEZ0,
                self.WAint,
                self.detmode,
                C_b=Cbs,
                C_sp=Csps)

            #### 5. Formulating MIP to filter out stars we can't or don't want to reasonably observe
            solver = pywraplp.Solver(
                'SolveIntegerProblem', pywraplp.Solver.
                CBC_MIXED_INTEGER_PROGRAMMING)  # create solver instance
            xs = [
                solver.IntVar(0.0, 1.0, 'x' + str(j))
                for j in np.arange(len(comp0))
            ]  # define x_i variables for each star either 0 or 1
            self.vprint('Finding baseline fixed-time optimal target set.')

            #constraint is x_i*t_i < maxtime
            constraint = solver.Constraint(
                -solver.infinity(),
                self.maxTime.to(u.day).value
            )  #hmmm I wonder if we could set this to 0,maxTime
            for j, x in enumerate(xs):
                constraint.SetCoefficient(
                    x, t0[j].to('day').value + self.ohTimeTot.to(
                        u.day).value)  # this forms x_i*(t_0i+OH) for all i

            #objective is max x_i*comp_i
            objective = solver.Objective()
            for j, x in enumerate(xs):
                objective.SetCoefficient(x, comp0[j])
            objective.SetMaximization()

            #solver.EnableOutput()# this line enables output of the CBC MIXED INTEGER PROGRAM (Was hard to find don't delete)
            solver.SetTimeLimit(5 * 60 *
                                1000)  #time limit for solver in milliseconds
            cpres = solver.Solve()  # actually solve MIP
            x0 = np.array([x.solution_value()
                           for x in xs])  # convert output solutions

            self.scomp0 = np.sum(comp0 * x0)  # calculate sum Comp from MIP
            self.t0 = t0  # assign calculated t0

            #Observation num x0=0 @ dMagint=25 is 1501
            #Observation num x0=0 @ dMagint=30 is 1501...

            #now find the optimal eps baseline and use whichever gives you the highest starting completeness
            self.vprint('Finding baseline fixed-eps optimal target set.')

            def totCompfeps(eps):
                compstars, tstars, x = self.inttimesfeps(
                    eps,
                    Cbs.to('1/d').value,
                    Csps.to('1/d').value)
                return -np.sum(compstars * x)

            #Note: There is no way to seed an initial solution to minimize scalar
            #0 and 1 are nominally the bounds on epsres; widening the upper bound (to 7 here) lets the solver converge
            epsres = minimize_scalar(
                totCompfeps,
                method='bounded',
                bounds=[0, 7],
                options={
                    'disp': 3,
                    'xatol': self.ftol,
                    'maxiter': self.maxiter
                })  #adding ftol for initial seed. could be different ftol
            #https://docs.scipy.org/doc/scipy/reference/optimize.minimize_scalar-bounded.html#optimize-minimize-scalar-bounded
            comp_epsmax, t_epsmax, x_epsmax = self.inttimesfeps(
                epsres['x'],
                Cbs.to('1/d').value,
                Csps.to('1/d').value)
            if np.sum(comp_epsmax * x_epsmax) > self.scomp0:
                x0 = x_epsmax
                self.scomp0 = np.sum(comp_epsmax * x_epsmax)
                self.t0 = t_epsmax * x_epsmax * u.day

            ##### Optimize the baseline solution
            self.vprint('Optimizing baseline integration times.')
            sInds = np.arange(self.TargetList.nStars)
            if self.Izod == 'fZ0':  # Use fZ0 to calculate integration times
                fZ = np.array([self.ZodiacalLight.fZ0.value] *
                              len(sInds)) * self.ZodiacalLight.fZ0.unit
            elif self.Izod == 'fZmin':  # Use fZmin to calculate integration times
                fZ = self.valfZmin[sInds]
            elif self.Izod == 'fZmax':  # Use fZmax to calculate integration times
                fZ = self.valfZmax[sInds]
            elif self.Izod == 'current':  # Use current fZ to calculate integration times
                fZ = self.ZodiacalLight.fZ(
                    self.Observatory, self.TargetList, sInds,
                    self.TimeKeeping.currentTimeAbs.copy() +
                    np.zeros(self.TargetList.nStars) * u.d, self.detmode)

            maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = self.TimeKeeping.get_ObsDetectionMaxIntTime(
                self.Observatory, self.detmode,
                self.TimeKeeping.currentTimeNorm.copy())
            maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime,
                             maxIntTimeMissionLife)  # Maximum intTime allowed
            bounds = [(0, maxIntTime.to(u.d).value)
                      for i in np.arange(len(sInds))]
            initguess = x0 * self.t0.to(u.d).value
            self.save_initguess = initguess

            #While we use all sInds as input, theoretically, this can be solved faster if we use the following lines:
            #sInds = np.asarray([sInd for sInd in sInds if np.bool(x0[sInd])])
            #bounds = [(0,maxIntTime.to(u.d).value) for i in np.arange(len(sInds))]
            #and use initguess[sInds], fZ[sInds], and self.t0[sInds].
            #There was no noticable performance improvement
            ires = minimize(self.objfun,
                            initguess,
                            jac=self.objfun_deriv,
                            args=(sInds, fZ),
                            constraints=self.constraints,
                            method='SLSQP',
                            bounds=bounds,
                            options={
                                'maxiter': self.maxiter,
                                'ftol': self.ftol,
                                'disp': True
                            })  #original method

            assert ires['success'], "Initial time optimization failed."

            self.t0 = ires['x'] * u.d
            self.scomp0 = -ires['fun']

            if cacheOptTimes:
                with open(cachefname, 'wb') as f:
                    pickle.dump(self.t0, f)
                self.vprint("Saved cached optimized t0 to %s" % cachefname)

        #Redefine filter inds
        self.intTimeFilterInds = np.where(
            (self.t0.value > 0.) *
            (self.t0.value <= self.OpticalSystem.intCutoff.value) > 0.)[
                0]  # These indices are acceptable for use simulating
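
The SLSQP stage above maximizes summed completeness subject to one linear time-budget inequality (with its constant jacobian supplied) and per-target bounds. A stripped-down sketch of the same constraint structure on toy data; the concave log1p payoff is illustrative only:

import numpy as np
from scipy.optimize import minimize

n, budget = 5, 10.0
gain = np.array([1.0, 2.0, 1.5, 0.5, 3.0])  # toy per-target payoff rates

def objfun(t):
    return -np.sum(gain * np.log1p(t))  # negative total "completeness"

constraints = {'type': 'ineq',
               'fun': lambda t: budget - np.sum(t),   # leftover time >= 0
               'jac': lambda t: -np.ones_like(t)}
bounds = [(0.0, budget)] * n
res = minimize(objfun, x0=np.full(n, budget / n), method='SLSQP',
               bounds=bounds, constraints=constraints)
print(res.x, -res.fun)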
Example #44
0
    def findvpath(self, p):
        """ Given prices, transfers, benefits and tax rates over one's life-cycle, 
        value and decision functions are calculated ***BACKWARD*** """
        [r, w, b, tr, tw, tb, Tr, qh, qr] = p
        T, aa, hh, aN, hN, trx = self.T, self.aa, self.hh, self.aN, self.hN, self.trx
        psi, tcost, beta, gamma, sp = self.psi, self.tcost, self.beta, self.gamma, self.sp
        ltv, dti = self.ltv, self.dti
        # y = -1 : the oldest generation
        for h0 in range(hN):
            for i in range(aN):
                cash = self.budget(-1, 0, h0, 0, aa[i], 0, p)
                self.co[-1, h0, 0,
                        i] = (cash + qr[-1] * (hh[h0] + gamma)) / (1 + psi)
                self.ro[-1, h0, 0,
                        i] = (cash * psi - qr[-1] *
                              (hh[h0] + gamma)) / ((1 + psi) * qr[-1])
                self.v[-1, h0, 0,
                       i] = self.util(self.co[-1, h0, 0, i],
                                      self.ro[-1, h0, 0, i] + hh[h0])
            self.vtilde[-1][h0][0] = interp1d(aa,
                                              self.v[-1, h0, 0],
                                              kind='cubic')
        # y = -2, -3,..., -60
        for y in range(-2, -(T + 1), -1):
            xx = array([0]) if y >= -(self.R) else self.xx
            nxx = array([0]) if y >= -(self.R + 1) else self.xx
            trx = array([[1]]) if y >= -(self.R) else array(
                [[1], [1]]) if y == -(self.R + 1) else self.trx
            for x0 in range(len(xx)):
                income = Tr[y] + b[y] if y >= -self.R else Tr[y] + (
                    1 - tw[y] - tb[y]) * w[y] * self.ef[y] * xx[x0]
                mdti = (aa + income * dti >= 0).argmax()  # DTI constraint
                # print 'dit',income*0.3,mdti,aa[mdti]
                for h0 in range(hN):
                    at = array([[0 for i in range(aN)] for h in range(hN)],
                               dtype=float)
                    ct = array([[0 for i in range(aN)] for h in range(hN)],
                               dtype=float)
                    rt = array([[0 for i in range(aN)] for h in range(hN)],
                               dtype=float)
                    vt = array([[float('-inf') for i in range(aN)]
                                for h in range(hN)],
                               dtype=float)
                    casht = array([[0 for i in range(aN)] for h in range(hN)],
                                  dtype=float)
                    for h1 in range(hN):
                        mltv = (aa + self.ltv * qh[y] * hh[h1] >=
                                0).argmax()  # LTV constraint
                        m0 = max(0, mltv, mdti)
                        # m0 = 0
                        for i in range(aN):  # l = 0, 1, ..., 50
                            m = max(0, m0)  # Rch91v.g uses m = max(0, m0-1)
                            m0, a0, b0, c0 = self.GetBracket(
                                y, x0, h0, h1, i, m, p)
                            if a0 == b0:
                                at[h1, i] = aa[m0]
                            elif b0 == c0:
                                at[h1, i] = self.aH
                            else:

                                def objfn(a1):
                                    # objective function for the optimal a'
                                    return -self.findv(y, x0, h0, h1, aa[i], a1, p)

                                result = minimize_scalar(objfn,
                                                         bracket=(a0, b0, c0),
                                                         method='Golden')
                                at[h1, i] = result.x
                            # Compute consumption, rent and house
                            # print y, x0, h0, h1, aa[i], at[h1,i]
                            # print 'casht',casht[h1,i]
                            # print 'budget',self.budget(y, x0, h0, h1, aa[i], at[h1,i], p)

                            casht[h1, i] = self.budget(y, x0, h0, h1, aa[i],
                                                       at[h1, i], p)
                            ct[h1, i] = (casht[h1, i] + qr[y] *
                                         (hh[h0] + gamma)) / (1 + psi)
                            rt[h1,
                               i] = (casht[h1, i] * psi - qr[y] *
                                     (hh[h0] + gamma)) / ((1 + psi) * qr[y])
                            # print h1,xN[y],y, x0,trx[-2,0,0],trx[-2,0,1],trx[-2,0,1],trx[-2,0,1], [trx[y,x0,x1] for x1 in range(xN[y])]
                            #[self.vtilde[y+1][h1][x1](at[h1,i])*trx[y,x0,x1] for x1 in range(xN[y])]
                            # print y, at[h1,i], trx, xx
                            vt[h1,i] = self.util(ct[h1,i],rt[h1,i]+hh[h0]) \
                                        + beta*sp[y]*sum([self.vtilde[y+1][h1][x1](at[h1,i])*trx[x0,x1] for x1 in range(len(nxx))])
                    for i in range(aN):
                        h1 = vt[:, i].argmax()
                        self.v[y, h0, x0, i] = vt[h1, i]
                        self.co[y, h0, x0, i] = ct[h1, i]
                        self.ro[y, h0, x0, i] = rt[h1, i]
                        self.ho[y, h0, x0, i] = h1
                        self.ao[y, h0, x0, i] = at[h1, i]
                        cash = casht[h1, i]
                    ai = [aN // 4, aN * 2 // 4, aN * 3 // 4, aN - 1]
                    if y % 15 == 0:
                        print('------------')
                        print('y=', y, 'inc=%2.2f' % (income), 'h0=', h0)
                        for i in ai:
                            print('ltv=%2.2f' % (hh[self.ho[y,h0,x0,i]]*qh[y]*self.ltv), 'dti=%2.2f' % (income*self.dti),
                                  'a0=%2.2f' % (aa[i]), 'h1=%2.0f' % (self.ho[y,h0,x0,i]),
                                  'a1=%2.2f' % (self.ao[y,h0,x0,i]),
                                  'c0=%2.2f' % (self.co[y,h0,x0,i]), 'r0=%2.2f' % (self.ro[y,h0,x0,i]))
                    self.vtilde[y][h0][x0] = interp1d(aa,
                                                      self.v[y, h0, x0],
                                                      kind='cubic')
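
The 'Golden' search above relies on GetBracket supplying a valid triple (a0, b0, c0) with the middle value strictly below both endpoints; when the bracket degenerates (a0 == b0 or b0 == c0) the code falls back to an endpoint instead. A minimal sketch of the bracketed calling pattern on a toy objective:

from scipy.optimize import minimize_scalar

def objfn(a1):
    return (a1 - 1.7) ** 2  # toy stand-in for the negative value function

# valid bracket: f(1.5) is below both f(0.0) and f(4.0)
result = minimize_scalar(objfn, bracket=(0.0, 1.5, 4.0), method='Golden')
print(result.x)  # ~1.7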
Example #45
0
def update_R(rep,
             center,
             solver='minimize_scalar',
             scalar_method='brent',
             lp_obj='primal',
             tol=0.001,
             **kwargs):
    """
    Function to update R while leaving the network parameters and center c fixed in a block coordinate optimization.
    Using scipy.optimize.minimize_scalar or linear programming of cvxopt.

    solver: should be either "minimize_scalar" (default) or "lp" (linear program)
    scalar_method: the optimization method used in minimize_scalar ('brent', 'bounded', or 'golden')
    lp_obj: should be either "primal" (default) or "dual"
    """

    assert solver in ("minimize_scalar", "lp")

    if solver == "minimize_scalar":

        from scipy.optimize import minimize_scalar

        assert scalar_method in ("brent", "bounded", "golden")

        print("Updating R with the {} method...".format(scalar_method))

        n, d = rep.shape
        dist = np.sum((rep - center)**2, axis=1, dtype=np.float32)

        # define deep SVDD objective function in R
        def f(x):
            return (x**2 + (1 / (Cfg.nu.get_value() * n)) * np.sum(
                np.max(np.column_stack((np.zeros(n), dist - x**2)), axis=1),
                dtype=np.float32))

        # get lower and upper bounds around the (1-nu)-th quantile of distances
        bracket = None
        bounds = None

        upper_idx = int(np.max((np.floor(n * Cfg.nu.get_value() * 0.1), 1)))
        lower_idx = int(np.min((np.floor(n * Cfg.nu.get_value() * 1.1), n)))
        sort_idx = dist.argsort()
        upper = dist[sort_idx][-upper_idx]
        lower = dist[sort_idx][-lower_idx]

        if scalar_method in ("brent", "golden"):
            bracket = (lower, upper)

        elif scalar_method == "bounded":
            bounds = (lower, upper)

        # solve for R
        res = minimize_scalar(f,
                              bracket=bracket,
                              bounds=bounds,
                              method=scalar_method)

        # Get new R
        R = res.x

    elif solver == "lp":

        from cvxopt import matrix
        from cvxopt.solvers import lp

        assert lp_obj in ("primal", "dual")

        print("Updating R on the {} objective...".format(lp_obj))

        n, d = rep.shape

        # Solve either primal or dual objective
        if lp_obj == "primal":

            # Define LP
            c = matrix(
                np.append(np.ones(1),
                          (1 / (Cfg.nu.get_value() * n)) * np.ones(n),
                          axis=0).astype(np.double))
            G = matrix(-np.concatenate(
                (np.concatenate((np.ones(n).reshape(n, 1), np.eye(n)), axis=1),
                 np.concatenate(
                     (np.zeros(n).reshape(n, 1), np.eye(n)), axis=1)),
                axis=0).astype(np.double))
            h = matrix(
                np.append(-np.sum((rep - center)**2, axis=1),
                          np.zeros(n),
                          axis=0).astype(np.double))

            # Solve LP
            sol = lp(c, G, h)['x']

            # Get new R
            R = np.array(sol).reshape(n + 1).astype(np.float32)[0]

        elif lp_obj == "dual":

            # Define LP
            c = matrix((np.sum((rep - center)**2, axis=1)).astype(np.double))
            G = matrix((np.concatenate((np.eye(n), -np.eye(n)),
                                       axis=0)).astype(np.double))
            h = matrix((np.concatenate(
                ((1 / (Cfg.nu.get_value() * n)) * np.ones(n), np.zeros(n)),
                axis=0)).astype(np.double))
            A = matrix((np.ones(n)).astype(np.double)).trans()
            b = matrix(1, tc='d')

            # Solve LP
            sol = lp(c, G, h, A, b)['x']
            a = np.array(sol).reshape(n)

            # Recover R (using the specified numeric tolerance on the range)
            n_svs = 0  # number of support vectors
            while n_svs == 0:
                lower = tol * (1 / (Cfg.nu.get_value() * n))
                upper = (1 - tol) * (1 / (Cfg.nu.get_value() * n))
                idx_svs = (a > lower) & (a < upper)
                n_svs = np.sum(idx_svs)
                tol /= 10  # decrease tolerance if there are still no support vectors found

            R = np.median(np.array(c).reshape(n)[idx_svs]).astype(np.float32)

    return R
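
The minimize_scalar branch above solves a one-dimensional soft-boundary problem whose solution is known in closed form: the minimizing R**2 is the (1-nu)-quantile of the squared distances, which is why the code brackets the search around that quantile. A self-contained sketch with nu fixed directly instead of read from Cfg:

import numpy as np
from scipy.optimize import minimize_scalar

nu = 0.05
rng = np.random.default_rng(1)
dist = rng.chisquare(df=8, size=1000)  # toy squared distances to the center

def f(x):
    return x ** 2 + (1.0 / (nu * len(dist))) * np.sum(np.maximum(0.0, dist - x ** 2))

res = minimize_scalar(f, bounds=(0.0, np.sqrt(dist.max())), method='bounded')
print(res.x ** 2, np.quantile(dist, 1 - nu))  # minimizer squared ~ (1-nu)-quantile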
Example #46
0
kxmax = 7
p50 = -5
ps = -1
g1 = 10
f = lambda px: pxf(px,
                   T,
                   I,
                   D,
                   ps=ps,
                   Kc=460,
                   Vcmax=30,
                   ca=400,
                   q=0.3,
                   Jmax=80,
                   z1=0.9,
                   z2=0.9999,
                   R=8.314,
                   g1=g1,
                   c=30,
                   kxmax=kxmax,
                   p50=p50,
                   a=1.6,
                   L=1)
x_min = pxminf(ps, p50)
x_max = optimize.minimize_scalar(f, bounds=(x_min, ps), method='bounded').x
x = np.linspace(x_min, x_max, 10)
y = f(x)

plt.plot(y)
plt.axhline(y=0, color='red')
    dt = 5  # bin width; alternatives tried earlier: dt = 1, dt = mdt * 30

    bins = np.arange(all_time[cn][0], all_time[cn][-1] + 1, dt)

    t_bins = np.digitize(all_time[cn], bins) - 1


    res = minimize_scalar(min_smo, 
                          bounds=tuple(smooth_bnds), 
                          method='bounded', 
                          #method='golden', 
                          args=(all_time[cn], 
                              all_flux[cn], 
                              weights, 
                              transit_idx, 
                              fit_sds, 
                              sigma, vs, t_bins))


    res.x = res.x / vs

    print(res)

    bsmo = res.x

    # for smo in np.linspace(1e-5, 1-1e-5, 10):

    #     min_smo(smo * vs, all_time[cn], 
        J_tot += J_val

        data_vol[material_]['i_f_opt'][i1] = i_f

    print(data_vol[material_]['i_f_opt'], p_f, J_tot)
    return J_tot


for material_ in materials:

    data_vol[material_]['i_f_opt'] = np.zeros(
        len(data_vol[material_]['UV_levels']))

    res2 = optimize.minimize_scalar(cost_fn,
                                    args=(material_),
                                    bounds=(0.25, 0.90),
                                    method='bounded')
    data_vol[material_]['p_opt'] = res2.x
    data_vol[material_]['D_f'] = np.log2(res2.x * 8)

#%% Fit the three materials

for i1, material_ in enumerate(materials):

    p_opt = data_vol[material_]['p_opt']

    for i2, UV_level_ in enumerate(data_vol[material_]['UV_levels']):

        time_ = data_vol[material_]['i_f_opt'][i2]
        color_ = cmap(np.where(UV_level_ == np.array([0, 2, 6, 12]))[0][0])
        numIter = maxit
        print(numIter)

        sampRate = S / float(ntrain)
        """ privacy budget calculation """
        # (1) to set the same level of burned privacy, we first calculate MA composition
        total_eps_MA = cal_pri.moments_accountant(sigma, totDel, sampRate,
                                                  numIter)
        c2 = 2 * np.log(1.25 / del_iter)
        eps_iter = np.sqrt(c2) / sigma
        budget_MA = [eps_iter, del_iter, total_eps_MA]
        print(budget_MA)
        """save results"""
        method = 'privacy_budget_MA_S=%s_K=%s_sigma=%s' % (S, K, int(sigma))
        # np.save(method + '.npy', budget_MA)
        scipy.io.savemat(method, dict(budget_MA=budget_MA))

        # (2) strong composition
        res = minimize_scalar(cal_pri.strong_composition,
                              bounds=(0, 50),
                              args=(total_eps_MA, totDel, numIter, sampRate,
                                    del_iter),
                              method='bounded')
        eps_iter = res.x
        budget_SC = [eps_iter, del_iter, total_eps_MA]
        print(budget_SC)
        """save results"""
        method = 'privacy_budget_SC_S=%s_K=%s_sigma=%s' % (S, K, int(sigma))
        # np.save(method + '.npy', budget_SC)
        scipy.io.savemat(method, dict(budget_SC=budget_SC))
Example #50
0
def calculate_residuals(args_list):
    def calc_exp_bounds(t_sim, t_exp):
        t_bounds = [max([t_sim[0],
                         t_exp[0]])]  # Largest initial time in SIM and Exp
        t_bounds.append(min([t_sim[-1],
                             t_exp[-1]]))  # Smallest final time in SIM and Exp
        # Values within t_bounds
        exp_bounds = np.where(
            np.logical_and((t_exp >= t_bounds[0]), (t_exp <= t_bounds[1])))[0]

        return exp_bounds

    def time_adjust_func(t_offset,
                         t_adjust,
                         t_sim,
                         obs_sim,
                         t_exp,
                         obs_exp,
                         weights,
                         loss_alpha=2,
                         loss_c=1,
                         scale='Linear',
                         DoF=1,
                         verbose=False):

        t_sim_shifted = t_sim + t_offset + t_adjust

        # Compare SIM Density Grad vs. Experimental
        exp_bounds = calc_exp_bounds(t_sim_shifted, t_exp)
        t_exp, obs_exp, weights = t_exp[exp_bounds], obs_exp[
            exp_bounds], weights[exp_bounds]

        f_interp = CubicSpline(t_sim_shifted.flatten(), obs_sim.flatten())
        obs_sim_interp = f_interp(t_exp)

        if scale == 'Linear':
            resid = np.subtract(obs_exp, obs_sim_interp)
        elif scale == 'Log':
            ind = np.argwhere(((obs_exp != 0.0) & (obs_sim_interp != 0.0)))
            weights = weights[ind].flatten()
            m = np.divide(obs_exp[ind], obs_sim_interp[ind])
            resid = np.log10(np.abs(m)).flatten()

        resid_outlier = outlier(resid, a=loss_alpha, c=loss_c, weights=weights)
        loss = generalized_loss_fcn(resid, a=loss_alpha, c=resid_outlier)

        loss_sqr = loss**2
        wgt_sum = weights.sum()
        N = wgt_sum - DoF
        if N <= 0:
            N = wgt_sum
        stderr_sqr = (loss_sqr * weights).sum() / N
        chi_sqr = loss_sqr / stderr_sqr
        #loss_scalar = (chi_sqr*weights).sum()
        std_resid = chi_sqr**(1 / 2)
        loss_scalar = np.average(std_resid, weights=weights)

        if verbose:
            output = {
                'chi_sqr': chi_sqr,
                'resid': resid,
                'resid_outlier': resid_outlier,
                'loss': loss_scalar,
                'weights': weights,
                'obs_sim_interp': obs_sim_interp
            }
            return output
        else:  # needs to return single value for optimization
            return loss_scalar

    def calc_density(x, data, dim=1):
        stdev = np.std(data)
        [q1, q3] = weighted_quantile(data, [0.25, 0.75])
        iqr = q3 - q1  # interquartile range
        A = np.min([stdev, iqr / 1.34
                    ]) / stdev  # bandwidth is multiplied by std of sample
        bw = 0.9 * A * len(data)**(-1. / (dim + 4))

        return stats.gaussian_kde(data, bw_method=bw)(x)

    def OoM(x):
        x = np.array(x)  # copy so the zero-replacement below does not mutate the caller's array
        x[x == 0] = 1  # if zero, make OoM 0
        return np.floor(np.log10(np.abs(x)))

    var, coef_opt, x, shock = args_list
    mech = mpMech['obj']

    # Optimization Begins, update mechanism
    update_mech_coef_opt(mech, coef_opt, x)

    T_reac, P_reac, mix = shock['T_reactor'], shock['P_reactor'], shock[
        'thermo_mix']

    SIM_kwargs = {
        'u_reac': shock['u2'],
        'rho1': shock['rho1'],
        'observable': shock['observable'],
        't_lab_save': None,
        'sim_int_f': var['sim_interp_factor'],
        'ODE_solver': var['ode_solver'],
        'rtol': var['ode_rtol'],
        'atol': var['ode_atol']
    }

    if '0d Reactor' in var['name']:
        SIM_kwargs['solve_energy'] = var['solve_energy']
        SIM_kwargs['frozen_comp'] = var['frozen_comp']

    SIM, verbose = mech.run(var['name'], var['t_end'], T_reac, P_reac, mix,
                            **SIM_kwargs)
    ind_var, obs = SIM.independent_var[:, None], SIM.observable[:, None]

    weights = shock['weights_trim']
    obs_exp = shock['exp_data_trim']

    if not np.any(var['t_unc']):
        t_unc = 0
    else:
        t_unc_OoM = np.mean(OoM(
            var['t_unc']))  # Do at higher level? (computationally efficient)
        # calculate time adjust with mse (loss_alpha = 2, loss_c =1)
        time_adj_decorator = lambda t_adjust: time_adjust_func(
            shock['time_offset'],
            t_adjust * 10**t_unc_OoM,
            ind_var,
            obs,
            obs_exp[:, 0],
            obs_exp[:, 1],
            weights,
            scale=var['resid_scale'],
            DoF=len(coef_opt))

        res = minimize_scalar(time_adj_decorator,
                              bounds=var['t_unc'] / 10**t_unc_OoM,
                              method='bounded')
        t_unc = res.x * 10**t_unc_OoM

    output = time_adjust_func(shock['time_offset'],
                              t_unc,
                              ind_var,
                              obs,
                              obs_exp[:, 0],
                              obs_exp[:, 1],
                              weights,
                              loss_alpha=var['loss_alpha'],
                              loss_c=var['loss_c'],
                              scale=var['resid_scale'],
                              DoF=len(coef_opt),
                              verbose=True)

    output['shock'] = shock
    output['independent_var'] = ind_var
    output['observable'] = obs

    plot_stats = True
    if plot_stats:
        x = np.linspace(output['resid'].min(), output['resid'].max(), 300)
        density = calc_density(x, output['resid'],
                               dim=1)  #kernel density estimation
        output['KDE'] = np.column_stack((x, density))

    return output
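
The core of time_adjust_func above is a shift-interpolate-compare loop: slide the simulated time axis, respline the simulation onto the experimental time base, and score the residual. A minimal sketch of that pattern with a synthetic signal and a known 0.35 time-unit offset:

import numpy as np
from scipy.interpolate import CubicSpline
from scipy.optimize import minimize_scalar

t_sim = np.linspace(0, 10, 500)
obs_sim = np.exp(-t_sim) * np.sin(3 * t_sim)
t_exp = np.linspace(1, 9, 200)
obs_exp = np.interp(t_exp - 0.35, t_sim, obs_sim)  # "experiment" lags by 0.35

def loss(t_adjust):
    f_interp = CubicSpline(t_sim + t_adjust, obs_sim)
    return np.mean((obs_exp - f_interp(t_exp)) ** 2)

res = minimize_scalar(loss, bounds=(-1.0, 1.0), method='bounded')
print(res.x)  # ~0.35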
Example #51
0
    def get_eps(self, delta):  # minimize over \lambda
        if not self.flag:
            self.build_zeroth_oracle()
            self.flag = True

        if delta < 0 or delta > 1:
            print("Error! delta is a probability and must be between 0 and 1")
        if delta == 0:
            return self.RDP_inf
        else:

            def fun(x):  # the input the RDP's \alpha
                if x <= 1:
                    return np.inf
                else:
                    return np.log(1 / delta) / (x - 1) + self.evalRDP(x)

            def fun_int(i):  # the input is RDP's \alpha in integer
                if i <= 1 or i >= len(self.RDPs_int):
                    return np.inf
                else:
                    return np.log(1 / delta) / (i - 1) + self.RDPs_int[i - 1]

            # When do we have computational constraints?
            # Only when we have subsampled items.

            # First check if the forward difference is positive at self.m, or if it is infinite
            while (self.m < self.m_max) and (not np.isposinf(fun(
                    self.m))) and (fun_int(self.m - 1) - fun_int(self.m - 2) <
                                   0):
                # If so, double m, expand logBinomC until the forward difference is positive

                if self.flag_subsample:

                    # The following line is m^2 time.
                    self.logBinomC = utils.get_binom_coeffs(self.m * 2 + 1)

                    # Update deltas_caches
                    for key, val in self.deltas_cache.items():
                        if type(key) is tuple:
                            func_tmp = key[0]
                        else:
                            func_tmp = key
                        cgf = lambda x: x * func_tmp(x + 1)
                        deltas, signs_deltas = utils.get_forward_diffs(
                            cgf, self.m * 2)

                        self.deltas_cache[key] = [deltas, signs_deltas]

                new_alphas = range(self.m + 1, self.m * 2 + 1, 1)
                self.alphas = np.concatenate(
                    (self.alphas, np.array(new_alphas)))  # array of integers
                self.m = self.m * 2

            mm = np.max(self.alphas)

            rdp_int_new = np.zeros_like(self.alphas, float)

            for key, val in self.cache.items():
                idx = self.idxhash[key]
                rdp = self.RDPs[idx]
                newarray = np.zeros_like(self.alphas, float)
                for j in range(2, mm + 1, 1):
                    newarray[j - 1] = rdp(1.0 * j)
                newarray[0] = newarray[1]
                coeff = self.coeffs[idx]
                rdp_int_new += newarray * coeff
                self.cache[key] = newarray

            self.RDPs_int = rdp_int_new

            # # update the integer CGF and the cache for each function
            # rdp_int_new = np.zeros_like(self.RDPs_int)
            # for key,val in self.cache.items():
            #     idx = self.idxhash[key]
            #     rdp = self.RDPs[idx]
            #     newarray = np.zeros_like(self.RDPs_int)
            #     for j in range(self.m):
            #         newarray[j] = rdp(1.0*(j+self.m+1))
            #
            #     coeff = self.coeffs[idx]
            #     rdp_int_new += newarray * coeff
            #     self.cache[key] = np.concatenate((val, newarray))
            #
            # # update the corresponding quantities
            # self.RDPs_int = np.concatenate((self.RDPs_int, rdp_int_new))

            #self.m = self.m*2

            bestint = np.argmin(
                np.log(1 / delta) /
                (self.alphas[1:] - 1) + self.RDPs_int[1:]) + 1

            if bestint == self.m - 1:
                if self.verbose:
                    print('Warning: Reach quadratic upper bound: m_max.')
                # In this case, we match the maximum quadratic upper bound
                # Fix it by calling O(1) upper bounds and do logarithmic search
                cur = fun(bestint)
                while (not np.isposinf(cur)
                       ) and fun(bestint - 1) - fun(bestint - 2) < -1e-8:
                    bestint = bestint * 2
                    cur = fun(bestint)
                    if bestint > self.m_lin_max and self.approx == True:
                        print('Warning: Reach linear upper bound: m_lin_max.')
                        return cur

                results = minimize_scalar(fun,
                                          method='Bounded',
                                          bounds=[self.m - 1, bestint + 2],
                                          options={'disp': False})
                if results.success:
                    return results.fun
                else:
                    raise RuntimeError(
                        f"Optimal RDP order not found: {results.message}")
                #return fun(bestint)

            if bestint == 0:
                if self.verbose:
                    print('Warning: Smallest alpha = 1.')

            # find the best integer alpha.
            bestalpha = self.alphas[bestint]

            results = minimize_scalar(fun,
                                      method='Bounded',
                                      bounds=[bestalpha - 1, bestalpha + 1],
                                      options={'disp': False})
            # the while loop above ensures that bestint+2 is at most m, and also bestint is at least 0.
            if results.success:
                return results.fun
            else:
                # There are cases when certain \delta is not feasible.
                # For example, let p and q be uniform the privacy R.V. is either 0 or \infty and unless all \infty
                # events are taken cared of by \delta, \epsilon cannot be < \infty
                return np.inf
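
The search above implements the standard RDP-to-DP conversion eps(delta) = min over alpha > 1 of [RDP(alpha) + log(1/delta)/(alpha - 1)], first coarsely over integer orders and then refined with a bounded scalar search. A self-contained sketch for the Gaussian mechanism, whose RDP is alpha/(2*sigma**2):

import numpy as np
from scipy.optimize import minimize_scalar

sigma, delta = 2.0, 1e-5

def eps_of_alpha(alpha):
    if alpha <= 1:
        return np.inf
    return alpha / (2 * sigma ** 2) + np.log(1 / delta) / (alpha - 1)

res = minimize_scalar(eps_of_alpha, bounds=(1 + 1e-6, 500), method='bounded')
print(res.fun)  # closed form: 1/(2*sigma**2) + sqrt(2*log(1/delta))/sigma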
Example #52
0
def optFrog(t,
            Et,
            hFunc,
            alpha=0.,
            tLim=(-np.inf, np.inf, 1),
            wLim=(0., np.inf, 1),
            disp=True):
    r"""Time-frequency relolution optimized spectrogram for time-domain
    analytic signal.

    Compute an optimized analytic signal spectrogram for time-domain analytic
    signal. The resulting time-frequency representation allows to interpret the
    time-frequency characteristics of the underlying signal [1]_.  The obtained
    spectrograms are optimal in the sense that they use an optimal parameter
    setting for the user supplied window function that minimizes the integrated
    absolute error between the intensities per unit time and frequency,
    obtained from the initial analytic signal, and the normalized marginals,
    computed from the spectrogram.

    Parameters
    ----------
    t : array_like
        Array containing time samples.
    Et : array_like
        Array containing analytic signal in time-domain.
    hFunc : object
        Custom window function for short-time fourier transform.
    alpha : float
        Parameter allowing to adjust relative importance of time or frequency
        resolution (default alpha=0.0).
    tLim : tuple(float, float, int)
        Boundary conditions for filtering and slicing of time samples in the
        form (tMin, tMax, mt), specifying lower bound for time samples, upper
        bound for time samples and an integer for filtering every mt-th time
        sample.
    wLim : tuple(float, float, int)
        Boundary conditions for filtering and slicing of frequency samples in the
        form (wMin, wMax, mw), specifying lower bound for frequency samples,
        upper bound for frequency samples and an integer for filtering every
        mw-th frequency sample.
    disp : bool
        Set to True to display convergence info (default: True).

    Returns
    -------
    res : Results
        The spectrogram ``Results'' data structure with attributes ``tau'' the
        delay time samples for the spectrogram, ``w'' the angular frequency
        samples for the spectrogram, ``P'' the analytic signal spectrogram,
        ``IAE1'' the integrated absolute error of the estimated time marginal,
        and, ``IAE2'' the integrated absolute error of the estimated frequency
        marginal.

    Notes
    -----
    The optimization procedure uses the function vanillaFrog() for the
    calculation of the spectrograms.

    As discussed in Ref. [6]_, if a window function approximates one marginal
    closely but the other one poorly, the spectrogram might appear distorted
    and unreasonable. In contrast, visual inspection confirms that
    spectrograms with the above property yield a reasonable time-frequency
    characterization of the signal under scrutiny.

    References
    ----------
    .. [1] Cohen, L. Time-Frequency distributions - A Review. Proceedings of
       the IEEE, 77 (1989) 941.

    .. [2] Marple, S L. Computing the discrete-time 'analytic' signal via FFT.
       IEEE Trans. Signal Processing, 47 (1999) 2600.

    .. [3] https://en.wikipedia.org/wiki/Short-time_Fourier_transform

    .. [4] Dorrer, C and Kang, I. Simultaneous temporal characterization
       of telecommunication optical pulses and modulators by use of spectrograms
       Opt. Lett., 27 (2002) 1315.

    .. [5] Linden, S and Giessen, H and Kuhl, J. XFROG - A New Method for
       Amplitude and Phase Characterization of Weak Ultrashort Pulses. Phys. Stat.
       Sol. 206 (1998) 119.

    .. [6] Cohen, L and Loughlin, P J. The marginals and time-frequency
       distributions. Proc. SPIE 5205, Advanced Signal Processing Algorithms,
       Architechtures, and Implementations XIII, doi: 10.1117/12.51389.

    """
    # SET REASONABLE BOUNDS FOR WIDTH PARAMETER OF WINDOW FUNCTION #####################
    s0Min = 10. * (t[1] - t[0])
    s0Max = 0.25 * (min([t.max(), tLim[1]]) - max([t.min(), tLim[0]]))

    # NORMALIZE INPUT FIELD SO THAT L2-NORM OF AS IS UNITY ##############################
    Et = np.real(Et) / np.sqrt(2. * _L2Norm(t, np.real(Et)))

    if disp:
        sys.stderr.write("# optFrog convergence info:\n")
        sys.stderr.write("# (sigma) (Q(sigma, alpha=%lf)) (IAE1) (IAE2)\n" %
                         (alpha))

    def _objFunc(s0, alpha, t, Et, tLim, wLim):
        res = vanillaFrog(t, Et, hFunc(s0), tLim, wLim)
        QVal = 0.5 * (1. + alpha) * res.IAE1 + 0.5 * (1. - alpha) * res.IAE2
        if disp:
            sys.stderr.write("%lf %lf %lf %lf\n" %
                             (s0, QVal, res.IAE1, res.IAE2))
        return QVal

    optRes = so.minimize_scalar(
        lambda x: _objFunc(x, alpha, t, Et, tLim, wLim),
        bounds=(s0Min, s0Max),
        method='bounded')

    # YIELD SPECTROGRAM WITH OPTIMAL WIDTH PARAMETER ####################################
    return vanillaFrog(t, Et, hFunc(optRes.x), tLim, wLim)
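
A usage sketch: hFunc must be a factory mapping the width parameter s0 to a window function, as implied by the hFunc(s0) calls above. The Gaussian window and the toy pulse below are assumptions for illustration; vanillaFrog and the Results structure come from the surrounding module:

import numpy as np

def hFunc(s0):
    # Gaussian window of width s0 (normalization chosen for illustration)
    return lambda t: np.exp(-t ** 2 / (2 * s0 ** 2)) / np.sqrt(np.sqrt(np.pi) * s0)

t = np.linspace(-50.0, 250.0, 2 ** 12)
Et = np.exp(-((t - 100.0) / 20.0) ** 2) * np.exp(1j * 2.0 * t)  # toy pulse

res = optFrog(t, Et, hFunc, alpha=0.0, disp=False)
print(res.IAE1, res.IAE2)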
Example #53
0
    def test_minimize_scalar_coerce_args_param(self):
        # Regression test for gh-3503
        optimize.minimize_scalar(self.fun, args=1.5)
Example #54
0
    def test_minimize_scalar(self):
        # combine all tests above for the minimize_scalar wrapper
        x = optimize.minimize_scalar(self.fun).x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, method='Brent')
        assert_(x.success)

        x = optimize.minimize_scalar(self.fun, method='Brent',
                                     options=dict(maxiter=3))
        assert_(not x.success)

        x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
                                    args=(1.5, ), method='Brent').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, method='Brent',
                                    args=(1.5,)).x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
                                    args=(1.5, ), method='Brent').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),
                                     args=(1.5, ), method='golden').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, method='golden',
                                     args=(1.5,)).x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),
                                     args=(1.5, ), method='golden').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),
                                     method='Bounded').x
        assert_allclose(x, 1, atol=1e-4)

        x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),
                                    method='bounded').x
        assert_allclose(x, self.solution, atol=1e-6)

        x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]),
                                                      np.array([5])),
                                    args=(np.array([1.5]), ),
                                    method='bounded').x
        assert_allclose(x, self.solution, atol=1e-6)

        assert_raises(ValueError, optimize.minimize_scalar, self.fun,
                      bounds=(5, 1), method='bounded', args=(1.5, ))

        assert_raises(ValueError, optimize.minimize_scalar, self.fun,
                      bounds=(np.zeros(2), 1), method='bounded', args=(1.5, ))

        x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),
                                     method='bounded').x
        assert_allclose(x, self.solution, atol=1e-6)
Example #55
0
    def fdp(x):
        assert (0 <= x <= 1)

        if x == 0:
            return 1
        elif x == 1:
            return 0

        if alpha == 1:
            # in this case rho is the KL-divergence
            def fun(y):
                assert (0 <= y <= 1 - x)
                if y == 0:
                    if x == 1:
                        return 0
                    else:
                        return np.inf
                elif y == 1:
                    if x == 0:
                        return 0
                    else:
                        return np.inf
                diff1 = (x * (np.log(x) - np.log(1 - y)) + (1 - x) *
                         (np.log(1 - x) - np.log(y)) - rho)
                diff2 = (y * (np.log(y) - np.log(1 - x)) + (1 - y) *
                         (np.log(1 - y) - np.log(x)) - rho)

                return np.maximum(diff1, diff2)
        else:
            # This is the general case for Renyi Divergence with \alpha > 1 or \alpha <1
            # find y such that
            # log(x^alpha (1-y)^{1-alpha} + (1-x)^alpha y^{1-alpha}) =  rho(alpha-1))
            # and log(y^alpha (1-x)^{1-alpha}) + (1-y)^alpha x^{1-alpha} =  rho(alpha-1))
            def fun(y):
                if y == 0:
                    if x == 1:
                        return 0
                    else:
                        return np.inf
                elif y == 1:
                    if x == 0:
                        return 0
                    else:
                        return np.inf

                diff1 = (utils.stable_logsumexp_two(
                    alpha * np.log(x) + (1 - alpha) * np.log(1 - y),
                    alpha * np.log(1 - x) + (1 - alpha) * np.log(y)) - rho *
                         (alpha - 1))
                diff2 = (utils.stable_logsumexp_two(
                    alpha * np.log(y) + (1 - alpha) * np.log(1 - x),
                    alpha * np.log(1 - y) + (1 - alpha) * np.log(x)) - rho *
                         (alpha - 1))
                if alpha > 1:
                    return np.maximum(diff1, diff2)
                else:  # alpha < 1
                    # Notice that the sign of the inequality is toggled
                    return np.minimum(diff1, diff2)

        def normal_equation(y):
            # for finding the root
            return abs(fun(y))

        # there are two roots, we care about the roots smaller than 1-x
        results = minimize_scalar(normal_equation,
                                  bounds=[0, 1 - x],
                                  method='bounded',
                                  options={'xatol': 1e-9 * (1 - x)})
        if results.success:
            return results.x
        else:
            return 0.0
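
Note the root-finding idiom: rather than calling a bracketing root solver, the code minimizes |fun(y)| on [0, 1-x] with a tight xatol, which tolerates the kink at the root and the possibility of fun staying infinite near the endpoints. The same idiom on a toy equation (find y with y**2 = c on [0, 1]):

from scipy.optimize import minimize_scalar

c = 0.3
root = minimize_scalar(lambda y: abs(y ** 2 - c), bounds=(0, 1),
                       method='bounded', options={'xatol': 1e-9}).x
print(root)  # ~0.5477 = sqrt(0.3)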
Example #56
0
def main():
    """Run main procedure."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("logfile")
    # TODO(schneiderfelipe): set charge and multiplicity
    parser.add_argument(
        "-a",
        "--acc",
        help="accuracy for SCC calculation, lower is better",
        type=float,
        default=1.0,
    )
    parser.add_argument(
        "--iterations",
        help="number of iterations in SCC",
        type=int,
        default=250,
    )
    parser.add_argument(
        "--gfn", help="specify parametrisation of GFN-xTB", type=int
    )
    parser.add_argument(
        "--etemp", help="electronic temperature", type=float, default=300.0
    )
    parser.add_argument(
        "-s",
        "--solvent",
        help=("solvent (SMD/GBSA implicit solvation models)"),
        default="none",
    )
    parser.add_argument(
        "--do-not-cache-api",
        dest="cache_api",
        help="Do not reuse generate API objects (not recommended)",
        action="store_false",
    )

    parser.add_argument(
        "--pm3", help="use PM3", action="store_true",
    )
    parser.add_argument(
        "--b97-3c", help="use B97-3c", action="store_true",
    )
    parser.add_argument(
        "--minimize", action="store_true",
    )
    parser.add_argument(
        "--transition-state", action="store_true",
    )
    parser.add_argument("--max-omega", type=float, default=1.0)
    parser.add_argument("--tol", type=float, default=1e-3)
    parser.add_argument("--nprocs", type=int, default=4)
    args = parser.parse_args()
    print(args)

    data = ccopen(args.logfile).parse()
    initial_positions = data.atomcoords[-1]
    atoms = Atoms(numbers=data.atomnos, positions=initial_positions)

    if args.gfn:
        method = f"GFN{args.gfn}-xTB"
        solvent = smd2gbsa[args.solvent.lower()]

        calc = XTB(
            method=method,
            accuracy=args.acc,
            electronic_temperature=args.etemp,
            max_iterations=args.iterations,
            solvent=solvent,
            cache_api=args.cache_api,
        )
    else:

        if args.b97_3c:
            method = "B97-3c D3BJ def2-SV(P)"
        elif args.pm3:
            method = "PM3"
        else:

            def allow_keyword(keyword):
                for forbidden in {"freq", "opt", "irc", "print"}:
                    if forbidden in keyword.lower():
                        return False
                return True

            keywords = [
                keyword
                for keyword in data.metadata["keywords"]
                if allow_keyword(keyword)
            ]

            method = " ".join(keywords)

        solvent = args.solvent
        blocks = f"%pal\n nprocs {args.nprocs}\nend\n%scf\n maxiter {args.iterations}\nend"
        if solvent != "none" and not args.pm3:
            blocks += f'\n%cpcm\n smd true\n smdsolvent "{solvent}"\nend'

        if "ORCA_COMMAND" not in os.environ:
            # For parallel runs ORCA has to be called with full pathname
            os.environ["ORCA_COMMAND"] = shutil.which("orca")

        calc = ORCA(
            label="012345_swing", orcasimpleinput=method, orcablocks=blocks
        )

    print(f"*** {method} ***")
    print(f"    : solvent:              {solvent}")

    atoms.set_calculator(calc)
    potential_min = atoms.get_potential_energy()
    print(f"@ potential energy:        {potential_min} eV")

    indices = np.where(data.vibfreqs < 0)[0]
    n_indices = len(indices)
    print(f"@ imaginary frequencies:   {data.vibfreqs[indices]}")
    if not n_indices:
        print("    : nothing to be done, bye")
        return

    ignoring = None
    if args.transition_state:
        ignoring = 0
        print("    : transition state:    ignoring first imaginary frequency")

    omegas = []
    potentials = []

    def f(omega):
        atoms.set_positions(
            initial_positions
            + np.einsum("i,ijk->jk", omega, data.vibdisps[indices])
        )

        potential = 1e3 * (atoms.get_potential_energy() - potential_min)

        omegas.append(omega)
        potentials.append(potential)
        print(f"    : omega:               {omega}")
        print(f"    : potential:           {potential} meV")

        return potential

    if args.minimize:
        guesses = [np.zeros_like(indices, dtype=float)]

    for j, i in enumerate(indices):
        # j indexes the reduced omega vector; i indexes the full mode list
        if ignoring is not None and j == ignoring:
            continue

        print(f"@ searching in direction   #{i}")

        def g(w):
            z = np.zeros_like(indices, dtype=float)
            z[j] = w
            return f(z)

        if args.minimize:
            res = minimize_scalar(
                g,
                method="bounded",
                bounds=(-args.max_omega, args.max_omega),
                tol=args.tol,
            )
            print(res)

            guess = np.zeros_like(indices, dtype=float)
            guess[j] = res.x
            guesses.append(guess)
        else:
            dx = args.max_omega / 100
            x = [-dx, 0.0, dx]
            # g(0) == 0 by construction: potential_min is evaluated at the
            # initial positions, so the relative potential vanishes there
            y = [g(-dx), 0.0, g(dx)]

            # p[0] * x**2 + p[1] * x + p[2] == k * (x - x0)**2 == k * x**2 - 2 * x0 * k * x + k * x0**2
            p = np.polyfit(x, y, 2)
            print(p)
            print(np.roots(p))

            dp = np.polyder(p)
            print(dp)
            r = np.roots(dp)
            print(r)


            step = np.zeros_like(indices, dtype=float)
            step[j] = r[0]
            best_positions = initial_positions + np.einsum(
                "i,ijk->jk", step, data.vibdisps[indices]
            )

    if args.minimize:
        print("@ choosing initial guess for global search")
        if n_indices > 1:
            guesses.append(np.sum(guesses, axis=0))
        x0 = guesses[np.argmin([f(guess) for guess in guesses])]

        print("@ searching in all directions")
        constraints = ()
        if args.transition_state and ignoring is not None:
            constraints = (
                {"type": "eq", "fun": lambda omega: omega[ignoring]},
            )
        res = minimize(
            f,
            x0=x0,
            bounds=n_indices * [(-args.max_omega, args.max_omega)],
            constraints=constraints,
            tol=args.tol,
        )
        print(res)
        best_positions = initial_positions + np.einsum(
            "i,ijk->jk", res.x, data.vibdisps[indices]
        )

        # TODO(schneiderfelipe): correct for when using --transition-state
        omegas = np.array(omegas)
        fig, ax = plt.subplots(n_indices, 1)
        if n_indices == 1:
            ax = [ax]
        xlim = (-args.max_omega - 0.05, args.max_omega + 0.05)
        ylim = (np.min(potentials) - 2.0, 40.0)
        for j, i in enumerate(indices):
            if ignoring is not None and j == ignoring:
                continue

            ax[j].plot(omegas[:, j], potentials, "o")
            ax[j].set_title(f"view of normal mode #{i}")
            ax[j].set_ylabel(r"potential energy (meV)")
            ax[j].set_xlabel(rf"$\omega_{i}$")
            ax[j].set_ylim(ylim)
            ax[j].set_xlim(xlim)
        plt.tight_layout()
        plt.show()

    print("@ writing best geometry to swinged.xyz")
    # TODO(schneiderfelipe): print a RMSD between initial and final structures
    atoms.set_positions(best_positions)
    atoms.write("swinged.xyz", format="xyz", plain=True)
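When --minimize is not given, the example above probes each imaginary mode at ±dx, fits a parabola through the three points, and takes the root of the fitted polynomial's derivative as the mode's stationary point. A self-contained sketch of that vertex computation, with made-up sample values:

import numpy as np

dx = 0.01
x = [-dx, 0.0, dx]
y = [0.8, 0.0, -0.6]      # hypothetical g(-dx), g(0), g(dx) values in meV

p = np.polyfit(x, y, 2)   # p[0]*x**2 + p[1]*x + p[2]
dp = np.polyder(p)        # derivative: 2*p[0]*x + p[1]
(vertex,) = np.roots(dp)  # stationary point at -p[1] / (2 * p[0])
print(vertex)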
Beispiel #57
0
    def log_one_minus_fdp(logx):
        #assert (0 <= x <= 1)
        assert (logx <= 0)

        if logx == 0:  # x==1, f(x) should be 0
            return 0
        elif np.isneginf(logx):  # x = 0,  f(x) should be 1
            return -np.inf

        # Now define the nonlinear equation ``fun'' whose root u
        # satisfies fun(u) = 0 and gives log(1 - f(x))

        if alpha == 1:
            # in this case rho is the KL-divergence
            def fun(u):
                assert (u >= logx)
                # assert (0 <= y <= 1 - x)
                if u == 0:  #y == 0:
                    if logx == 0:  #x == 1:
                        return 0
                    else:
                        return np.inf
                elif np.isneginf(u):  #y == 1:
                    if np.isneginf(logx):  #x == 0:
                        return 0
                    else:
                        return np.inf

                diff1 = diff1_KL(logx, u)
                #diff1 = (x * (np.log(x) - np.log(1 - y))
                #         + (1 - x) * (np.log(1 - x) - np.log(y)) - rho)
                diff2 = diff2_KL(logx, u)

                #diff2 = (y * (np.log(y) - np.log(1 - x))
                #         + (1 - y) * (np.log(1 - y) - np.log(x)) - rho)

                return np.maximum(diff1, diff2)
        else:
            # This is the general case for Renyi divergence with alpha > 1 or alpha < 1:
            # find y such that
            # log(x^alpha * (1-y)^(1-alpha) + (1-x)^alpha * y^(1-alpha)) = rho*(alpha-1)
            # and log(y^alpha * (1-x)^(1-alpha) + (1-y)^alpha * x^(1-alpha)) = rho*(alpha-1)
            def fun(u):
                assert (u >= logx)
                if u == 0:  #y == 0:
                    if logx == 0:  #x == 1:
                        return 0
                    else:
                        return np.inf
                elif np.isneginf(u):  #y == 1:
                    if np.isneginf(logx):  #x == 0:
                        return 0
                    else:
                        return np.inf

                # diff1 = (utils.stable_logsumexp_two(alpha * np.log(x) + (1 - alpha) * np.log(1 - y),
                #                                     alpha * np.log(1 - x) + (1 - alpha) * np.log(y))
                #          - rho * (alpha - 1))
                diff1 = diff1_general(logx, u)
                # diff2 = (utils.stable_logsumexp_two(alpha * np.log(y) + (1 - alpha) * np.log(1 - x),
                #                                     alpha * np.log(1 - y) + (1 - alpha) * np.log(x))
                #          - rho * (alpha - 1))
                diff2 = diff2_general(logx, u)
                if alpha > 1:
                    return np.maximum(diff1, diff2)
                else:  # alpha < 1
                    # Notice that the sign of the inequality is toggled
                    return np.minimum(diff1, diff2)

        def normal_equation(u):
            # for finding the root
            return abs(fun(u))

        # there are two roots; we care about the one smaller than 1 - x
        results = minimize_scalar(normal_equation,
                                  bounds=[logx, 0],
                                  method='bounded',
                                  options={'xatol': 1e-8})
        if results.success:
            return results.x
        else:
            return 0.0
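The normal_equation wrapper above turns root finding into a bounded minimization of |fun|, which also behaves sensibly when no exact root exists. When the residual is continuous and changes sign on the interval, a bracketing root finder is a common alternative; a sketch with a hypothetical residual:

import numpy as np
from scipy.optimize import brentq

def fun(u):
    # hypothetical residual with a root at log(0.5)
    return np.expm1(u) + 0.5

# fun(-10) < 0 < fun(0), so the bracket is valid
root = brentq(fun, -10.0, 0.0, xtol=1e-8)
print(root)  # ~-0.6931 == log(0.5)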
Beispiel #58
0
    def valueNpolicy(self, p):
        """Given prices, transfers, benefits, and tax rates over one's
        life cycle, value and decision functions are calculated
        ***BACKWARD***."""
        [r, w, b, tr, tw, tb, Tr, qh, qr] = p
        T, aa, hh, aN, hN = self.T, self.aa, self.hh, self.aN, self.hN
        psi, tcost, beta, gamma, sp = self.psi, self.tcost, self.beta, self.gamma, self.sp
        # y = -1 : the oldest generation
        for h in range(self.hN):
            for i in range(self.aN):
                cash = self.budget(-1, h, 0, aa[i], 0, p)
                self.co[-1, h, i] = (cash + qr[-1] * (hh[h] + gamma)) / (1 + psi)
                self.ro[-1, h, i] = (cash * psi - qr[-1] *
                                     (hh[h] + gamma)) / ((1 + psi) * qr[-1])
                self.v[-1, h, i] = self.util(self.co[-1, h, i],
                                             self.ro[-1, h, i] + hh[h])
            self.vtilde[-1][h] = interp1d(aa, self.v[-1, h], kind='cubic')
        # y = -2, -3,..., -75
        for y in range(-2, -(T + 1), -1):
            income = Tr[y] + b[y] if y >= -self.R else Tr[y] + (
                1 - tw[y] - tb[y]) * w[y] * self.ef[y]
            mdti = (aa + income * self.dti >= 0).argmax()  # DTI constraint
            # print 'dit',income*0.3,mdti,aa[mdti]
            for h0 in range(self.hN):
                at = array([[0.0] * aN for _ in range(hN)])
                ct = array([[0.0] * aN for _ in range(hN)])
                rt = array([[0.0] * aN for _ in range(hN)])
                vt = array([[float('-inf')] * aN for _ in range(hN)])
                casht = array([[0.0] * aN for _ in range(hN)])
                # print self.apath
                for h1 in range(self.hN):
                    mltv = (aa + self.ltv * qh[y] * hh[h1] >=
                            0).argmax()  # LTV constraint
                    # print (aa + self.ltv*qh[y]*hh[h1] > 0)
                    # print m0
                    m0 = max(0, mltv, mdti)
                    # m0 = 0
                    for i in range(self.aN):  # l = 0, 1, ..., 50
                        # Find a bracket within which optimal a' lies
                        m = max(0, m0)  # Rch91v.g uses m = max(0, m0-1)
                        # print m
                        m0, a0, b0, c0 = self.GetBracket(y, h0, h1, i, m, p)
                        if a0 == b0:
                            at[h1, i] = aa[m0]
                        elif b0 == c0:
                            at[h1, i] = self.aH
                        else:
                            # print a0, b0, c0
                            def objfn(a1):
                                # objective function for the optimal a'
                                return -self.findv(y, h0, h1, aa[i], a1, p)

                            result = minimize_scalar(objfn,
                                                     bracket=(a0, b0, c0),
                                                     method='Golden')
                            at[h1, i] = result.x
                        # print y, h0, h1, i, m0, a0, b0, c0, 'a1=',at[h1,i]
                        # Compute consumption, rent and house
                        casht[h1, i] = self.budget(y, h0, h1, aa[i], at[h1, i],
                                                   p)
                        ct[h1, i] = (casht[h1, i] + qr[y] *
                                     (hh[h0] + gamma)) / (1 + psi)
                        rt[h1, i] = (casht[h1, i] * psi - qr[y] *
                                     (hh[h0] + gamma)) / ((1 + psi) * qr[y])
                        vt[h1, i] = self.util(
                            ct[h1, i], rt[h1, i] + hh[h0]
                        ) + beta * sp[y] * self.vtilde[y + 1][h1](at[h1, i])
                for i in range(self.aN):
                    h1 = vt[:, i].argmax()
                    self.ho[y, h0, i] = h1
                    self.v[y, h0, i] = vt[h1, i]
                    self.co[y, h0, i] = ct[h1, i]
                    self.ro[y, h0, i] = rt[h1, i]
                    self.ao[y, h0, i] = at[h1, i]
                    cash = casht[h1, i]
                ai = [
                    self.aN // 4, self.aN * 2 // 4, self.aN * 3 // 4,
                    self.aN - 1
                ]  # integer division so these remain valid array indices
                if y % 15 == 0 and h0 == 0:
                    print('------------')
                    print('y=', y, 'inc=%2.2f' % income, 'h0=', h0)
                    for i in ai:
                        print('ltv=%2.2f' % (hh[self.ho[y, h0, i]] * qh[y] * self.ltv),
                              'dti=%2.2f' % (income * self.dti),
                              'a0=%2.2f' % aa[i], 'h1=%2.0f' % self.ho[y, h0, i],
                              'a1=%2.2f' % self.ao[y, h0, i],
                              'c0=%2.2f' % self.co[y, h0, i],
                              'r0=%2.2f' % self.ro[y, h0, i])
                self.vtilde[y][h0] = interp1d(aa, self.v[y, h0], kind='cubic')
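The inner loop above first brackets the optimal a' with GetBracket and only then calls the golden-section search, which requires a triple (a0, b0, c0) whose middle value is the lowest. A minimal sketch of that bracket-then-refine step on a hypothetical objective:

from scipy.optimize import minimize_scalar

def objfn(a1):
    # hypothetical objective with its minimum at a1 == 2.0
    return (a1 - 2.0) ** 2

# valid bracket: objfn(1.5) is below both objfn(0.0) and objfn(5.0)
a0, b0, c0 = 0.0, 1.5, 5.0
result = minimize_scalar(objfn, bracket=(a0, b0, c0), method='Golden')
print(result.x)  # ~2.0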
Beispiel #59
0
def conj_vect(J, eps, u_0):
    """
    Conjugate gradient method for minimizing a function of several variables.

    u_{k+1} = u_k - alpha_k * p_k
        where:
    u_0 is the initial guess
    p_0 = grad(J)(u_0)
    p_k = grad(J)(u_k) - beta_k * p_{k-1}

    alpha_k >= 0;
    alpha_k = argmin(f_k(alpha) | alpha > 0),
        where f_k(alpha) = J(u_k - alpha * p_k)

    beta_k = -|grad(J)(u_k)|**2 / |grad(J)(u_{k-1})|**2
    """

    #grad = nd.Gradient(J)
    u_old = np.array(u_0)
    p = Gradient(J, u_old)
    f = lambda alph: J(u_old - alph * p)
    #alpha = BrentComb(0, 2, f, 0.0001)[0]
    alpha = minimize_scalar(f, bounds=(0, 1), method='bounded').x
    beta = 0.
    if notations:  # module-level verbosity flag
        print("Step 1")
        print("  Alpha | Beta  - ", alpha, " | ", beta)
        print(" Argument value - ", u_old)
        print(" Direction value - ", p)
    k = 1
    while (np.linalg.norm(Gradient(J, u_old)) >= eps):
        if notations:
            print("Step ", k)
        u_new = u_old - alpha * p

        if k % dim == 0:  # periodic restart every dim steps (dim is a module-level constant)
            beta = 0.
        else:
            beta = -np.linalg.norm(Gradient(J, u_new))**2 / (np.linalg.norm(
                Gradient(J, u_old))**2)
        # the conjugate direction combines the new gradient with the
        # previous direction p (not the previous iterate u_old)
        p = Gradient(J, u_new) - beta * p
        # the line search starts from the updated point u_new
        f = lambda alph: J(u_new - alph * p)
        #alpha = BrentComb(0, 1, f, 0.0001)[0]
        alpha = minimize_scalar(f, bounds=(0, 1), method='bounded').x

        u_old = u_new
        if notations:
            print("  Alpha | Beta  - ", alpha, " | ", beta)
            print(" Argument value - ", u_new)
            print(" Direction value - ", p)
            print("Function minimum: ", J(u_old))
        k += 1
    print("Результат получен через ", k, "шагов")
    print("Значение аргумента:", np.array(u_old))
    print("Минимум функции: ", J(u_old))
    return (u_new)
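Each iteration of conj_vect performs an exact line search by minimizing f(alpha) = J(u - alpha*p) on a bounded interval. The same step in isolation, sketched on a simple quadratic with an analytic gradient standing in for the Gradient helper used above:

import numpy as np
from scipy.optimize import minimize_scalar

def J(u):
    # hypothetical quadratic objective
    return u[0] ** 2 + 10.0 * u[1] ** 2

def grad_J(u):
    return np.array([2.0 * u[0], 20.0 * u[1]])

u = np.array([1.0, 1.0])
p = grad_J(u)                     # first direction: the gradient itself
f = lambda alph: J(u - alph * p)  # restriction of J to the search ray
alpha = minimize_scalar(f, bounds=(0, 1), method='bounded').x
print(alpha, u - alpha * p)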

def com(a, b):
    # matrix commutator [a, b] = ab - ba
    return np.dot(a, b) - np.dot(b, a)


# Hp, Hm, H, coef, pxp, zm_state and fidelity_eval are defined elsewhere in the module
Hz = 1 / 2 * com(Hp, Hm)

e, u = np.linalg.eigh(H)
t = np.arange(0, 20, 0.01)
f = np.zeros(np.size(t))
z = zm_state(2, 1, pxp, 1)
psi_energy = np.dot(np.conj(np.transpose(u)), z.prod_basis())

res = minimize_scalar(lambda t: fidelity_eval(psi_energy, e, t),
                      method="golden",
                      bracket=(2.5, 3.5))
f0 = -fidelity_eval(psi_energy, e, res.x)
print("F0 REVIVAL")
print(res.x, fidelity_eval(psi_energy, e, res.x))

for n in range(0, np.size(t, axis=0)):
    f[n] = -fidelity_eval(psi_energy, e, t[n])
plt.plot(t, f)
plt.xlabel(r"$t$")
plt.ylabel(r"$\vert \langle \psi(0) \vert \psi(t) \rangle \vert^2$")
plt.title(r"East Model $PX, Z_2$, 1st Order Lie algebra perturbations" + "\n" +
          r"$\hat{V} = P X Z$, $\lambda$=" + str("%.4f" % coef) + ",N=" +
          str(pxp.N))
plt.show()
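The golden-method call above refines the first fidelity revival from a coarse bracket around t = 3. The same bracketed-refinement pattern on a toy stand-in for the fidelity curve (the revival structure here is made up):

import numpy as np
from scipy.optimize import minimize_scalar

def neg_fidelity(t):
    # toy stand-in: |<psi(0)|psi(t)>|^2 ~ cos(t/2)^2, revival near 2*pi
    return -np.cos(0.5 * t) ** 2

res = minimize_scalar(neg_fidelity, method='golden', bracket=(5.5, 7.0))
print(res.x, -res.fun)  # ~6.2832 (= 2*pi), ~1.0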