import numpy as np
from scipy.optimize import broyden2


def optimize_via_equation_system(traj_d, start, goal, norm):
    '''
    Solves the equation system implied by the Lagrangian optimization in "Movement Primitives via Optimization"
    (Dragan et al., 2015), equations 3 and 4, in order to adapt a demonstrated trajectory to two new endpoints
    (start and goal). The adaptation assumes as a norm a finite-difference matrix of a spring-damper system
    (new positions are calculated based on accelerations).
    :param traj_d: demonstrated trajectory, (T, n)
    :param start: (n,)
    :param goal: (n,)
    :param norm: (T, T) --> must be positive definite
    :return: adapted trajectory (T, n)
    '''
    assert math.is_pos_def(norm), "norm must be positive definite"  # 'math' is presumably a project-local helper; the stdlib math module has no is_pos_def
    traj_len = traj_d.shape[0]

    start_goal_vec = np.zeros(traj_len)
    start_goal_vec[0] = start
    start_goal_vec[-1] = goal

    b = norm.dot(traj_d - start_goal_vec)

    mask1 = np.zeros(traj_len)
    mask1[0], mask1[-1] = 1, 1

    mask2 = np.ones(traj_len)
    mask2[0], mask2[-1] = 0, 0

    fun = lambda x: norm.dot(np.multiply(mask2, x)) - np.multiply(mask1, x) - b

    traj = broyden2(fun, traj_d)
    traj[0] = start
    traj[-1] = goal
    return traj
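# A minimal usage sketch (not part of the original project): adapt a 1-D demonstrated
# trajectory to new endpoints, using K^T K of a discrete second-derivative operator as
# the positive-definite norm. Assumes the is_pos_def helper used above is available.
import numpy as np

T = 50
traj_d = np.linspace(0.0, 1.0, T) ** 2                  # demonstrated 1-D trajectory
K = (np.diag(np.full(T, -2.0))
     + np.diag(np.ones(T - 1), 1)
     + np.diag(np.ones(T - 1), -1))                     # finite-difference acceleration operator
norm = K.T.dot(K) + 1e-6 * np.eye(T)                    # symmetric positive definite
adapted = optimize_via_equation_system(traj_d, start=0.2, goal=0.8, norm=norm)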
import numpy as np
from scipy import optimize
from scipy.special import digamma


def schatters_MLLH(data):
    """
    This function computes the maximum-likelihood estimators. k has to be determined numerically,
    which is done in the function schatter_k_MLLH.
    scipy.optimize.broyden2 was chosen after testing several methods and comparing their performance;
    broyden2 came out as the fastest for this equation.

    :param data: the dataset from which k and theta are computed
    :return: the estimators k and theta
    """
    y = data
    gem_y = np.average(y)

    def schatter_k_MLLH(k):
        """
        :param k: the estimator k
        :return: the equation that has to be solved numerically
        """
        gem_lny = np.average(np.log(y))
        f = gem_lny - np.log(gem_y) + np.log(k) - digamma(k)
        return f

    k = float(optimize.broyden2(schatter_k_MLLH, np.array([3.0])))
    theta = gem_y / k
    return k, theta
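# A small sanity check (illustrative, not from the original code): draw gamma samples
# with known shape k and scale theta and recover them with the estimator above.
import numpy as np

rng = np.random.default_rng(0)
samples = rng.gamma(shape=2.5, scale=1.7, size=10_000)
k_hat, theta_hat = schatters_MLLH(samples)
print(k_hat, theta_hat)   # should come out close to 2.5 and 1.7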
    def iterate(self, kspace=None, tol=10**-6, maxiter=200):
        '''
        Iterate the SCMF to get converged order parameters.

        Parameters
        ----------
        kspace : BaseSpace, optional
            The Brillouin zone of the system.
        tol : float, optional
            The tolerance of the order parameter.
        maxiter : int, optional
            The maximum number of iterations.
        '''
        def gx(values):
            for op, value in zip(self.ops.values(), values):
                op.value = value
            self.opsupdate(kspace)
            return array([self.ops[key].value
                          for key in self.ops.keys()]) - values

        with self.timers.get('Iteration'):
            x0 = array([self.ops[key].value for key in self.ops.keys()])
            ops = broyden2(gx,
                           x0,
                           verbose=True,
                           reduction_method='svd',
                           maxiter=maxiter,
                           x_tol=tol)
        self.log << 'Order parameters:\n%s\n' % Sheet.fromordereddict(
            OrderedDict([(name, op)
                         for name, op in zip(self.ops.keys(), ops)]))
        self.log << 'Iterate: time consumed %ss.\n\n' % self.timers.time(
            'Iteration')
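# The self-consistency iteration above casts "updated order parameters equal the current
# order parameters" as a root-finding problem g(x) = F(x) - x = 0 for broyden2. A toy
# version of the same idea (F is an arbitrary contraction here, not the SCMF update):
import numpy as np
from scipy.optimize import broyden2

def F(x):
    return 0.5 * np.cos(x)                # stand-in for the mean-field update

x_fixed = broyden2(lambda x: F(x) - x, np.zeros(3), x_tol=1e-6)
print(x_fixed)                            # converged fixed point, about 0.450 in each component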
 def boundary(x):
     # Boundary values obtained by nonlinear solver
     x = np.array(x) / N - 0.5
     def fun(v):
         return [v[0] + v[0] * v[1] ** 2 - v[0] ** 3 / 3 - x[0],
                 -v[1] - v[1] * v[0] ** 2 + v[1] ** 3 / 3 - x[1]]
     v = broyden2(fun, [0, 0])
     return v[0] ** 2 - v[1] ** 2
def method_of_moments_estimator(sequence=None, attributes=None, cache=None):
    """

    Simple Method-of-Moments Estimator to estimate D (Haas et al, 1995);
    can be optimised (training rate, stopping value).

    The moment equation is

        d = d_moments * (1 - e^(-n / d_moments))

    and is solved numerically for d_moments.

    :param sequence: sample sequence of integers
    :type sequence: array of ints
    :param attributes: dictionary with keys as the unique elements and values as
                        counts of those elements
    :type attributes: dictionary where keys can be any type, values must be integers
    :param cache: argument used by median methods to avoid recomputation of variables
    :type cache: dictionary with 4 elements
                 {"n":no_elements,"d":no_unique_elements,"attr": attribute_counts,
                 "freq":frequency_dictionary}
    :return: estimated distinct count
    :rtype: float

    """
    if sequence is None and attributes is None and cache is None:
        raise Exception(
            "Must provide a sequence, or a dictionary of attribute counts ")

    if cache is not None:
        n, d, frequency_dictionary = cache["n"], cache["d"], cache["freq"]
    elif sequence is not None:
        n, d, _, frequency_dictionary = precompute_from_seq(sequence)
    else:
        n, d, _, frequency_dictionary = precompute_from_attr(attributes)

    if n == d:
        return _compute_birthday_problem_probability(d)

    def diff_eqn(D):
        return D * (1 - np.exp((-n / D))) - d

    warnings.filterwarnings('ignore')

    try:
        d_moments_1 = float(broyden1(diff_eqn, d))
    except Exception as e:
        print(e)
        d_moments_1 = 1000000000000000000000000

    try:
        d_moments_2 = float(broyden2(diff_eqn, d))
    except:
        d_moments_2 = 1000000000000000000000000
    warnings.resetwarnings()
    result = min(d_moments_1, d_moments_2)
    if result == 1000000000000000000000000:
        return d
    else:
        return result
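# Illustrative check of the moment equation itself (the precompute_* helpers above are
# project-specific, so this solves the equation directly): with n = 1000 sampled rows
# and d = 300 distinct values observed, find D such that D * (1 - exp(-n / D)) = d.
import numpy as np
from scipy.optimize import broyden2

n, d = 1000, 300
estimate = float(broyden2(lambda D: D * (1.0 - np.exp(-n / D)) - d, d))
print(estimate)   # estimated number of distinct values in the underlying relation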
    def _get_damage(self):
        if self.w == 0.:
            damage = np.zeros_like(self.sorted_depsf)
        else:
            ff = t.clock()
            try:
                damage = broyden2(self.damage_residuum, 0.2 * np.ones_like(self.sorted_depsf), maxiter=20)
            except Exception:
                print('broyden2 does not converge fast enough: switched to fsolve for this step')
                damage = fsolve(self.damage_residuum, 0.2 * np.ones_like(self.sorted_depsf))
            print('damage =', np.sum(damage) / len(damage), 'iteration time =', t.clock() - ff, 'sec')
        return damage
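# The fallback pattern used above, shown on a toy residual (illustrative only): run a
# limited number of broyden2 iterations and fall back to fsolve when the quasi-Newton
# update does not converge.
import numpy as np
from scipy.optimize import broyden2, fsolve

def residuum(x):
    return np.cos(x) - x                  # toy stand-in for damage_residuum

x0 = 0.2 * np.ones(5)
try:
    solution = broyden2(residuum, x0, maxiter=20)
except Exception:
    solution = fsolve(residuum, x0)
print(solution)                            # each component converges to about 0.739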
    def compute_ss(self, guess=None, method='fsolve', options={}):
        '''Attempts to solve for the steady state of the model.

        Args:
            guess:      (Pandas Series, Numpy array, or list) An initial guess for the 
                            steady state solution. The result is highly sensisitve to the intial 
                            guess chosen, so be careful. If the guess is a Numpy ndarray or a list
                            then the elements must be ordered to conform with self.names['variables'].
            method:     (str) The function from the Scipy library to use. Your choices are:
                        a. root
                        b. fsolve (default)
                        c. broyden1
                        d. broyden2
            options:    (dict) A dictionary of optional arguments to pass to the numerical solver.
                            Check out the Scipy documentation to see the options available for each routine:
                                http://docs.scipy.org/doc/scipy/reference/optimize.html

        Returns:
            None

        Attributes:
            ss: (Pandas Series) Steady state values of endogenous variables

            '''

        if guess is None:
            guess = np.ones(self.n_vars)
        else:
            if isinstance(guess, pd.Series):
                guess = guess[self.names['variables']]

        # Create function for nonlinear solver
        def ss_fun(variables):

            variables = pd.Series(variables, index=self.names['variables'])

            return self.equilibrium_fun(variables, variables, self.parameters)

        if method == 'fsolve':
            steady_state = fsolve(ss_fun, guess, **options)

        elif method == 'root':
            steady_state = root(ss_fun, guess, **options)['x']

        elif method == 'broyden1':
            steady_state = broyden1(ss_fun, guess, **options)

        elif method == 'broyden2':
            steady_state = broyden2(ss_fun, guess, **options)

        # Add ss attribute
        self.ss = pd.Series(steady_state, index=self.names['variables'])
 def _get_damage(self):
     #ff = time.clock()
     if self.w == 0.:
         damage = np.zeros_like(self.sorted_depsf)
     else:
         #ff = t.clock()
         try:
             damage = broyden2(self.damage_residuum, 0.2 * np.ones_like(self.sorted_depsf), maxiter=20)
         except:
             #print 'broyden2 does not converge fast enough: switched to fsolve for this step'
             damage = fsolve(self.damage_residuum, 0.2 * np.ones_like(self.sorted_depsf))
         #print 'damage =', np.sum(damage) / len(damage), 'iteration time =', time.clock() - ff, 'sec'
     return damage
def P(phi, phib, df):
	""" Numerically solve for the partition coefficient as a
	    function of phi_s """
	if f(0, phi, phib, df) * f(1, phi, phib, df) < 0:
		# root is bracketed in [0, 1]; broyden2 accepts neither a bracket nor an args tuple,
		# so use Brent's method on the bracket instead
		return opt.brentq(f, 0, 1, args=(phi, phib, df))
	else:
		return opt.newton(f, 1.0, args=(phi, phib, df))  # Newton-Raphson
def VCACPFF(engine,app):
    '''
    This method calculates the chemical potential or filling factor.
    '''
    engine.rundependences(app.name)
    engine.cache.pop('ptmesh',None)
    kmesh,nk=app.BZ.mesh('k'),app.BZ.rank('k')
    fx=lambda omega,mu: (np.trace(engine.mgfmesh(omega=mu+1j*omega,kmesh=kmesh),axis1=1,axis2=2)-engine.nclopt/(1j*omega-app.p)).sum().real
    if app.task=='CP':
        gx=lambda omu: quad(fx,0,np.inf,args=omu)[0]/nk/engine.nclopt/np.pi-app.cf if False else None
        mu=broyden2(gx,app.options.pop('x0',0.0),**app.options)
        engine.log<<'mu(error): %s(%s)\n'%(mu,gx(mu))
        if app.returndata: return mu
    else:
        rquad=quad(fx,0,np.inf,args=app.cf,full_output=2)
        filling=rquad[0]/nk/engine.nclopt/np.pi
        engine.log<<'Filling factor(mu=%s,err=%.2e,neval=%s): %s\n'%(HP.decimaltostr(app.cf),rquad[1],rquad[2]['neval'],filling)
        if app.returndata: return filling
def VCACPFF(engine,app):
    '''
    This method calculates the chemical potential or filling factor.
    '''
    engine.rundependences(app.name)
    engine.cache.pop('pt_kmesh',None)
    kmesh,nk=app.BZ.mesh('k'),app.BZ.rank('k')
    fx=lambda omega,mu: (np.trace(engine.mgf_kmesh(omega=mu+1j*omega,kmesh=kmesh),axis1=1,axis2=2)-engine.nclopt/(1j*omega-app.p)).sum().real
    if app.task=='CP':
        gx=lambda mu: quad(fx,0,np.inf,args=mu)[0]/nk/engine.nclopt/np.pi-app.cf
        mu=broyden2(gx,app.options.pop('x0',0.0),**app.options)
        engine.log<<'mu(error): %s(%s)\n'%(mu,gx(mu))
        if app.returndata: return mu
    else:
        rquad=quad(fx,0,np.inf,args=app.cf,full_output=2)
        filling=rquad[0]/nk/engine.nclopt/np.pi
        engine.log<<'Filling factor(mu=%s,err=%.2e,neval=%s): %s\n'%(HP.decimaltostr(app.cf),rquad[1],rquad[2]['neval'],filling)
        if app.returndata: return filling
    def __solver__(self, p):
        
        p.xk = p.x0.copy()
        p.fk = asfarray(max(abs(p.f(p.x0)))).flatten()
        
        p.iterfcn()
        if p.istop:
            p.xf, p.ff = p.xk, p.fk
            return 
        
        try: xf = broyden2(p.f, p.x0, iter = p.maxIter)
        except: 
            p.istop = -1000
            return

        p.xk = p.xf = asfarray(xf)
        p.fk = p.ff = asfarray(max(abs(p.f(xf)))).flatten()
        p.istop = 1000
        p.iterfcn()
    def _get_damage(self):
        ff = time.clock()
        if self.w == 0.:
            damage = np.zeros_like(self.sorted_depsf)
        else:
            ff = t.clock()
            try:
                damage = broyden2(self.damage_residuum, 0.2 * np.ones_like(self.sorted_depsf) * (self.sorted_lf < 0), iter=20)
            except Exception:
                print('broyden2 does not converge fast enough: switched to fsolve for this step')
                damage = fsolve(self.damage_residuum, 0.2 * np.ones_like(self.sorted_depsf) * (self.sorted_lf < 0))

            cont_fibers = self.sorted_lf < 0
            dam_cont = np.sum(damage[cont_fibers])
            num_cont = np.sum(cont_fibers)
            if num_cont > 0:
                print('damage of continuous fibers =', dam_cont / num_cont, end=' ')
            print('iteration time =', time.clock() - ff, 'sec')
        return damage
def aplicarMetodosNL(f, x, y):
    try:
        print("Result with fsolve = {}".format(
            optimize.fsolve(f, [x, y], xtol=10**(-6))))
    except Exception as e:
        print("The fsolve method does not converge")
    try:
        print("Result with the Newton-Krylov method = {}".format(
            optimize.newton_krylov(f, [x, y], x_tol=10**(-6))))
    except Exception as e:
        print("The Newton-Krylov method does not converge")

    try:
        print("Result with the Broyden 1 method = {}".format(
            optimize.broyden1(f, [x, y], x_tol=2**(-16))))
    except Exception as e:
        print("The Broyden 1 method does not converge")

    try:
        print("Result with the Broyden 2 method = {}".format(
            optimize.broyden2(f, [x, y], x_tol=2**(-50))))
    except Exception as e:
        print("The Broyden 2 method does not converge")
def test2():
    compound1 = compoundfromformula("C18H30O2", 0.9291, name="linseedoil")
    compound2 = compoundfromformula("Pb3C2O8H2", 6.8, name="hydrocerussite")
    m = mixture([compound1, compound2], [0.5, 0.5], fraction.volume)
    mu = m.mass_att_coeff(35.0)

    # 60% transmission
    T = 0.6

    # flat sample
    thickness = -np.log(T) / (mu * m.density)  # cm

    # capillary
    # R = 30e-4 # cm
    # packing = optimize.broyden2(capillary_refine2(mu,m.density,T,R),0.7)
    packing = 1.0
    R = optimize.broyden2(capillary_refine(mu, m.density, T, packing),
                          [80e-4])[0]  # cm

    # 1-3 ug
    mass = 1 * 1e-6  # g
    volume = mass / m.density  # cm^3

    print "Mixture:"
    print "density = {} g/cm^3".format(m.density)
    print "mass.att. = {} cm^2/g".format(mu)

    print "\nCapillary:"
    print "R = {} um".format(R * 1e4)
    print "packing = {} %".format(packing * 100)
    print "h = {} mm (@ total mass = {} ug)".format(
        volume / (np.pi * R * R) * 10, mass * 1e6)

    print "\nFlat sample:"
    print "thickness = {} um".format(thickness * 1e4)
    print "footprint = {} mm^2 (@ total mass = {} ug)".format(
        volume / thickness * 1e4, mass * 1e6)
    ax.plot(sol[0], sol[1], 'r*', markersize=15)

    ax.plot(x_guess[0], x_guess[1], 'ko')
    ax.annotate("",
                xy=(sol[0], sol[1]),
                xytext=(x_guess[0], x_guess[1]),
                arrowprops=dict(arrowstyle="->", linewidth=2.5))

ax.legend(loc=0)
ax.set_xlabel(r'$x$', fontsize=18)
fig.tight_layout()
fig.savefig('ch5-nonlinear-system.pdf')

# In[79]:

optimize.broyden2(f, x_guesses[1])

# In[80]:


def f(x):
    return [x[1] - x[0]**3 - 2 * x[0]**2 + 1, x[1] + x[0]**2 - 1]


x = np.linspace(-3, 2, 5000)
y1 = x**3 + 2 * x**2 - 1
y2 = -x**2 + 1

fig, ax = plt.subplots(figsize=(8, 4))

ax.plot(x, y1, 'k', lw=1.5, label=r'$y = x^3 + 2x^2 - 1$')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 29 15:55:58 2021

@author: shrohanmohapatra
"""

from scipy.optimize import broyden2


def paretoZeroFinder(x):
    return 5**(1 + 1 / x) - x / (x - 1)


print(broyden2(paretoZeroFinder, 0.54, 2500))
print(paretoZeroFinder(1.04477526717764))
def sichel_estimator(sequence=None, attributes=None, cache=None):
    """

    Implementation of Sichel’s Parametric Estimator (Sichel 1986a, 1986b and 1992)
    which uses a zero-truncated generalized inverse Gaussian-Poisson to estimate D

    The implementation uses broyden2 to solve the defining equation, searching a linearly spaced grid of starting points for a good solution.

    :param sequence: sample sequence of integers
    :type sequence: array of ints
    :param attributes: dictionary with keys as the unique elements and values as
                        counts of those elements
    :type attributes: dictionary where keys can be any type, values must be integers
    :param cache: argument used by median methods to avoid recomputation of variables
    :type cache: dictionary with 4 elements
                 {"n":no_elements,"d":no_unique_elements,"attr": attribute_counts,
                 "freq":frequency_dictionary}
    :return: estimated distinct count
    :rtype: float

    """
    if sequence is None and attributes is None and cache is None:
        raise Exception(
            "Must provide a sequence, or a dictionary of attribute counts ")

    if cache is not None:
        n, d, frequency_dictionary = cache["n"], cache["d"], cache["freq"]
    elif sequence is not None:
        n, d, _, frequency_dictionary = precompute_from_seq(sequence)
    else:
        n, d, _, frequency_dictionary = precompute_from_attr(attributes)

    if n == d:
        return _compute_birthday_problem_probability(d)

    f1 = frequency_dictionary[1]
    a = ((2 * n) / d) - np.log(n / f1)  # does not depend on g
    b = ((2 * f1) / d) + np.log(n / f1)  # does not depend on g

    def diff_eqn(g):
        result = (1 + g) * np.log(g) - a * g + b
        return result

    d_sichel_set = set()
    warnings.filterwarnings('ignore')

    for value in np.linspace((f1 / n) + 0.00001, 0.999999, 20):
        try:
            g = broyden2(diff_eqn, value)
            if 1 > g > (f1 / n) and ((n * g) / f1) > 0:
                b_hat = g * np.log((n * g) / f1) / (1 - g)
                c_hat = (1 - g**2) / (n * (g**2))
                d_sichel = 2 / (b_hat * c_hat)
                d_sichel_set.add(d_sichel)
        except Exception as e:
            continue
    warnings.resetwarnings()

    if not d_sichel_set:
        return d
    else:
        return min(d_sichel_set)
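# Illustrative call (assumes the module's precompute_from_seq helper is importable):
# estimate the number of distinct values in the full population from a sample of
# 2,000 draws over 500 possible values.
import numpy as np

rng = np.random.default_rng(1)
sample = rng.integers(0, 500, size=2000).tolist()
print(sichel_estimator(sequence=sample))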
def method_of_moments_v2_estimator(sequence=None,
                                   attributes=None,
                                   pop_estimator=lambda x: x * 1000000,
                                   n_pop=None,
                                   cache=None):
    """

    Method-of-Moments Estimator with equal frequency assumption while still sampling
     from a finite relation (Haas et al, 1995)

    :param sequence: sample sequence of integers
    :type sequence: array of ints
    :param attributes: dictionary with keys as the unique elements and values as
                        counts of those elements
    :type attributes: dictionary where keys can be any type, values must be integers
    :param cache: argument used by median methods to avoid recomputation of variables
    :type cache: dictionary with 4 elements
                 {"n":no_elements,"d":no_unique_elements,"attr": attribute_counts,
                 "freq":frequency_dictionary}
    :param pop_estimator: function to estimate population size if possible
    :type pop_estimator: function that takes in the length of sequence (int) and outputs
                         the estimated population size (int)
    :param n_pop: estimate of population size if available, will be used over pop_estimator function
    :type n_pop: int
    :return: estimated distinct count
    :rtype: float
    """
    if sequence is None and attributes is None and cache is None:
        raise Exception(
            "Must provide a sequence, or a dictionary of attribute counts ")

    if cache is not None:
        n, d, attribute_counts = cache["n"], cache["d"], cache["attr"]
    elif sequence is not None:
        n, d, attribute_counts, _ = precompute_from_seq(sequence)
    else:
        n, d, attribute_counts, _ = precompute_from_attr(attributes)

    if n == d:
        return _compute_birthday_problem_probability(d)

    if not n_pop:
        n_pop = pop_estimator(n)

    # need to implement gamma memoized function and h_x again here to enable use of global variables in broydens

    memo_dict = {}

    def memoized_gamma(x):
        """

        :param x: value to evaluate the log-gamma function at
        :type x: float
        :return: value of the log-gamma function evaluated at x (results are memoized in memo_dict)
        :rtype: float
        """
        x = int(x)
        if x in memo_dict:
            return memo_dict[x]
        else:
            result = math.lgamma(x)
            memo_dict[x] = result
            return result

    def h_x(x, n, n_pop):
        """

        :param x: h function evaluated at point x
        :type x: int
        :param n: length of sequence seen
        :type n: int
        :param n_pop: estimate of total number of tuples in Relation
        :type n_pop: int
        :return: value of h function evaluated at x
        :rtype: float
        """

        gamma_num_1 = memoized_gamma(n_pop - x + 1)
        gamma_num_2 = memoized_gamma(n_pop - n + 1)
        gamma_denom_1 = memoized_gamma(n_pop - x - n + 1)
        gamma_denom_2 = memoized_gamma(n_pop + 1)

        result = np.exp(gamma_num_1 + gamma_num_2 - gamma_denom_1 -
                        gamma_denom_2)
        return result

    def diff_eqn(D):
        return D * (1 - h_x((n_pop / D), n, n_pop)) - d

    warnings.filterwarnings('ignore')

    try:
        d_moments_1 = float(broyden1(diff_eqn, d))
    except Exception as e:
        d_moments_1 = 1000000000000000000000000

    try:
        d_moments_2 = float(broyden2(diff_eqn, d))
    except:
        d_moments_2 = 1000000000000000000000000

    warnings.resetwarnings()

    result = min(d_moments_1, d_moments_2)
    if result == 1000000000000000000000000:
        return d
    else:
        return result
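# Illustrative call (synthetic data; the precompute_* helpers above are project-specific,
# and n_pop would normally come from catalogue statistics rather than being guessed):
import numpy as np

rng = np.random.default_rng(2)
sample = rng.integers(0, 500, size=2000).tolist()
print(method_of_moments_v2_estimator(sequence=sample, n_pop=100_000))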
def loop_fit(indirizzo, lim_current_inf, sup_current):
    data= genfromtxt(indirizzo)
    ydata = data[:,1]
    xdata = data[:,0]
    sigmay = 0.05
    sigmax = 0.0003
    p0lin = [2]
    p0exp = [2, 0.005, 20]
    #g_0, pcov_lin = optimize.curve_fit(linfit, xdata[:argmax(ydata>=lim_current_inf)], ydata[:argmax(ydata>=lim_current_inf)], p0lin, sigmay)
    g_0, pcov_lin = optimize.curve_fit(linexp, xdata[:argmax(ydata>=lim_current_inf)], ydata[:argmax(ydata>=lim_current_inf)], p0exp, sigmay)
    g_0 = g_0[0]
    pcov_lin = pcov_lin[0][0]
    g_var = sqrt(pcov_lin)
    #print('g0: ', g_0, '+-', g_var[0][0])
    p0_loop = [20, 0.005, 2]
    val_g = False
    val_B = False
    val_is = False
    val_Rs = False
    j = 0
    while val_g==False or val_B==False or val_is==False or val_Rs==False:
        #print(val_g==False & val_B==False & val_is==False & val_Rs==False)
    #step 2:
        jun_cur = ydata - g_0*xdata
    #step 3:
        p_loop, pcov_loop = optimize.curve_fit(tension_junc, abs(jun_cur[argmax(jun_cur>=sup_current):]), xdata[argmax(jun_cur>=sup_current):], p0_loop, sigmax)
        #p_loop, pcov_loop = optimize.curve_fit(tension_junc, abs(jun_cur), xdata, p0_loop, sigmax)
        loop_var = sqrt(pcov_loop.diagonal())
    #step 4:
        zero_func = lambda x: tension_junc_abs(x, p_loop[1], p_loop[0], p_loop[2])
        #jun_cur_1 = optimize.fsolve(fitsolve, jun_cur, (p_loop[1], p_loop[0], p_loop[2], xdata) )
        jun_cur_1 = optimize.broyden1(zero_func, jun_cur)
        jun_cur_10 = optimize.broyden2(zero_func, jun_cur[100], x_rtol=1)
        print(len(jun_cur_1), len(xdata))
    #step 5:
        tot_cur = g_0*xdata + jun_cur_1
        print(len(tot_cur[:argmax(tot_cur>=1000)]))
        print(jun_cur_10)
    #step 6:
        delta_g, delta_g_pcov = optimize.curve_fit(linfit, xdata[:argmax(tot_cur>=lim_current_inf)], ydata[:argmax(tot_cur>=lim_current_inf)]-tot_cur[:argmax(tot_cur>=lim_current_inf)], g_0, sigmay)
        g_1 = g_0 + delta_g
    # convergence check
        if abs(g_1 - g_0) <= abs(delta_g):
            val_g = True
        elif abs(g_1 - g_0) <= 10**(-6):
            val_g = True
        if abs(p_loop[0] - p0_loop[0]) <= abs(loop_var[0]):
            val_B = True
        #print(val_B)
        #print(abs(p_loop[0] - p0_loop[0]) - abs(loop_var[0]))
        if abs(p_loop[1] - p0_loop[1]) <= abs(loop_var[1])/100:
            val_is = True
        if abs(p_loop[2] - p0_loop[2]) <= abs(loop_var[2])/100:
            val_Rs = True
        print(val_g, val_B, val_is, val_Rs)
    # redefinition of the variables for the next pass
        print('B: ', p_loop[0], '+-', loop_var[0], ' I_s: ', p_loop[1], '+-', loop_var[1], ' R_s: ', p_loop[2], '+-', loop_var[2], 'G:', g_1[0], '+-', delta_g[0])
        deltaIj = jun_cur_1 - jun_cur
        g_0 = g_1
        p0_loop = p_loop
        j = j +1
        #print(j)
        #plot(xdata, ydata-tot_cur)
        #show()
    grid()
    yscale('log')
    rc('font', size=16)
    title(r"Caratteristica I-V T=%s° - fit"%(data[:,2][1]), size=14.5)
    xlabel('Tensione V [V]')
    ylabel(r'Corrente I [$\mu $ A]')
    ylim(0.02, 10000)
    lim_err = 2300
    plot(xdata, current_v(xdata, p_loop[1], p_loop[0], p_loop[2]*10**(-6), jun_cur_1, g_0), color='blue', linewidth = 1.5)   
    errorbar(xdata[:argmax(ydata>=lim_err)], current_v(xdata, p_loop[1], p_loop[0], p_loop[2]*10**(-6), jun_cur_1, g_0)[:argmax(ydata>=lim_err)], prop_error(xdata,sigmax,p_loop[1],loop_var[1],p_loop[0],loop_var[0], p_loop[2], loop_var[2],g_1[0],delta_g[0],jun_cur_1,deltaIj)[:argmax(ydata>=lim_err)], sigmax, linestyle='None',  color="blue")
    errorbar(xdata, ydata, sigmay, sigmax, linestyle='None', marker='o', markersize=1, color="red")
    B  = r'B: $ %s  \pm  %s \, [V^{-1}] $'%(round(p_loop[0], 2), round(loop_var[0], 2))
    I_s  = r'$I_{S}$: $ %s  \pm  %s \, [nA] $'%(round(p_loop[1]*10**3, 2), round(loop_var[1]*10**3, 2))
    R_s  = r'$R_{S}$: $ %s  \pm  %s \, [\Omega] $'%(round(p_loop[2], 2), round(loop_var[2], 2))
    G  = r'G: $ %s  \pm  %s \, [\mu S]$ '%(round(g_1[0], 2), round(delta_g[0], 2))    
    text(0.45, 10, B, family='serif', style='italic', size=15)
    text(0.45, 3, I_s, family='serif', style='italic', size=15)
    text(0.45, 1, R_s, family='serif', style='italic', size=15)
    text(0.45, 0.3, G, family='serif', style='italic', size=15)
    #savefig('%s.png'%(indirizzo), dpi=400)
    show()
	def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None, options=None):
		x = opt.broyden2(fun, x0, f_tol=tol, callback=callback)
		return sol(x, True)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 29 12:48:33 2021

@author: shrohanmohapatra
"""

from scipy.optimize import broyden2


def paretoZeroFinder(x):
    return ((x - 1) / (x - 0.8))**x - 0.2


# print(newton_krylov(paretoZeroFinder,7.21))
print(paretoZeroFinder(1.9484046685235312))
print(broyden2(paretoZeroFinder, 1.95, 1000))
print(paretoZeroFinder(1.0556605455704873))