def measureBdRatefct(self, reference, processed):
    """
    BJONTEGAARD    Bjontegaard metric calculation
    Bjontegaard's metric computes the average % saving in bitrate
    between two rate-distortion curves [1].
    R1,Q1 - RD points for curve 1
    R2,Q2 - RD points for curve 2
    adapted from code from: (c) 2010 Giuseppe Valenzise
    """
    # numpy plays games with its exported functions.
    # pylint: disable=no-member
    # pylint: disable=too-many-locals
    # pylint: disable=bad-builtin
    R1 = [float(x[prX]) for x in reference]
    Q1 = [float(x[prY]) for x in reference]
    R2 = [float(x[prX]) for x in processed]
    Q2 = [float(x[prY]) for x in processed]

    log_R1 = numpy.log(R1)
    log_R2 = numpy.log(R2)

    # Best cubic poly fit for the curve represented by psnr_x vs. log_rate_x.
    poly1 = numpy.polyfit(Q1, log_R1, 3)
    poly2 = numpy.polyfit(Q2, log_R2, 3)

    # Integration interval.
    min_int = max([min(Q1), min(Q2)])
    max_int = min([max(Q1), max(Q2)])

    # find integral
    p_int1 = numpy.polyint(poly1)
    p_int2 = numpy.polyint(poly2)

    # Calculate the integrated value over the interval we care about.
    int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
    int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)

    # Calculate the average improvement.
    avg_exp_diff = (int2 - int1) / (max_int - min_int)

    # With badly formed data the exponent can grow too large; clamp it.
    if avg_exp_diff > 200:
      avg_exp_diff = 200

    # Convert to a percentage.
    avg_diff = (math.exp(avg_exp_diff) - 1) * 100

    return avg_diff
 def test_polyint_type(self):
     """Ticket #944"""
     msg = "Wrong type, should be complex"
     x = np.ones(3, dtype=complex)
     assert_(np.polyint(x).dtype == complex, msg)
     msg = "Wrong type, should be float"
     x = np.ones(3, dtype=int)
     assert_(np.polyint(x).dtype == float, msg)
Example #3
 def test_polyint_type(self):
     # Ticket #944
     msg = "Wrong type, should be complex"
     x = np.ones(3, dtype=complex)
     assert_(np.polyint(x).dtype == complex, msg)
     msg = "Wrong type, should be float"
     x = np.ones(3, dtype=int)
     assert_(np.polyint(x).dtype == float, msg)
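The behavior under test is easy to reproduce (standalone sketch): np.polyint divides the coefficients by the integer range 1..n, which promotes integer input to float and preserves complex input.

import numpy as np
print(np.polyint(np.ones(3, dtype=int)).dtype)      # float64
print(np.polyint(np.ones(3, dtype=complex)).dtype)  # complex128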
Example #4
def BdRate(group1, group2):
  """Compute the BD-rate between two score groups.

  The returned object also contains the range of PSNR values used
  to compute the result.

  Bjontegaard's metric computes the average % saving in bitrate
  between two rate-distortion curves [1].

  rate1,psnr1 - RD points for curve 1
  rate2,psnr2 - RD points for curve 2

  adapted from code from: (c) 2010 Giuseppe Valenzise
  copied from code by [email protected], [email protected]

  """
  # pylint: disable=too-many-locals
  metric_set1 = group1.dataPoints()
  metric_set2 = group2.dataPoints()

  # numpy plays games with its exported functions.
  # pylint: disable=no-member
  # pylint: disable=bad-builtin
  psnr1 = [x[1] for x in metric_set1]
  psnr2 = [x[1] for x in metric_set2]

  log_rate1 = [math.log(x[0]) for x in metric_set1]
  log_rate2 = [math.log(x[0]) for x in metric_set2]

  # Best cubic poly fit for the curve represented by psnr_x vs. log_rate_x.
  poly1 = numpy.polyfit(psnr1, log_rate1, 3)
  poly2 = numpy.polyfit(psnr2, log_rate2, 3)

  # Integration interval.
  min_int = max([min(psnr1), min(psnr2)])
  max_int = min([max(psnr1), max(psnr2)])

  # find integral
  p_int1 = numpy.polyint(poly1)
  p_int2 = numpy.polyint(poly2)

  # Calculate the integrated value over the interval we care about.
  int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
  int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)

  # Calculate the average improvement.
  avg_exp_diff = (int2 - int1) / (max_int - min_int)

  # With badly formed data the exponent can grow too large; clamp it.
  if avg_exp_diff > 200:
    avg_exp_diff = 200

  # Convert to a percentage.
  avg_diff = (math.exp(avg_exp_diff) - 1) * 100

  return {'difference': avg_diff, 'psnr':[min_int, max_int]}
def bdrate(metric_set1, metric_set2):
  """
  BJONTEGAARD    Bjontegaard metric calculation
  Bjontegaard's metric computes the average % saving in bitrate
  between two rate-distortion curves [1].

  rate1,psnr1 - RD points for curve 1
  rate2,psnr2 - RD points for curve 2

  adapted from code from: (c) 2010 Giuseppe Valenzise

  """
  rate1 = [x[0] for x in metric_set1]
  psnr1 = [x[1] for x in metric_set1]
  rate2 = [x[0] for x in metric_set2]
  psnr2 = [x[1] for x in metric_set2]

  log_rate1 = [math.log(x) for x in rate1]
  log_rate2 = [math.log(x) for x in rate2]

  # Best cubic poly fit for the curve represented by psnr_x vs. log_rate_x.
  p1 = numpy.polyfit(psnr1, log_rate1, 3)
  p2 = numpy.polyfit(psnr2, log_rate2, 3)

  # Integration interval.
  min_int = max([min(psnr1), min(psnr2)])
  max_int = min([max(psnr1), max(psnr2)])

  # find integral
  p_int1 = numpy.polyint(p1)
  p_int2 = numpy.polyint(p2)

  # Calculate the integrated value over the interval we care about.
  int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
  int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)

  # Calculate the average improvement.
  avg_exp_diff = (int2 - int1) / (max_int - min_int)

  # With badly formed data the exponent can grow too large; clamp it.
  if avg_exp_diff > 200:
    avg_exp_diff = 200

  # Convert to a percentage.
  avg_diff = (math.exp(avg_exp_diff) - 1) * 100

  return avg_diff
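A minimal usage sketch (hypothetical RD points, not from the original source): each metric set is a list of (bitrate, PSNR) pairs, and a negative result means curve 2 needs less bitrate for the same quality.

anchor = [(100, 30.0), (200, 33.1), (400, 36.0), (800, 38.5)]
test = [(90, 30.2), (180, 33.4), (360, 36.3), (720, 38.9)]
print("BD-rate: %.2f%%" % bdrate(anchor, test))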
Example #6
def _sweep_poly_phase(t, poly):
    """
    Calculate the phase used by sweep_poly to generate its output.  See
    sweep_poly for a description of the arguments.

    """
    # polyint handles lists, ndarrays and instances of poly1d automatically.
    intpoly = polyint(poly)
    phase = 2*pi * polyval(intpoly, t)
    return phase
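A quick check of the same polyint/polyval pattern (standalone sketch using the numpy namespace; the module above imports those names directly): for instantaneous frequency f(t) = 2t + 3 the antiderivative is t**2 + 3t, so the phase at t = 1 is 2*pi*4.

import numpy as np

poly = [2.0, 3.0]                   # instantaneous frequency f(t) = 2t + 3
intpoly = np.polyint(poly)          # antiderivative t**2 + 3t
phase = 2 * np.pi * np.polyval(intpoly, 1.0)  # 8*pi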
Example #7
def BDPSNR(PSNR1, BR1, PSNR2, BR2):
    lBR1 = np.log10(BR1)
    p1 = np.polyfit(lBR1, PSNR1, 3)

    lBR2 = np.log10(BR2)
    p2 = np.polyfit(lBR2, PSNR2, 3)

    min_int = max(min(lBR1), min(lBR2))
    max_int = min(max(lBR1), max(lBR2))

    # find integral
    p_int1 = np.polyint(p1)
    p_int2 = np.polyint(p2)

    int1 = np.polyval(p_int1, max_int) - np.polyval(p_int1, min_int)
    int2 = np.polyval(p_int2, max_int) - np.polyval(p_int2, min_int)

    # find avg diff
    avg_diff = (int2-int1)/(max_int-min_int)

    return avg_diff
def bdsnr(metric_set1, metric_set2):
  """
  BJONTEGAARD    Bjontegaard metric calculation
  Bjontegaard's metric computes the average gain in PSNR between two
  rate-distortion curves [1].
  rate1,psnr1 - RD points for curve 1
  rate2,psnr2 - RD points for curve 2

  returns the calculated Bjontegaard metric 'dsnr'

  code adapted from code written by: (c) 2010 Giuseppe Valenzise
  http://www.mathworks.com/matlabcentral/fileexchange/27798-bjontegaard-metric/content/bjontegaard.m
  """
  rate1 = [x[0] for x in metric_set1]
  psnr1 = [x[1] for x in metric_set1]
  rate2 = [x[0] for x in metric_set2]
  psnr2 = [x[1] for x in metric_set2]

  log_rate1 = [math.log(x) for x in rate1]
  log_rate2 = [math.log(x) for x in rate2]

  # Best cubic poly fit for the curve represented by log_rate_x vs. psnr_x.
  p1 = numpy.polyfit(log_rate1, psnr1, 3)
  p2 = numpy.polyfit(log_rate2, psnr2, 3)

  # Integration interval.
  min_int = max([min(log_rate1), min(log_rate2)])
  max_int = min([max(log_rate1), max(log_rate2)])

  # Integrate p1, and p2.
  p_int1 = numpy.polyint(p1)
  p_int2 = numpy.polyint(p2)

  # Calculate the integrated value over the interval we care about.
  int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
  int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)

  # Calculate the average improvement.
  avg_diff = (int2 - int1) / (max_int - min_int)
  return avg_diff
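A matching sketch for the PSNR direction (same hypothetical RD points as in the bdrate example above; a positive result means curve 2 gains quality at equal bitrate):

anchor = [(100, 30.0), (200, 33.1), (400, 36.0), (800, 38.5)]
test = [(90, 30.2), (180, 33.4), (360, 36.3), (720, 38.9)]
print("BD-PSNR: %.3f dB" % bdsnr(anchor, test))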
Example #9
 def test_4(self):
   for type in classes:
     for M in range(type[1],type[2]+1):
       coll = getattr(pySDC.CollocationClasses, type[0])(M, t_start, t_end)
       S = coll.Smat[1:,1:]
       # as in TEST 1, create and integrate a polynomial with random coefficients, but now of degree M-1
       poly_coeff = np.random.rand(M-1)
       poly_vals  = np.polyval(poly_coeff, coll.nodes)
       poly_int_coeff = np.polyint(poly_coeff)
       for i in range(1,M):
           int_ex = np.polyval(poly_int_coeff, coll.nodes[i]) - np.polyval(poly_int_coeff, coll.nodes[i-1])
           int_coll = np.dot(poly_vals, S[i,:])
           assert abs(int_ex - int_coll)<1e-12, "For node type " + type[0] + ", partial quadrature rule from Smat failed to integrate polynomial of degree M-1 exactly for M = " + str(M)
Example #10
def BDRate(PSNR1, BR1, PSNR2, BR2):
    lBR1 = np.log(BR1)
    p1 = np.polyfit(PSNR1, lBR1, 3)

    lBR2 = np.log(BR2)
    p2 = np.polyfit(PSNR2, lBR2, 3)

    min_int = max(min(PSNR1), min(PSNR2))
    max_int = min(max(PSNR1), max(PSNR2))

    # find integral
    p_int1 = np.polyint(p1)
    p_int2 = np.polyint(p2)

    int1 = np.polyval(p_int1, max_int) - np.polyval(p_int1, min_int)
    int2 = np.polyval(p_int2, max_int) - np.polyval(p_int2, min_int)

    # find avg diff
    avg_exp_diff = (int2-int1)/(max_int-min_int)
    avg_diff = (np.exp(avg_exp_diff)-1)*100

    return avg_diff
Example #11
def chirp(t,f0=0,t1=1,f1=100,method='linear',phi=0,qshape=None):
    """Frequency-swept cosine generator.

    Inputs:

        t          --  array to evaluate waveform at
        f0, f1, t1 --  frequency (in Hz) of waveform is f0 at t=0 and f1 at t=t1
            Alternatively, if f0 is an array, then it forms the coefficients of
            a polynomial (cf. numpy.polyval()) in t. The values in f1, t1,
            method, and qshape are ignored.
        method     --  linear, quadratic, or logarithmic frequency sweep
        phi        --  optional phase in degrees
        qshape     --  shape parameter for quadratic curve: concave or convex
    """

    # Convert to radians.
    phi *= pi / 180
    if size(f0) > 1:
        # We were given a polynomial.
        return cos(2*pi*polyval(polyint(f0),t)+phi)
    if method in ['linear','lin','li']:
        beta = (f1-f0)/t1
        phase_angle = 2*pi * (f0*t + 0.5*beta*t*t)
    elif method in ['quadratic','quad','q']:
        if qshape == 'concave':
            mxf = max(f0,f1)
            mnf = min(f0,f1)
            f1,f0 = mxf, mnf
        elif qshape == 'convex':
            mxf = max(f0,f1)
            mnf = min(f0,f1)
            f1,f0 = mnf, mxf
        else:
            raise ValueError("qshape must be either 'concave' or 'convex' but "
                "a value of %r was given." % qshape)
        beta = (f1-f0)/t1/t1
        phase_angle = 2*pi * (f0*t + beta*t*t*t/3)
    elif method in ['logarithmic','log','lo']:
        if f1 <= f0:
            raise ValueError(
                "For a logarithmic sweep, f1=%f must be larger than f0=%f."
                % (f1, f0))
        # Exponential sweep: f(t) = f0 * (f1/f0)**(t/t1), integrated analytically.
        beta = t1 / log(f1 / f0)
        phase_angle = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1)
    else:
        raise ValueError("method must be 'linear', 'quadratic', or "
            "'logarithmic' but a value of %r was given." % method)

    return cos(phase_angle + phi)
def f_evolution_element(x, y):
  root_real = 2.
  roots = np.zeros((3,3))
  if y < 0:
    dP = np.poly([root0, root_real + y * 1j, root_real - y * 1j])
  elif y > 0:
    dP = np.poly([root0, root_real + y, root_real - y])
  else:
    dP = np.poly([root0, root_real, -root_real])

  P = lamda*np.polyint(dP)
  cplx_roots = np.roots(dP)
  roots[:,0] = [_.real for _ in cplx_roots if _.real < max_x and _.real > min_x]
  roots[:,0] = np.sort(roots[:,0])
  z = np.polyval(P, x)
  for i in range(roots.shape[0]):
    roots[i,1] = y
    roots[i,2] = np.polyval(P, roots[i,0])
  return z,roots
def f_evolution(x, y):
  z = np.zeros((x.size, y.size))
  root_real = 2.
  roots = np.zeros((3,y.size,3))
  for k in range(y.size):
    if y[k] < 0:
      dP = np.poly([root0, root_real + y[k] * 1j, root_real - y[k] * 1j])
    elif y[k] > 0:
      dP = np.poly([root0, root_real + y[k], root_real - y[k]])
    else:
      dP = np.poly([root0, root_real, -root_real])

    P = lamda*np.polyint(dP)
    cplx_roots = np.roots(dP)
    roots[:,k,0] = [_.real for _ in cplx_roots if _.real < max_x and _.real > min_x]
    roots[:,k,0] = np.sort(roots[:,k,0])
    for i in range(x.size):
      z[i,k] = np.polyval(P, x[i])
    for i in range(roots.shape[0]):
      roots[i,k,1] = y[k]
      roots[i,k,2] = np.polyval(P, roots[i,k,0])
  return z,roots
Example #14
  def test_1(self):
    for type in classes:
      for M in range(type[1],type[2]+1):
        coll = getattr(pySDC.CollocationClasses, type[0])(M, t_start, t_end)
        
        # some basic consistency tests
        assert np.size(coll.nodes)==np.size(coll.weights), "For node type " + type[0] + ", number of entries in nodes and weights is different"
        assert np.size(coll.nodes)==M, "For node type " + type[0] + ", requesting M nodes did not produce M entries in nodes and weights"


        # generate random set of polynomial coefficients
        poly_coeff = np.random.rand(coll.order-1)
        # evaluate polynomial at collocation nodes
        poly_vals  = np.polyval(poly_coeff, coll.nodes)
        # use numpy's polyint function to compute the antiderivative of the polynomial
        poly_int_coeff = np.polyint(poly_coeff)
        # Compute integral from 0.0 to 1.0
        int_ex = np.polyval(poly_int_coeff, t_end) - np.polyval(poly_int_coeff, t_start)
        # use quadrature rule to compute integral
        int_coll = coll.evaluate(coll.weights, poly_vals)
        # For large values of M, substantial differences due to round-off error have to be considered
        assert abs(int_ex - int_coll) < 1e-10, "For node type " + type[0] + ", failed to integrate polynomial of degree " + str(coll.order-1) + " exactly. Error: %5.3e" % abs(int_ex - int_coll)
Example #15
def calc_omega(cp):
    # conv is assumed to be polynomial multiplication, e.g. numpy.convolve
    a = []
    for i in range(len(cp)):
        ptmp = []
        tmp = 0
        for j in range(len(cp)):
            if j != i:
                row = []
                row.insert(0,1/(cp[i]-cp[j]))
                row.insert(1,-cp[j]/(cp[i]-cp[j]))
                ptmp.insert(tmp,row)
                tmp += 1
        p = [1]
        for j in range(len(cp)-1):
            p = conv(p,ptmp[j])
        pint = numpy.polyint(p)
        arow = []
        for j in range(len(cp)):
            arow.append(numpy.polyval(pint,cp[j]))
        a.append(arow)
    return a
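A small check (assuming conv = numpy.convolve): for nodes cp = [0.0, 1.0] the entries are the antiderivatives of the two Lagrange basis polynomials evaluated at each node.

import numpy
conv = numpy.convolve  # assumed polynomial-product helper
print(calc_omega([0.0, 1.0]))  # [[0.0, 0.5], [0.0, 0.5]]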
Example #16
    def w(self, p_c, T = 293.15):
        """
        Moisture content [kg/m3]
        
        The capillary pressure p_c is required, and T is an optional argument
        """
        
        if self.w_method == 'vangenuchten':
        
            w = np.zeros(np.shape(p_c))
            n = 1./(1-self.w_m)
            for i in range(np.size(self.w_l)):
                w += self.w_sat * self.w_l[i] * \
                (1.+(self.w_alpha[i]*abs(p_c))**n[i])**(-self.w_m[i])
            
        elif self.w_method == 'polynomial':

            w = np.polyval(self.w_poly, ham.HR(p_c, T))
            
        elif self.w_method == 'slope':
            
            w = np.polyval(np.polyint(self.xi_poly), ham.HR(p_c, T))
            
        return w
Example #17
  def __init__(self):
    d = 3 # Degree of interpolating polynomial
    nk = 20 # Control discretization
    tf = 10.0 # End time
    h = tf/nk
    
    tau_root = [0] + collocation_points(d, "radau") # Choose collocation points
    C = NP.zeros((d+1,d+1)) # Coefficients of the collocation equation
    D = NP.zeros(d+1) # Coefficients of the continuity equation
    F = NP.zeros(d+1) # Coefficients of the quadrature function
    T = NP.zeros((nk,d+1)) # All collocation time points
    
    self.d = d
    self.nk = nk # Control discretization
    self.tf = tf # End time
    self.h = h # Size of the finite elements
    # Construct polynomial basis
    for j in range(d+1):
      # Construct Lagrange polynomials to get the polynomial basis at the collocation point
      p = NP.poly1d([1])
      for r in range(d+1):
        if r != j:
          p *= NP.poly1d([1, -tau_root[r]]) / (tau_root[j]-tau_root[r])

      # Evaluate the polynomial at the final time to get the coefficients of the continuity equation
      D[j] = p(1.0)

      # Evaluate the time derivative of the polynomial at all collocation points to get the coefficients of the collocation equation
      pder = NP.polyder(p)
      for r in range(d+1):
        C[j,r] = pder(tau_root[r])

      # Evaluate the integral of the polynomial to get the coefficients of the quadrature function
      pint = NP.polyint(p)
      F[j] = pint(1.0)

    # All collocation time points
    for k in range(nk):
      for j in range(d+1):
        T[k,j] = h*(k + tau_root[j])

    self.T = T; self.C = C; self.D = D; self.F = F
def Simpson(f, a, b, n=2):
    # Composite Simpson's rule on [a, b] with n (even) subintervals;
    # the default n is an assumption, chosen so Simpson(p, a, b) below works
    # (Simpson's rule is exact for cubics for any even n).
    h = (b - a)/float(n)

    sum1 = 0
    for i in range(1, n//2 + 1):
        sum1 += f(a + (2*i - 1)*h)

    sum2 = 0
    for i in range(1, n//2):
        sum2 += f(a + 2*i*h)

    integral = (b-a)/(3*float(n))*(f(a) + f(b) + 4*sum1 + 2*sum2)
    return integral

# Test Simpson function on polynomial
# Create 2nd order Polynomial
p = np.poly1d([2,3,2])
# Calculate integral of p
P = np.polyint(p)

# define boundaries of integral
a = 0
b = 2
# Define error
eps = 1e-14

int_exact = P(b) - P(a)
int_simpson = Simpson(p, a, b)

print "Integral exact polynomial: %.2f" % (int_exact)
print "Integral with Simpson func: %.2f" % (int_simpson)
print "Simpson function is exact within 1E-14" if abs(int_exact - int_simpson) < eps else "Simpson function is not exact"
print 
Example #19
TdT = lambda x : np.array([np.power(x,[3,2,1,0]), np.multiply(np.power(x,[2,1,0,0]),[3,2,1,0])])
N = np.linalg.inv(np.concatenate((TdT(0),TdT(pa['L']))))

xyplot=dict([('X',np.linspace(0,pa['L'],100)), ('Xlabel','x'), ('Ylabel','Shape Function') ])
xyplot['Y']=np.transpose(np.array([np.polyval(N[:,j1],xyplot['X']) for j1 in range(4)]))
plot2D(xyplot)





#%% compute element matrices by analytical integration [0;L]
Ke, Me = np.zeros((4,4)), np.zeros((4,4))
for j1 in range(4):
    for j2 in range(4):
        coef=np.polyint(np.polymul(N[:,j1],N[:,j2]))
        Me[j1,j2]=np.polyval(coef,pa['L'])-np.polyval(coef,0)
        coef=np.polyint(np.polymul(np.polyder(N[:,j1],2),np.polyder(N[:,j2],2)))
        Ke[j1,j2]=np.polyval(coef,pa['L'])-np.polyval(coef,0)

print(Me*420./pa['L']);print(Ke*pa['L']**3)


#%% compute element matrices by numerical integration [0;L]
#get quadrature points [-1;1]
[GS,NdN]=quad_seg(4)
#compute quadrature points [0;L]
xi = [[0], [pa['L']]]
xg = NdN[:,0:2] @ xi  # N_i x_i
Jac = NdN[:,2:4] @ xi  # dN_i/dxi x_i (Jacobian of the mapping)
#compute shape functions & derivatives at quadrature points
Example #20
def lorenz_coefficient(poros,
                       perme,
                       layer_thick=0.25,
                       depth=None,
                       curve_plot=False):
    """Calculates Lorenz coefficient and curve of subsurface sample given porosity and permeability data.
    :param poros: porosities in %
    :type: 1D numpy.ndarray
    :param perme: permeabilities in mD
    :type: 1D numpy.ndarray
    :param layer_thick: uniform layer thickness; default 0.25 m;
                        if depth is not None, layer_thick is recalculated
    :type: int or float
    :param depth: default None for constant thickness; 
                  for variable layer thickness, enter 1D numpy.ndarray of depths in m
    :param curve_plot: default False; set to True to display the Lorenz Curve
    :type: boolean
    :return:lorenz_coeff: Lorenz Coefficient rounded to 3 decimal places
           :type: float
           :lorenz_plot: (optional) figure with plot of Lorenz Curve   
    """

    # Library imports

    import pandas as pd
    import numpy as np
    from matplotlib import pyplot as plt

    # Converting to row-oriented numpy.ndarrays (if necessary)

    if np.ndim(depth) == 2:
        depth = np.transpose(depth)[0, :]
    if np.ndim(poros) == 2:
        poros = np.transpose(poros)[0, :]
    if np.ndim(perme) == 2:
        perme = np.transpose(perme)[0, :]

    # Determining layer thickness array

    if depth is not None:
        layer_thick = depth - np.concatenate(([0], depth[:-1]))
    else:
        layer_thick = np.ones(len(poros)) * layer_thick

    # Exception handling

    if not (all(isinstance(x, (int, float)) for x in layer_thick)
            and all(isinstance(x, (int, float)) for x in poros)
            and all(isinstance(x, (int, float)) for x in perme)):
        raise TypeError('Not all array entries are integers or floats')

    elif len(layer_thick) != len(poros) or len(layer_thick) != len(perme):
        raise TypeError(
            'There are not sufficient porosity/permeability measurements for all layers'
        )

    elif np.ndim(layer_thick) != 1 or np.ndim(poros) != 1 or np.ndim(perme) != 1:
        raise TypeError('Arguments should be 1D numpy.ndarrays')

    # Sorting data in descending order of perme/poros

    ratio = perme / poros
    layer_thick = np.flip(layer_thick[np.argsort(ratio)], axis=0)
    poros = np.flip(poros[np.argsort(ratio)], axis=0)
    perme = np.flip(perme[np.argsort(ratio)], axis=0)

    # Flow and storage capacities

    storage_cap = poros * layer_thick
    flow_cap = perme * layer_thick

    # Cumulative flow and storage capacities

    cumul_storage_cap = np.zeros(len(flow_cap) + 1)
    cumul_flow_cap = np.zeros(len(flow_cap) + 1)

    cumul_storage_cap[1] = storage_cap[0]
    cumul_flow_cap[1] = flow_cap[0]

    for i in range(2, len(flow_cap) + 1):
        cumul_storage_cap[i] = cumul_storage_cap[i - 1] + storage_cap[i - 1]
        cumul_flow_cap[i] = cumul_flow_cap[i - 1] + flow_cap[i - 1]

    # Fractional flow and storage capacities

    frac_storage_cap = cumul_storage_cap / cumul_storage_cap[-1]
    frac_flow_cap = cumul_flow_cap / cumul_flow_cap[-1]

    # Curve fitting

    weights = np.ones(len(
        frac_storage_cap))  # To ensure the fit converges to (0,0) and (1,1)
    weights[0] = 1000
    weights[-1] = 1000

    poly_fit = np.polyfit(frac_storage_cap, frac_flow_cap, deg=3, w=weights)

    poly_fit = np.poly1d(poly_fit)
    integral = np.polyint(poly_fit)
    lorenz_coeff = (integral(1) - 0.5) / 0.5

    if curve_plot:  # To output Lorenz Curve plot

        # Plotting instructions

        lorenz_plot = plt.figure(figsize=(16, 12))

        plt.style.use('fivethirtyeight')

        plt.plot([0, 1], [0, 1],
                 label='Homogeneous Curve',
                 color='red',
                 linewidth='3')
        plt.plot(np.linspace(0, 1, 100),
                 poly_fit(np.linspace(0, 1, 100)),
                 linestyle='dashed',
                 linewidth='3',
                 label='Polynomial Fit',
                 color='black')
        plt.fill_between(np.linspace(0, 1, 100),
                         poly_fit(np.linspace(0, 1, 100)),
                         np.linspace(0, 1, 100),
                         alpha=0.75)
        plt.fill_between(np.linspace(0, 1, 100),
                         poly_fit(np.linspace(0, 1, 100)),
                         1,
                         alpha=0.75,
                         color='orange')

        plt.title('Lorenz Curve and Coefficient', size=50)
        plt.xlabel('Fraction of Total Storage Capacity (m)', size=36)
        plt.ylabel('Fraction of Total Flow Capacity (mD*m)', size=36)
        plt.legend(loc=4, facecolor='#C0C0C0', fontsize=25)
        plt.text(0.3,
                 0.7,
                 'lorenz_coeff = \n %.3f' % lorenz_coeff,
                 size=40,
                 horizontalalignment='center',
                 bbox=dict(facecolor='none', edgecolor='black'))

        plt.show()

        return round(lorenz_coeff, 3), lorenz_plot

    # To output only Lorenz Coefficient
    return round(lorenz_coeff, 3)
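A minimal sketch with made-up data (hypothetical values; uniform 0.25 m layers via the default layer_thick):

import numpy as np
poros = np.array([10., 15., 20., 25.])    # %
perme = np.array([1., 50., 200., 1000.])  # mD
print(lorenz_coefficient(poros, perme))   # coefficient in [0, 1]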
Example #21
def FindScalarProduct(a, b):  # computes the scalar product of two polynomials
    product_arr = np.polymul(a, b)  # multiply the polynomials
    product_poly = np.poly1d(product_arr)  # convert the result to a poly1d
    integral_poly = np.polyint(product_poly)  # take the antiderivative

    return integral_poly(1) - integral_poly(0)  # integral at 1 minus integral at 0
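For example, the inner product of x with itself on [0, 1] is the integral of x**2, i.e. 1/3:

import numpy as np
print(FindScalarProduct([1, 0], [1, 0]))  # 0.333..., i.e. <x, x> on [0, 1]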
Example #22
def bdrate(columns):
    #################################################
    ############### Read User Inputs ################
    #################################################

    # mode 0 computes PSNR differences, mode 1 computes rate differences
    mode = 1

    #################################################
    ############### Compute BDPSNR ##################
    #################################################

    # define arrays to store the data
    Rate1 = []
    Rate2 = []
    Psnr1 = []
    Psnr2 = []

    # Loop over the four RD points and extract variables of interest.
    # Rate is in columns 0/2 and the corresponding PSNR in columns 1/3.
    for i in range(0, 4):
        Rate1.append(math.log(float(columns[0 + 4*i])))
        Rate2.append(math.log(float(columns[2 + 4*i])))
        Psnr1.append(float(columns[1 + 4*i]))
        Psnr2.append(float(columns[3 + 4*i]))

    if mode == 0:
        # compute PSNR differences
        rates1 = numpy.array(Rate1)
        psnrs1 = numpy.array(Psnr1)
        z_poly1 = numpy.polyfit(rates1, psnrs1, 3)

        rates2 = numpy.array(Rate2)
        psnrs2 = numpy.array(Psnr2)
        z_poly2 = numpy.polyfit(rates2, psnrs2, 3)

        # integration interval: overlap of the two log-rate ranges
        min_int = max(min(Rate1), min(Rate2))
        max_int = min(max(Rate1), max(Rate2))

        # compute integral
        z_poly_integral1 = numpy.polyint(numpy.poly1d(z_poly1))
        z_poly_integral2 = numpy.polyint(numpy.poly1d(z_poly2))

        integral1 = numpy.polyval(z_poly_integral1, max_int) - numpy.polyval(z_poly_integral1, min_int)
        integral2 = numpy.polyval(z_poly_integral2, max_int) - numpy.polyval(z_poly_integral2, min_int)

        # compute average differences
        avg_diff = (integral2 - integral1) / (max_int - min_int)

    elif mode == 1:
        # compute Rate differences
        rates1 = numpy.array(Rate1)
        psnrs1 = numpy.array(Psnr1)
        z_poly1 = numpy.polyfit(psnrs1, rates1, 3)

        rates2 = numpy.array(Rate2)
        psnrs2 = numpy.array(Psnr2)
        z_poly2 = numpy.polyfit(psnrs2, rates2, 3)

        # integration interval: overlap of the two PSNR ranges
        min_int = max(min(Psnr1), min(Psnr2))
        max_int = min(max(Psnr1), max(Psnr2))

        # compute integral
        z_poly_integral1 = numpy.polyint(numpy.poly1d(z_poly1))
        z_poly_integral2 = numpy.polyint(numpy.poly1d(z_poly2))

        integral1 = numpy.polyval(z_poly_integral1, max_int) - numpy.polyval(z_poly_integral1, min_int)
        integral2 = numpy.polyval(z_poly_integral2, max_int) - numpy.polyval(z_poly_integral2, min_int)

        # compute average differences and convert to a percentage
        avg_exp_diff = (integral2 - integral1) / (max_int - min_int)
        avg_diff = 100 * (math.exp(avg_exp_diff) - 1)

    return avg_diff
Example #23
        ax1.plot(x, p(x))

        # Perform integration over an uneven mesh using just two elements
        x_salient = [x[0], 0.1, x[-1]]
        ax1.plot(x_salient, p(x_salient), 'r.')
        ax1.axhline(0.0, color='k', alpha=0.3)  # overlay y=0 line

        mesh, integral_vals = integrate_gauss(p,
                                              x_salient,
                                              n_gp=3,
                                              cumulative=True,
                                              make_plot=True,
                                              ax_list=[ax1, ax2])

        # Derive exact integrated polynomial
        p_integral = npy.polyint(p)
        p0 = p_integral(x[0])  # integration constant
        ax2.plot(x, p_integral(x) - p0, label='exact')
        ax1.legend()
        ax2.legend()

        # Define some nodes

#        # Define some elements
#        elemTopo = npy.asarray([[0,1],[1,2],[0,2]])
#        Ne = elemTopo.shape[0]
#
#        for e in range(Ne):
#            Element(meshObj1,elemTopo[e,:],name="Element %d" (e+1))
#
#        # Define a mesh of meshes
Example #24
    # Construct Lagrange polynomials to get the polynomial basis at the collocation point
    p = np.poly1d([1])
    for r in range(d+1):
        if r != j:
            p *= np.poly1d([1, -tau_root[r]]) / (tau_root[j]-tau_root[r])

    # Evaluate the polynomial at the final time to get the coefficients of the continuity equation
    D[j] = p(1.0)

    # Evaluate the time derivative of the polynomial at all collocation points to get the coefficients of the continuity equation
    pder = np.polyder(p)
    for r in range(d+1):
        C[j,r] = pder(tau_root[r])

    # Evaluate the integral of the polynomial to get the coefficients of the quadrature function
    pint = np.polyint(p)
    B[j] = pint(1.0)

# Time horizon
T = 10.

# Declare model variables
x1 = ca.SX.sym('x1')
x2 = ca.SX.sym('x2')
x = ca.vertcat(x1, x2)
u = ca.SX.sym('u')

# Model equations
xdot = ca.vertcat((1-x2**2)*x1 - x2 + u, x1)

# Objective term
def legendre(n, lb, ub):
    """
     legendre: Compute the legendre polynomials

    Inputs:
      n - Order of Legendre polynomials.
      lb - lower bounds for Legendre polynomials
      ub - Upper bounds for Legendre polynomials

    Output:
      Le - Coefficients of Legendre polynomials
    """
    if n < 0:
        raise ValueError(
            "The order of legendre polynomial must be greater than or equal to 0"
        )

    if isinstance(n, int) == False:
        n = int(n)

    # Call legendre recursive function
    # (polymin below is assumed to be polynomial subtraction, e.g. np.polysub)
    L0 = np.array([1])
    L1 = np.array([1, -((ub + lb) / 2)])

    if n == 0:
        Le = L0
    elif n == 1:
        Le = L1
    else:
        # Perform Gram Schmidt orthogonalization
        for i in range(0, n - 1):
            if i == 0:
                K = deepcopy(L0)
                L = deepcopy(L1)

            a1 = np.polyval(
                np.polyint(np.convolve(np.array([1, 0]), np.convolve(L, L))),
                ub) - np.polyval(
                    np.polyint(np.convolve(np.array([1, 0]), np.convolve(
                        L, L))), lb)
            a2 = np.polyval(np.polyint(np.convolve(L, L)), ub) - np.polyval(
                np.polyint(np.convolve(L, L)), lb)
            a = a1 / a2
            h1 = np.convolve(polymin(np.array([1, 0]), a), L)

            b1 = np.polyval(np.polyint(np.convolve(L, L)), ub) - np.polyval(
                np.polyint(np.convolve(L, L)), lb)
            b2 = np.polyval(np.polyint(np.convolve(K, K)), ub) - np.polyval(
                np.polyint(np.convolve(K, K)), lb)
            b = b1 / b2
            h2 = np.convolve(b, K)

            Le = polymin(h1, h2)

            K = deepcopy(L)
            L = deepcopy(Le)

        cons = 1 / (np.polyval(Le, 1))
        Le = Le * cons

    return Le.tolist()
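A quick check (standalone sketch assuming polymin = np.polysub and the imports used above): order 2 on [-1, 1] recovers P2(x) = (3x**2 - 1)/2.

import numpy as np
from copy import deepcopy

polymin = np.polysub  # assumed polynomial-subtraction helper
print(legendre(2, -1, 1))  # [1.5, 0.0, -0.5]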
Example #26
def poly_area(x1, x2, p1, p2):
    p = pa.polyadd(p1, -p2)
    i = np.polyint(p)
    return np.abs(np.polyval(i, x2) - np.polyval(i, x1))
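A quick check (pa is assumed to provide coefficient-wise polynomial addition, e.g. np.polyadd): the area between y = x**2 and y = x**2 - 1 over [0, 1] is 1.

import numpy as np
pa = np  # assumption: pa.polyadd behaves like np.polyadd
p1 = np.array([1.0, 0.0, 0.0])   # x**2
p2 = np.array([1.0, 0.0, -1.0])  # x**2 - 1
print(poly_area(0.0, 1.0, p1, p2))  # 1.0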
Example #27
def _area(observed_height, observed_width, height_breakpoints, poly_fits,
          area_median_flow, fit_width_var, fit_height_var, cov_height_width,
          num_obs):
    """
    Computes cross-sectional area from fit, based on CalculatedAEIV.m at
    https://github.com/mikedurand/SWOTAprimeCalcs

    observed_height - swot observed height for this reach
    observed_width - swot observed width for this reach
    height_breakpoints - boundaries for fits in height
    poly_fits - polynominal coeffs for the fits
    area_median_flow - cross-sectional area at median flow
    fit_width_var - width error std**2
    fit_height_var - height error std**2
    cov_height_width - covariance matrix for width / height
    num_obs - number of observations
    """
    poly_ints = np.array([np.polyint(item) for item in poly_fits])

    height_fits_ll = height_breakpoints[0:-1]
    height_fits_ul = height_breakpoints[1:]

    ifit = np.argwhere(
        np.logical_and(observed_height >= height_fits_ll,
                       observed_height < height_fits_ul))

    low_height_snr = (cov_height_width[1, 1] -
                      fit_height_var) / fit_height_var < 2

    if ifit.size == 0:
        observed_height_hat = np.nan
        observed_width_hat = observed_width
        if observed_height > height_breakpoints.max():
            delta_area_hat = (
                np.polyval(poly_ints[-1], height_breakpoints[-1]) -
                np.polyval(poly_ints[-1], height_breakpoints[-2]) +
                area_median_flow)
            dAunc = np.sqrt(fit_height_var * observed_width**2 +
                            2 * fit_width_var *
                            (observed_height - height_breakpoints[-1])**2)

        else:
            delta_area_hat = (-area_median_flow - (
                (height_breakpoints[0] - observed_height) *
                (observed_width + poly_fits[0][0] * height_breakpoints[0] +
                 poly_fits[0][1]) / 2))
            dAunc = np.sqrt(fit_height_var * observed_width**2 +
                            2 * fit_width_var *
                            (observed_height - height_breakpoints[0])**2)

    else:
        ifit = ifit[0][0]
        if low_height_snr:
            observed_height_hat = observed_height
        else:
            observed_height_hat = estimate_height(observed_width,
                                                  observed_height,
                                                  poly_fits[ifit],
                                                  fit_width_var,
                                                  fit_height_var)

        ifit_hat = np.argwhere(
            np.logical_and(observed_height_hat >= height_fits_ll,
                           observed_height_hat < height_fits_ul))

        if ifit_hat.size > 0:
            ifit = ifit_hat[0][0]
            observed_height_hat = estimate_height(observed_width,
                                                  observed_height,
                                                  poly_fits[ifit],
                                                  fit_width_var,
                                                  fit_height_var)

        if low_height_snr:
            observed_width_hat = observed_width
        else:
            observed_width_hat = np.polyval(poly_fits[ifit],
                                            observed_height_hat)

        delta_area_hat = 0
        for poly_int, height_ll, height_ul in zip(poly_ints[:ifit + 1],
                                                  height_fits_ll[:ifit + 1],
                                                  height_fits_ul[:ifit + 1]):

            delta_area_hat += (np.polyval(
                poly_int, np.min([observed_height_hat, height_ul])) -
                               np.polyval(poly_int, height_ll))

        delta_area_hat -= area_median_flow

        if poly_fits[ifit][0] == 0:
            dAunc = poly_fits[ifit][1] * np.sqrt(fit_height_var)
        else:
            mu = (np.sqrt(poly_fits[ifit][0] / 2) *
                  (observed_height_hat - height_fits_ul[ifit]) +
                  np.polyval(poly_fits[ifit], height_fits_ul[ifit]) /
                  np.sqrt(2 * poly_fits[ifit][0]))
            sigma = np.sqrt(poly_fits[ifit][0] / 2) * np.sqrt(fit_height_var)
            dAunc = np.sqrt(4 * mu**2 * sigma**2 + 2 * sigma**4)

    return delta_area_hat, observed_width_hat, observed_height_hat, dAunc
Example #28
def get_node_dist(pol):
    int_p = np.polyint(pol)
    return int_p / eval_(int_p, 1)
Example #29
import numpy as np
import argparse
import logging

from matplotlib import pyplot as plt  # needed by the plotting lambdas below

import utils
import codes


class Dist:  # irregular code distribution
    def __init__(self, name, lambda_p, rho_p, eps_BP):
        self.name, self.thresh = name, eps_BP
        self.lambda_p, self.rho_p = lambda_p, rho_p


eval_ = lambda p_, x_: np.polyval(p_, x_)
avg_deg_inv = lambda p_: eval_(np.polyint(p_), 1)  # 0 to 1 integration
rate__ = lambda lambda_p, rho_p: 1 - avg_deg_inv(rho_p) / avg_deg_inv(
    lambda_p)
rate_ = lambda code_: rate__(code_.lambda_p, code_.rho_p)
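For instance, the regular (3,6) ensemble has lambda(x) = x**2 and rho(x) = x**5, so the design rate is 1 - (1/6)/(1/3) = 1/2:

print(rate__([1, 0, 0], [1, 0, 0, 0, 0, 0]))  # coefficients in np.polyval order -> 0.5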

x1 = np.linspace(0, .6, num=50)
f_eps_x = lambda eps_, lambda_p_, rho_p_, x_: eps_ * eval_(
    lambda_p_, 1 - eval_(rho_p_, 1 - x_))
f_eps_x1 = lambda eps_, lambda_p_, rho_p_: f_eps_x(eps_, lambda_p_, rho_p_, x1)
plot_de_eps = lambda code_, eps_, eps_name='eps', extra=None: \
    plt.plot(x1, f_eps_x1(eps_, code_.lambda_p, code_.rho_p), linewidth=3,
             label='%s: rate=%g, %s=%g%s' % (code_.name, rate_(code_), eps_name, eps_,
                                             '' if extra is None else ', %s' % extra))

plot_de_thresh = lambda code_: plot_de_eps(
    code_, code_.thresh, 'eps_BP', 'gap=%g, mul. gap=%g' %
Example #30
def opt_mintime(reftrack: np.ndarray,
                coeffs_x: np.ndarray,
                coeffs_y: np.ndarray,
                normvectors: np.ndarray,
                pars: dict,
                tpamap_path: str,
                tpadata_path: str,
                export_path: str,
                print_debug: bool = False,
                plot_debug: bool = False) -> tuple:
    """
    Created by:
    Fabian Christ

    Extended by:
    Thomas Herrmann, Francesco Passigato

    Documentation:
    The minimum lap time problem is described as an optimal control problem, converted to a nonlinear program using
    direct orthogonal Gauss-Legendre collocation and then solved by the interior-point method IPOPT. Reduced computing
    times are achieved using a curvilinear abscissa approach for track description, algorithmic differentiation using
    the software framework CasADi, and a smoothing of the track input data by approximate spline regression. The
    vehicle's behavior is approximated as a double track model with quasi-steady state tire load simplification and
    nonlinear tire model.

    Please refer to our paper for further information:
    Christ, Wischnewski, Heilmeier, Lohmann
    Time-Optimal Trajectory Planning for a Race Car Considering Variable Tire-Road Friction Coefficients

    Inputs:
    reftrack:       track [x_m, y_m, w_tr_right_m, w_tr_left_m]
    coeffs_x:       coefficient matrix of the x splines with size (no_splines x 4)
    coeffs_y:       coefficient matrix of the y splines with size (no_splines x 4)
    normvectors:    array containing normalized normal vectors for every traj. point [x_component, y_component]
    pars:           parameters dictionary
    tpamap_path:    file path to tpa map (required for friction map loading)
    tpadata_path:   file path to tpa data (required for friction map loading)
    export_path:    path to output folder for warm start files and solution files
    print_debug:    determines if debug messages are printed
    plot_debug:     determines if debug plots are shown

    Outputs:
    alpha_opt:      solution vector of the optimization problem containing the lateral shift in m for every point
    v_opt:          velocity profile for the raceline
    reftrack:       possibly (depending on non-regular sampling) modified reference track must be returned for later
                    usage
    a_interp:       possibly (depending on non-regular sampling) modified equation system matrix for splines must be
                    returned for later usage
    normvectors:    possibly (depending on non-regular sampling) modified normal vectors must be returned for later
                    usage
    """

    # ------------------------------------------------------------------------------------------------------------------
    # USE NON-REGULAR SAMPLING -----------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    no_points_orig = reftrack.shape[0]

    if pars["optim_opts"]["step_non_reg"] > 0:
        reftrack, discr_points = tph.nonreg_sampling.nonreg_sampling(
            track=reftrack,
            eps_kappa=pars["optim_opts"]["eps_kappa"],
            step_non_reg=pars["optim_opts"]["step_non_reg"])

        # recalculate splines
        refpath_cl = np.vstack((reftrack[:, :2], reftrack[0, :2]))
        coeffs_x, coeffs_y, a_interp, normvectors = tph.calc_splines.calc_splines(
            path=refpath_cl)

    else:
        discr_points = np.arange(reftrack.shape[0])
        a_interp = None

    # ------------------------------------------------------------------------------------------------------------------
    # PREPARE TRACK INFORMATION ----------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # spline lengths
    spline_lengths_refline = tph.calc_spline_lengths.calc_spline_lengths(
        coeffs_x=coeffs_x, coeffs_y=coeffs_y)

    # calculate heading and curvature (numerically)
    kappa_refline = tph.calc_head_curv_num. \
        calc_head_curv_num(path=reftrack[:, :2],
                           el_lengths=spline_lengths_refline,
                           is_closed=True,
                           stepsize_curv_preview=pars["curv_calc_opts"]["d_preview_curv"],
                           stepsize_curv_review=pars["curv_calc_opts"]["d_review_curv"],
                           stepsize_psi_preview=pars["curv_calc_opts"]["d_preview_head"],
                           stepsize_psi_review=pars["curv_calc_opts"]["d_review_head"])[1]

    # close track
    kappa_refline_cl = np.append(kappa_refline, kappa_refline[0])
    discr_points_cl = np.append(
        discr_points, no_points_orig
    )  # add virtual index of last/first point for closed track
    w_tr_left_cl = np.append(reftrack[:, 3], reftrack[0, 3])
    w_tr_right_cl = np.append(reftrack[:, 2], reftrack[0, 2])

    # step size along the reference line
    h = pars["stepsize_opts"]["stepsize_reg"]

    # optimization steps (0, 1, 2 ... end point/start point)
    # steps = [i for i in range(kappa_refline_cl.size)]
    steps = [i for i in range(discr_points_cl.size)]

    # number of control intervals
    N = steps[-1]

    # station along the reference line
    # s_opt = np.linspace(0.0, N * h, N + 1)
    s_opt = np.asarray(discr_points_cl) * h

    # interpolate curvature of reference line in terms of steps
    kappa_interp = ca.interpolant('kappa_interp', 'linear', [steps],
                                  kappa_refline_cl)

    # interpolate track width (left and right to reference line) in terms of steps
    w_tr_left_interp = ca.interpolant('w_tr_left_interp', 'linear', [steps],
                                      w_tr_left_cl)
    w_tr_right_interp = ca.interpolant('w_tr_right_interp', 'linear', [steps],
                                       w_tr_right_cl)

    # describe friction coefficients from friction map with linear equations or gaussian basis functions
    if pars["optim_opts"]["var_friction"] is not None:
        w_mue_fl, w_mue_fr, w_mue_rl, w_mue_rr, center_dist = opt_mintime_traj.src. \
            approx_friction_map.approx_friction_map(reftrack=reftrack,
                                                    normvectors=normvectors,
                                                    tpamap_path=tpamap_path,
                                                    tpadata_path=tpadata_path,
                                                    pars=pars,
                                                    dn=pars["optim_opts"]["dn"],
                                                    n_gauss=pars["optim_opts"]["n_gauss"],
                                                    print_debug=print_debug,
                                                    plot_debug=plot_debug)

    # ------------------------------------------------------------------------------------------------------------------
    # DIRECT GAUSS-LEGENDRE COLLOCATION --------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # degree of interpolating polynomial
    d = 3

    # legendre collocation points
    tau = np.append(0, ca.collocation_points(d, 'legendre'))

    # coefficient matrix for formulating the collocation equation
    C = np.zeros((d + 1, d + 1))

    # coefficient matrix for formulating the collocation equation
    D = np.zeros(d + 1)

    # coefficient matrix for formulating the collocation equation
    B = np.zeros(d + 1)

    # construct polynomial basis
    for j in range(d + 1):
        # construct Lagrange polynomials to get the polynomial basis at the collocation point
        p = np.poly1d([1])
        for r in range(d + 1):
            if r != j:
                p *= np.poly1d([1, -tau[r]]) / (tau[j] - tau[r])

        # evaluate polynomial at the final time to get the coefficients of the continuity equation
        D[j] = p(1.0)

        # evaluate time derivative of polynomial at collocation points to get the coefficients of continuity equation
        p_der = np.polyder(p)
        for r in range(d + 1):
            C[j, r] = p_der(tau[r])

        # evaluate integral of the polynomial to get the coefficients of the quadrature function
        pint = np.polyint(p)
        B[j] = pint(1.0)

    # ------------------------------------------------------------------------------------------------------------------
    # STATE VARIABLES --------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # number of state variables
    if pars["pwr_params_mintime"]["pwr_behavior"]:
        nx = 11
        nx_pwr = 6
    else:
        nx = 5
        nx_pwr = 0

    # velocity [m/s]
    v_n = ca.SX.sym('v_n')
    v_s = 50
    v = v_s * v_n

    # side slip angle [rad]
    beta_n = ca.SX.sym('beta_n')
    beta_s = 0.5
    beta = beta_s * beta_n

    # yaw rate [rad/s]
    omega_z_n = ca.SX.sym('omega_z_n')
    omega_z_s = 1
    omega_z = omega_z_s * omega_z_n

    # lateral distance to reference line (positive = left) [m]
    n_n = ca.SX.sym('n_n')
    n_s = 5.0
    n = n_s * n_n

    # relative angle to tangent on reference line [rad]
    xi_n = ca.SX.sym('xi_n')
    xi_s = 1.0
    xi = xi_s * xi_n

    if pars["pwr_params_mintime"]["pwr_behavior"]:

        # Initialize e-machine object
        machine = opt_mintime_traj.powertrain_src.src.EMachine.EMachineModel(
            pwr_pars=pars["pwr_params_mintime"])

        # Initialize battery object
        batt = opt_mintime_traj.powertrain_src.src.Battery.BattModel(
            pwr_pars=pars["pwr_params_mintime"])

        # Initialize inverter object
        inverter = opt_mintime_traj.powertrain_src.src.Inverter.InverterModel(
            pwr_pars=pars["pwr_params_mintime"])

        # Initialize radiator objects (2 in total)
        radiators = opt_mintime_traj.powertrain_src.src.Radiators.RadiatorModel(
            pwr_pars=pars["pwr_params_mintime"])

        # scaling factors for state variables
        x_s = np.array([
            v_s, beta_s, omega_z_s, n_s, xi_s, machine.temp_mot_s,
            batt.temp_batt_s, inverter.temp_inv_s, radiators.temp_cool_mi_s,
            radiators.temp_cool_b_s, batt.soc_batt_s
        ])

        # put all states together
        x = ca.vertcat(v_n, beta_n, omega_z_n, n_n, xi_n, machine.temp_mot_n,
                       batt.temp_batt_n, inverter.temp_inv_n,
                       radiators.temp_cool_mi_n, radiators.temp_cool_b_n,
                       batt.soc_batt_n)

    else:

        # scaling factors for state variables
        x_s = np.array([v_s, beta_s, omega_z_s, n_s, xi_s])

        # put all states together
        x = ca.vertcat(v_n, beta_n, omega_z_n, n_n, xi_n)

    # ------------------------------------------------------------------------------------------------------------------
    # CONTROL VARIABLES ------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # number of control variables
    nu = 4

    # steer angle [rad]
    delta_n = ca.SX.sym('delta_n')
    delta_s = 0.5
    delta = delta_s * delta_n

    # positive longitudinal force (drive) [N]
    f_drive_n = ca.SX.sym('f_drive_n')
    f_drive_s = 7500.0
    f_drive = f_drive_s * f_drive_n

    # negative longitudinal force (brake) [N]
    f_brake_n = ca.SX.sym('f_brake_n')
    f_brake_s = 20000.0
    f_brake = f_brake_s * f_brake_n

    # lateral wheel load transfer [N]
    gamma_y_n = ca.SX.sym('gamma_y_n')
    gamma_y_s = 5000.0
    gamma_y = gamma_y_s * gamma_y_n

    # scaling factors for control variables
    u_s = np.array([delta_s, f_drive_s, f_brake_s, gamma_y_s])

    # put all controls together
    u = ca.vertcat(delta_n, f_drive_n, f_brake_n, gamma_y_n)

    # ------------------------------------------------------------------------------------------------------------------
    # MODEL EQUATIONS --------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # extract vehicle and tire parameters
    veh = pars["vehicle_params_mintime"]
    tire = pars["tire_params_mintime"]

    # general constants
    g = pars["veh_params"]["g"]
    mass = pars["veh_params"]["mass"]

    # curvature of reference line [rad/m]
    kappa = ca.SX.sym('kappa')

    # drag force [N]
    f_xdrag = pars["veh_params"]["dragcoeff"] * v**2

    # rolling resistance forces [N]
    f_xroll_fl = 0.5 * tire["c_roll"] * mass * g * veh["wheelbase_rear"] / veh[
        "wheelbase"]
    f_xroll_fr = 0.5 * tire["c_roll"] * mass * g * veh["wheelbase_rear"] / veh[
        "wheelbase"]
    f_xroll_rl = 0.5 * tire["c_roll"] * mass * g * veh[
        "wheelbase_front"] / veh["wheelbase"]
    f_xroll_rr = 0.5 * tire["c_roll"] * mass * g * veh[
        "wheelbase_front"] / veh["wheelbase"]
    f_xroll = tire["c_roll"] * mass * g

    # static normal tire forces [N]
    f_zstat_fl = 0.5 * mass * g * veh["wheelbase_rear"] / veh["wheelbase"]
    f_zstat_fr = 0.5 * mass * g * veh["wheelbase_rear"] / veh["wheelbase"]
    f_zstat_rl = 0.5 * mass * g * veh["wheelbase_front"] / veh["wheelbase"]
    f_zstat_rr = 0.5 * mass * g * veh["wheelbase_front"] / veh["wheelbase"]

    # dynamic normal tire forces (aerodynamic downforces) [N]
    f_zlift_fl = 0.5 * veh["liftcoeff_front"] * v**2
    f_zlift_fr = 0.5 * veh["liftcoeff_front"] * v**2
    f_zlift_rl = 0.5 * veh["liftcoeff_rear"] * v**2
    f_zlift_rr = 0.5 * veh["liftcoeff_rear"] * v**2

    # dynamic normal tire forces (load transfers) [N]
    f_zdyn_fl = (-0.5 * veh["cog_z"] / veh["wheelbase"] *
                 (f_drive + f_brake - f_xdrag - f_xroll) -
                 veh["k_roll"] * gamma_y)
    f_zdyn_fr = (-0.5 * veh["cog_z"] / veh["wheelbase"] *
                 (f_drive + f_brake - f_xdrag - f_xroll) +
                 veh["k_roll"] * gamma_y)
    f_zdyn_rl = (0.5 * veh["cog_z"] / veh["wheelbase"] *
                 (f_drive + f_brake - f_xdrag - f_xroll) -
                 (1.0 - veh["k_roll"]) * gamma_y)
    f_zdyn_rr = (0.5 * veh["cog_z"] / veh["wheelbase"] *
                 (f_drive + f_brake - f_xdrag - f_xroll) +
                 (1.0 - veh["k_roll"]) * gamma_y)

    # sum of all normal tire forces [N]
    f_z_fl = f_zstat_fl + f_zlift_fl + f_zdyn_fl
    f_z_fr = f_zstat_fr + f_zlift_fr + f_zdyn_fr
    f_z_rl = f_zstat_rl + f_zlift_rl + f_zdyn_rl
    f_z_rr = f_zstat_rr + f_zlift_rr + f_zdyn_rr

    # slip angles [rad]
    alpha_fl = delta - ca.atan(
        (v * ca.sin(beta) + veh["wheelbase_front"] * omega_z) /
        (v * ca.cos(beta) - 0.5 * veh["track_width_front"] * omega_z))
    alpha_fr = delta - ca.atan(
        (v * ca.sin(beta) + veh["wheelbase_front"] * omega_z) /
        (v * ca.cos(beta) + 0.5 * veh["track_width_front"] * omega_z))
    alpha_rl = ca.atan(
        (-v * ca.sin(beta) + veh["wheelbase_rear"] * omega_z) /
        (v * ca.cos(beta) - 0.5 * veh["track_width_rear"] * omega_z))
    alpha_rr = ca.atan(
        (-v * ca.sin(beta) + veh["wheelbase_rear"] * omega_z) /
        (v * ca.cos(beta) + 0.5 * veh["track_width_rear"] * omega_z))

    # lateral tire forces [N]
    f_y_fl = (pars["optim_opts"]["mue"] * f_z_fl *
              (1 + tire["eps_front"] * f_z_fl / tire["f_z0"]) *
              ca.sin(tire["C_front"] *
                     ca.atan(tire["B_front"] * alpha_fl - tire["E_front"] *
                             (tire["B_front"] * alpha_fl -
                              ca.atan(tire["B_front"] * alpha_fl)))))
    f_y_fr = (pars["optim_opts"]["mue"] * f_z_fr *
              (1 + tire["eps_front"] * f_z_fr / tire["f_z0"]) *
              ca.sin(tire["C_front"] *
                     ca.atan(tire["B_front"] * alpha_fr - tire["E_front"] *
                             (tire["B_front"] * alpha_fr -
                              ca.atan(tire["B_front"] * alpha_fr)))))
    f_y_rl = (pars["optim_opts"]["mue"] * f_z_rl *
              (1 + tire["eps_rear"] * f_z_rl / tire["f_z0"]) *
              ca.sin(tire["C_rear"] *
                     ca.atan(tire["B_rear"] * alpha_rl - tire["E_rear"] *
                             (tire["B_rear"] * alpha_rl -
                              ca.atan(tire["B_rear"] * alpha_rl)))))
    f_y_rr = (pars["optim_opts"]["mue"] * f_z_rr *
              (1 + tire["eps_rear"] * f_z_rr / tire["f_z0"]) *
              ca.sin(tire["C_rear"] *
                     ca.atan(tire["B_rear"] * alpha_rr - tire["E_rear"] *
                             (tire["B_rear"] * alpha_rr -
                              ca.atan(tire["B_rear"] * alpha_rr)))))

    # longitudinal tire forces [N]
    f_x_fl = 0.5 * f_drive * veh["k_drive_front"] + 0.5 * f_brake * veh[
        "k_brake_front"] - f_xroll_fl
    f_x_fr = 0.5 * f_drive * veh["k_drive_front"] + 0.5 * f_brake * veh[
        "k_brake_front"] - f_xroll_fr
    f_x_rl = 0.5 * f_drive * (1 - veh["k_drive_front"]) + 0.5 * f_brake * (
        1 - veh["k_brake_front"]) - f_xroll_rl
    f_x_rr = 0.5 * f_drive * (1 - veh["k_drive_front"]) + 0.5 * f_brake * (
        1 - veh["k_brake_front"]) - f_xroll_rr

    # longitudinal acceleration [m/s²]
    ax = (f_x_rl + f_x_rr + (f_x_fl + f_x_fr) * ca.cos(delta) -
          (f_y_fl + f_y_fr) * ca.sin(delta) -
          pars["veh_params"]["dragcoeff"] * v**2) / mass

    # lateral acceleration [m/s²]
    ay = ((f_x_fl + f_x_fr) * ca.sin(delta) + f_y_rl + f_y_rr +
          (f_y_fl + f_y_fr) * ca.cos(delta)) / mass

    # ------------------------------------------------------------------------------------------------------------------
    # POWERTRAIN BEHAVIOR ----------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    if pars["pwr_params_mintime"]["pwr_behavior"]:

        pwr_pars = pars["pwr_params_mintime"]

        # --------------------------------------------------------------------------------------------------------------
        # CALCS --------------------------------------------------------------------------------------------------------
        # --------------------------------------------------------------------------------------------------------------

        # On wheels requested power [kW]
        p_des = (f_drive * v * 0.001)

        # E-Machines
        machine.get_states(f_drive=f_drive, v=v)

        # Machine losses [kW]
        machine.get_loss(p_wheel=p_des)

        # Calculate total power loss for all electric machines in vehicle [kW]
        machine.get_machines_cum_losses()

        # Inverter losses
        inverter.get_loss(i_eff=machine.i_eff,
                          v_dc=batt.v_dc,
                          p_out_inv=machine.p_input)

        # Calculate total power loss for all inverters in vehicle [kW]
        inverter.get_inverters_cum_losses()

        # Get internal battery resistance [Ohm]
        batt.internal_resistance()

        # Get battery loss power [kW], output power [kW] and output current [A]
        batt.battery_loss(p_des=p_des,
                          p_loss_mot=machine.p_loss_total_all_machines,
                          p_loss_inv=inverter.p_loss_total_all_inverters,
                          p_in_inv=inverter.p_in_inv)

        # get intermediate temperatures for motor-inverter cooling
        radiators.get_intermediate_temps(temp_inv=inverter.temp_inv,
                                         r_inv=inverter.r_inv)

    # ------------------------------------------------------------------------------------------------------------------
    # DERIVATIVES ------------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # time-distance scaling factor (dt/ds)
    sf = (1.0 - n * kappa) / (v * (ca.cos(xi + beta)))
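    # sf converts time derivatives into derivatives w.r.t. the arc length s of
    # the reference line; integrating sf over s therefore yields the lap time,
    # which the quadrature terms in the NLP below accumulate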

    # model equations for two track model (ordinary differential equations)
    dv = (sf / mass) * (
        (f_x_rl + f_x_rr) * ca.cos(beta) +
        (f_x_fl + f_x_fr) * ca.cos(delta - beta) +
        (f_y_rl + f_y_rr) * ca.sin(beta) -
        (f_y_fl + f_y_fr) * ca.sin(delta - beta) - f_xdrag * ca.cos(beta))

    dbeta = sf * (
        -omega_z +
        (-(f_x_rl + f_x_rr) * ca.sin(beta) +
         (f_x_fl + f_x_fr) * ca.sin(delta - beta) +
         (f_y_rl + f_y_rr) * ca.cos(beta) +
         (f_y_fl + f_y_fr) * ca.cos(delta - beta) + f_xdrag * ca.sin(beta)) /
        (mass * v))

    domega_z = (sf / veh["I_z"]) * (
        (f_x_rr - f_x_rl) * veh["track_width_rear"] / 2 -
        (f_y_rl + f_y_rr) * veh["wheelbase_rear"] +
        ((f_x_fr - f_x_fl) * ca.cos(delta) +
         (f_y_fl - f_y_fr) * ca.sin(delta)) * veh["track_width_front"] / 2 +
        ((f_y_fl + f_y_fr) * ca.cos(delta) +
         (f_x_fl + f_x_fr) * ca.sin(delta)) * veh["track_width_front"])

    dn = sf * v * ca.sin(xi + beta)

    dxi = sf * omega_z - kappa

    if pars["pwr_params_mintime"]["pwr_behavior"]:

        machine.get_increment(sf=sf,
                              temp_cool_12=radiators.temp_cool_12,
                              temp_cool_13=radiators.temp_cool_13)

        inverter.get_increment(sf=sf,
                               temp_cool_mi=radiators.temp_cool_mi,
                               temp_cool_12=radiators.temp_cool_12)

        batt.get_increment(sf=sf, temp_cool_b=radiators.temp_cool_b)

        radiators.get_increment_mi(sf=sf,
                                   temp_mot=machine.temp_mot,
                                   temp_inv=inverter.temp_inv,
                                   r_inv=inverter.r_inv,
                                   r_machine=machine.r_machine)

        radiators.get_increment_b(sf=sf,
                                  temp_batt=batt.temp_batt,
                                  temp_cool_b=radiators.temp_cool_b,
                                  R_eq_B_inv=batt.r_batt_inverse)

        batt.get_soc(sf=sf)

        # ODEs: driving dynamics and thermodynamics
        dx = ca.vertcat(dv, dbeta, domega_z, dn, dxi, machine.dtemp,
                        batt.dtemp, inverter.dtemp, radiators.dtemp_cool_mi,
                        radiators.dtemp_cool_b, batt.dsoc) / x_s
    else:

        # ODEs: driving dynamics only
        dx = ca.vertcat(dv, dbeta, domega_z, dn, dxi) / x_s

    # ------------------------------------------------------------------------------------------------------------------
    # CONTROL BOUNDARIES -----------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    delta_min = -veh["delta_max"] / delta_s  # min. steer angle [rad]
    delta_max = veh["delta_max"] / delta_s  # max. steer angle [rad]
    f_drive_min = 0.0  # min. longitudinal drive force [N]
    f_drive_max = veh[
        "f_drive_max"] / f_drive_s  # max. longitudinal drive force [N]
    f_brake_min = -veh[
        "f_brake_max"] / f_brake_s  # min. longitudinal brake force [N]
    f_brake_max = 0.0  # max. longitudinal brake force [N]
    gamma_y_min = -np.inf  # min. lateral wheel load transfer [N]
    gamma_y_max = np.inf  # max. lateral wheel load transfer [N]

    # ------------------------------------------------------------------------------------------------------------------
    # STATE BOUNDARIES -------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    v_min = 1.0 / v_s  # min. velocity [m/s]
    v_max = pars["veh_params"]["v_max"] / v_s  # max. velocity [m/s]
    beta_min = -0.5 * np.pi / beta_s  # min. side slip angle [rad]
    beta_max = 0.5 * np.pi / beta_s  # max. side slip angle [rad]
    omega_z_min = -0.5 * np.pi / omega_z_s  # min. yaw rate [rad/s]
    omega_z_max = 0.5 * np.pi / omega_z_s  # max. yaw rate [rad/s]
    xi_min = -0.5 * np.pi / xi_s  # min. relative angle to tangent on reference line [rad]
    xi_max = 0.5 * np.pi / xi_s  # max. relative angle to tangent on reference line [rad]

    # ------------------------------------------------------------------------------------------------------------------
    # INITIAL GUESS FOR DECISION VARIABLES -----------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------
    v_guess = 20.0 / v_s

    # ------------------------------------------------------------------------------------------------------------------
    # HELPER FUNCTIONS -------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # continuous time dynamics
    f_dyn = ca.Function('f_dyn', [x, u, kappa], [dx, sf], ['x', 'u', 'kappa'],
                        ['dx', 'sf'])

    # longitudinal tire forces [N]
    f_fx = ca.Function('f_fx', [x, u], [f_x_fl, f_x_fr, f_x_rl, f_x_rr],
                       ['x', 'u'], ['f_x_fl', 'f_x_fr', 'f_x_rl', 'f_x_rr'])
    # lateral tire forces [N]
    f_fy = ca.Function('f_fy', [x, u], [f_y_fl, f_y_fr, f_y_rl, f_y_rr],
                       ['x', 'u'], ['f_y_fl', 'f_y_fr', 'f_y_rl', 'f_y_rr'])
    # vertical tire forces [N]
    f_fz = ca.Function('f_fz', [x, u], [f_z_fl, f_z_fr, f_z_rl, f_z_rr],
                       ['x', 'u'], ['f_z_fl', 'f_z_fr', 'f_z_rl', 'f_z_rr'])

    # longitudinal and lateral acceleration [m/s²]
    f_a = ca.Function('f_a', [x, u], [ax, ay], ['x', 'u'], ['ax', 'ay'])

    if pars["pwr_params_mintime"]["pwr_behavior"]:

        machine.ini_nlp_state(x=x, u=u)
        inverter.ini_nlp_state(x=x, u=u)
        batt.ini_nlp_state(x=x, u=u)
        radiators.ini_nlp_state(x=x, u=u)

    # ------------------------------------------------------------------------------------------------------------------
    # FORMULATE NONLINEAR PROGRAM --------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # initialize NLP vectors
    w = []
    w0 = []
    lbw = []
    ubw = []
    J = 0
    g = []
    lbg = []
    ubg = []

    # initialize output vectors
    x_opt = []
    u_opt = []
    dt_opt = []
    tf_opt = []
    ax_opt = []
    ay_opt = []
    ec_opt = []

    # initialize control vectors (for regularization)
    delta_p = []
    F_p = []

    # boundary constraint: lift initial conditions
    Xk = ca.MX.sym('X0', nx)
    w.append(Xk)
    n_min = (-w_tr_right_interp(0) + pars["optim_opts"]["width_opt"] / 2) / n_s
    n_max = (w_tr_left_interp(0) - pars["optim_opts"]["width_opt"] / 2) / n_s
    if pars["pwr_params_mintime"]["pwr_behavior"]:
        lbw.append([
            v_min, beta_min, omega_z_min, n_min, xi_min, machine.temp_min,
            batt.temp_min, inverter.temp_min, radiators.temp_cool_mi_min,
            radiators.temp_cool_b_min, batt.soc_min
        ])
        ubw.append([
            v_max, beta_max, omega_z_max, n_max, xi_max, machine.temp_max,
            batt.temp_max, inverter.temp_max, radiators.temp_cool_mi_max,
            radiators.temp_cool_b_max, batt.soc_max
        ])
        w0.append([
            v_guess, 0.0, 0.0, 0.0, 0.0, machine.temp_guess, batt.temp_guess,
            inverter.temp_guess, radiators.temp_cool_mi_guess,
            radiators.temp_cool_b_guess, batt.soc_guess
        ])

        # Initial powertrain conditions
        g.append(Xk[5] - pwr_pars["T_mot_ini"] / machine.temp_mot_s)
        lbg.append([0])
        ubg.append([0])

        g.append(Xk[6] - pwr_pars["T_batt_ini"] / batt.temp_batt_s)
        lbg.append([0])
        ubg.append([0])

        g.append(Xk[7] - pwr_pars["T_inv_ini"] / inverter.temp_inv_s)
        lbg.append([0])
        ubg.append([0])

        g.append(Xk[8] - pwr_pars["T_cool_mi_ini"] / radiators.temp_cool_mi_s)
        lbg.append([0])
        ubg.append([0])

        g.append(Xk[9] - pwr_pars["T_cool_b_ini"] / radiators.temp_cool_b_s)
        lbg.append([0])
        ubg.append([0])

        g.append(Xk[10] - pwr_pars["SOC_ini"] / batt.soc_batt_s)
        lbg.append([0])
        ubg.append([0])

    else:
        lbw.append([v_min, beta_min, omega_z_min, n_min, xi_min])
        ubw.append([v_max, beta_max, omega_z_max, n_max, xi_max])
        w0.append([v_guess, 0.0, 0.0, 0.0, 0.0])
    x_opt.append(Xk * x_s)

    # loop along the racetrack and formulate path constraints & system dynamics
    # retrieve step-sizes of optimization along reference line
    h = np.diff(s_opt)
    for k in range(N):
        # add decision variables for the control
        Uk = ca.MX.sym('U_' + str(k), nu)
        w.append(Uk)
        lbw.append([delta_min, f_drive_min, f_brake_min, gamma_y_min])
        ubw.append([delta_max, f_drive_max, f_brake_max, gamma_y_max])
        w0.append([0.0] * nu)

        # add decision variables for the state at collocation points
        Xc = []
        for j in range(d):
            Xkj = ca.MX.sym('X_' + str(k) + '_' + str(j), nx)
            Xc.append(Xkj)
            w.append(Xkj)
            lbw.append([-np.inf] * nx)
            ubw.append([np.inf] * nx)
            if pars["pwr_params_mintime"]["pwr_behavior"]:
                w0.append([
                    v_guess, 0.0, 0.0, 0.0, 0.0, machine.temp_guess,
                    batt.temp_guess, inverter.temp_guess,
                    radiators.temp_cool_mi_guess, radiators.temp_cool_b_guess,
                    batt.soc_guess
                ])
            else:
                w0.append([v_guess, 0.0, 0.0, 0.0, 0.0])

        # loop over all collocation points
        Xk_end = D[0] * Xk
        sf_opt = []
        for j in range(1, d + 1):
            # calculate the state derivative at the collocation point
            xp = C[0, j] * Xk
            for r in range(d):
                xp = xp + C[r + 1, j] * Xc[r]

            # interpolate kappa at the collocation point
            kappa_col = kappa_interp(k + tau[j])

            # append collocation equations (system dynamic)
            fj, qj = f_dyn(Xc[j - 1], Uk, kappa_col)
            g.append(h[k] * fj - xp)
            lbg.append([0.0] * nx)
            ubg.append([0.0] * nx)

            # add contribution to the end state
            Xk_end = Xk_end + D[j] * Xc[j - 1]

            # add contribution to quadrature function
            J = J + B[j] * qj * h[k]

            # add contribution to scaling factor (for calculating lap time)
            sf_opt.append(B[j] * qj * h[k])

        # interval time step (quadrature of sf over the d = 3 collocation
        # points) and used energy
        dt_opt.append(sf_opt[0] + sf_opt[1] + sf_opt[2])
        if pars["pwr_params_mintime"]["pwr_behavior"]:
            # Add battery output power [kW] and battery loss power [kW] to retrieve the entire system power [W] and
            # multiply by dt for the energy consumption [Ws]
            ec_opt.append((batt.f_nlp(Xk, Uk)[0] + batt.f_nlp(Xk, Uk)[1]) *
                          1000 * dt_opt[-1])
        else:
            ec_opt.append(Xk[0] * v_s * Uk[1] * f_drive_s * dt_opt[-1])

        # add new decision variables for state at end of the collocation interval
        Xk = ca.MX.sym('X_' + str(k + 1), nx)
        w.append(Xk)
        n_min = (-w_tr_right_interp(k + 1) +
                 pars["optim_opts"]["width_opt"] / 2.0) / n_s
        n_max = (w_tr_left_interp(k + 1) -
                 pars["optim_opts"]["width_opt"] / 2.0) / n_s
        if pars["pwr_params_mintime"]["pwr_behavior"]:
            lbw.append([
                v_min, beta_min, omega_z_min, n_min, xi_min, machine.temp_min,
                batt.temp_min, inverter.temp_min, radiators.temp_cool_mi_min,
                radiators.temp_cool_b_min, batt.soc_min
            ])
            ubw.append([
                v_max, beta_max, omega_z_max, n_max, xi_max, machine.temp_max,
                batt.temp_max, inverter.temp_max, radiators.temp_cool_mi_max,
                radiators.temp_cool_b_max, batt.soc_max
            ])
            w0.append([
                v_guess, 0.0, 0.0, 0.0, 0.0, machine.temp_guess,
                batt.temp_guess, inverter.temp_guess,
                radiators.temp_cool_mi_guess, radiators.temp_cool_b_guess,
                batt.soc_guess
            ])
        else:
            lbw.append([v_min, beta_min, omega_z_min, n_min, xi_min])
            ubw.append([v_max, beta_max, omega_z_max, n_max, xi_max])
            w0.append([v_guess, 0.0, 0.0, 0.0, 0.0])

        # add equality constraint
        g.append(Xk_end - Xk)
        lbg.append([0.0] * nx)
        ubg.append([0.0] * nx)

        # get tire forces
        f_x_flk, f_x_frk, f_x_rlk, f_x_rrk = f_fx(Xk, Uk)
        f_y_flk, f_y_frk, f_y_rlk, f_y_rrk = f_fy(Xk, Uk)
        f_z_flk, f_z_frk, f_z_rlk, f_z_rrk = f_fz(Xk, Uk)

        # get accelerations (longitudinal + lateral)
        axk, ayk = f_a(Xk, Uk)

        # path constraint: limited engine power
        g.append(Xk[0] * Uk[1])
        lbg.append([-np.inf])
        ubg.append([veh["power_max"] / (f_drive_s * v_s)])

        # get constant friction coefficient
        if pars["optim_opts"]["var_friction"] is None:
            mue_fl = pars["optim_opts"]["mue"]
            mue_fr = pars["optim_opts"]["mue"]
            mue_rl = pars["optim_opts"]["mue"]
            mue_rr = pars["optim_opts"]["mue"]

        # calculate variable friction coefficients along the reference line (regression with linear equations)
        elif pars["optim_opts"]["var_friction"] == "linear":
            # friction coefficient for each tire
            mue_fl = w_mue_fl[k + 1, 0] * Xk[3] * n_s + w_mue_fl[k + 1, 1]
            mue_fr = w_mue_fr[k + 1, 0] * Xk[3] * n_s + w_mue_fr[k + 1, 1]
            mue_rl = w_mue_rl[k + 1, 0] * Xk[3] * n_s + w_mue_rl[k + 1, 1]
            mue_rr = w_mue_rr[k + 1, 0] * Xk[3] * n_s + w_mue_rr[k + 1, 1]

        # calculate variable friction coefficients along the reference line (regression with gaussian basis functions)
        elif pars["optim_opts"]["var_friction"] == "gauss":
            # gaussian basis functions
            sigma = 2.0 * center_dist[k + 1, 0]
            n_gauss = pars["optim_opts"]["n_gauss"]
            n_q = np.linspace(-n_gauss, n_gauss,
                              2 * n_gauss + 1) * center_dist[k + 1, 0]

            gauss_basis = []
            for i in range(2 * n_gauss + 1):
                gauss_basis.append(
                    ca.exp(-(Xk[3] * n_s - n_q[i])**2 / (2 * (sigma**2))))
            gauss_basis = ca.vertcat(*gauss_basis)

            mue_fl = ca.dot(w_mue_fl[k + 1, :-1],
                            gauss_basis) + w_mue_fl[k + 1, -1]
            mue_fr = ca.dot(w_mue_fr[k + 1, :-1],
                            gauss_basis) + w_mue_fr[k + 1, -1]
            mue_rl = ca.dot(w_mue_rl[k + 1, :-1],
                            gauss_basis) + w_mue_rl[k + 1, -1]
            mue_rr = ca.dot(w_mue_rr[k + 1, :-1],
                            gauss_basis) + w_mue_rr[k + 1, -1]

        else:
            raise ValueError("No friction coefficients are available!")

        # path constraint: Kamm's Circle for each wheel
        g.append(((f_x_flk / (mue_fl * f_z_flk))**2 + (f_y_flk /
                                                       (mue_fl * f_z_flk))**2))
        g.append(((f_x_frk / (mue_fr * f_z_frk))**2 + (f_y_frk /
                                                       (mue_fr * f_z_frk))**2))
        g.append(((f_x_rlk / (mue_rl * f_z_rlk))**2 + (f_y_rlk /
                                                       (mue_rl * f_z_rlk))**2))
        g.append(((f_x_rrk / (mue_rr * f_z_rrk))**2 + (f_y_rrk /
                                                       (mue_rr * f_z_rrk))**2))
        lbg.append([0.0] * 4)
        ubg.append([1.0] * 4)
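        # bounding each sum by 1.0 keeps the combined longitudinal and lateral
        # tire force inside the friction circle of radius mue * f_z per wheel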

        # path constraint: lateral wheel load transfer
        g.append((
            (f_y_flk + f_y_frk) * ca.cos(Uk[0] * delta_s) + f_y_rlk + f_y_rrk +
            (f_x_flk + f_x_frk) * ca.sin(Uk[0] * delta_s)) * veh["cog_z"] /
                 ((veh["track_width_front"] + veh["track_width_rear"]) / 2) -
                 Uk[3] * gamma_y_s)
        lbg.append([0.0])
        ubg.append([0.0])

        # path constraint: f_drive * f_brake == 0 (no simultaneous operation of brake and accelerator pedal)
        g.append(Uk[1] * Uk[2])
        lbg.append([-20000.0 / (f_drive_s * f_brake_s)])
        ubg.append([0.0])

        # path constraint: actor dynamic
        if k > 0:
            sigma = (1 - kappa_interp(k) * Xk[3] * n_s) / (Xk[0] * v_s)
            g.append(
                (Uk - w[1 + (k - 1) * (nx - nx_pwr)]) / (h[k - 1] * sigma))
            lbg.append([
                delta_min / (veh["t_delta"]), -np.inf,
                f_brake_min / (veh["t_brake"]), -np.inf
            ])
            ubg.append([
                delta_max / (veh["t_delta"]), f_drive_max / (veh["t_drive"]),
                np.inf, np.inf
            ])

        # path constraint: safe trajectories with acceleration ellipse
        if pars["optim_opts"]["safe_traj"]:
            g.append((ca.fmax(axk, 0) / pars["optim_opts"]["ax_pos_safe"])**2 +
                     (ayk / pars["optim_opts"]["ay_safe"])**2)
            g.append((ca.fmin(axk, 0) / pars["optim_opts"]["ax_neg_safe"])**2 +
                     (ayk / pars["optim_opts"]["ay_safe"])**2)
            lbg.append([0.0] * 2)
            ubg.append([1.0] * 2)

        # append controls (for regularization)
        delta_p.append(Uk[0] * delta_s)
        F_p.append(Uk[1] * f_drive_s / 10000.0 + Uk[2] * f_brake_s / 10000.0)

        # append outputs
        x_opt.append(Xk * x_s)
        u_opt.append(Uk * u_s)
        tf_opt.extend([f_x_flk, f_y_flk, f_z_flk, f_x_frk, f_y_frk, f_z_frk])
        tf_opt.extend([f_x_rlk, f_y_rlk, f_z_rlk, f_x_rrk, f_y_rrk, f_z_rrk])
        ax_opt.append(axk)
        ay_opt.append(ayk)

        if pars["pwr_params_mintime"]["pwr_behavior"]:
            machine.p_losses_opt.extend(machine.f_nlp(Xk, Uk))
            inverter.p_losses_opt.extend(inverter.f_nlp(Xk, Uk))
            batt.p_losses_opt.extend(batt.f_nlp(Xk, Uk))
            radiators.temps_opt.extend(radiators.f_nlp(Xk, Uk))

    # boundary constraint: start states = final states
    g.append(w[0] - Xk)
    if pars["pwr_params_mintime"]["pwr_behavior"]:
        lbg.append([
            0.0, 0.0, 0.0, 0.0, 0.0, -np.inf, -np.inf, -np.inf, -np.inf,
            -np.inf, -np.inf
        ])
        ubg.append([
            0.0, 0.0, 0.0, 0.0, 0.0, np.inf, np.inf, np.inf, np.inf, np.inf,
            np.inf
        ])
    else:
        lbg.append([0.0, 0.0, 0.0, 0.0, 0.0])
        ubg.append([0.0, 0.0, 0.0, 0.0, 0.0])

    # path constraint: limited energy consumption
    if pars["optim_opts"]["limit_energy"]:
        g.append(ca.sum1(ca.vertcat(*ec_opt)) / 3600000.0)
        lbg.append([0])
        ubg.append([pars["optim_opts"]["energy_limit"]])

    # formulate differentiation matrix (for regularization)
    diff_matrix = np.eye(N)
    for i in range(N - 1):
        diff_matrix[i, i + 1] = -1.0
    diff_matrix[N - 1, 0] = -1.0

    # regularization (delta)
    delta_p = ca.vertcat(*delta_p)
    Jp_delta = ca.mtimes(ca.MX(diff_matrix), delta_p)
    Jp_delta = ca.dot(Jp_delta, Jp_delta)

    # regularization (f_drive + f_brake)
    F_p = ca.vertcat(*F_p)
    Jp_f = ca.mtimes(ca.MX(diff_matrix), F_p)
    Jp_f = ca.dot(Jp_f, Jp_f)

    # formulate objective
    J = J + pars["optim_opts"]["penalty_F"] * Jp_f + pars["optim_opts"][
        "penalty_delta"] * Jp_delta

    # concatenate NLP vectors
    w = ca.vertcat(*w)
    g = ca.vertcat(*g)
    w0 = np.concatenate(w0)
    lbw = np.concatenate(lbw)
    ubw = np.concatenate(ubw)
    lbg = np.concatenate(lbg)
    ubg = np.concatenate(ubg)

    # concatenate output vectors
    x_opt = ca.vertcat(*x_opt)
    u_opt = ca.vertcat(*u_opt)
    tf_opt = ca.vertcat(*tf_opt)
    dt_opt = ca.vertcat(*dt_opt)
    ax_opt = ca.vertcat(*ax_opt)
    ay_opt = ca.vertcat(*ay_opt)
    ec_opt = ca.vertcat(*ec_opt)
    if pars["pwr_params_mintime"]["pwr_behavior"]:
        machine.p_losses_opt = ca.vertcat(*machine.p_losses_opt)
        inverter.p_losses_opt = ca.vertcat(*inverter.p_losses_opt)
        batt.p_losses_opt = ca.vertcat(*batt.p_losses_opt)
        radiators.temps_opt = ca.vertcat(*radiators.temps_opt)

    # ------------------------------------------------------------------------------------------------------------------
    # CREATE NLP SOLVER ------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # fill nlp structure
    nlp = {'f': J, 'x': w, 'g': g}

    # solver options
    opts = {
        "expand": True,
        "verbose": print_debug,
        "ipopt.max_iter": 2000,
        "ipopt.tol": 1e-7
    }

    # solver options for warm start
    if pars["optim_opts"]["warm_start"]:
        opts_warm_start = {
            "ipopt.warm_start_init_point": "yes",
            "ipopt.warm_start_bound_push": 1e-3,
            "ipopt.warm_start_mult_bound_push": 1e-3,
            "ipopt.warm_start_slack_bound_push": 1e-3,
            "ipopt.mu_init": 1e-3
        }
        opts.update(opts_warm_start)

    # load warm start files
    if pars["optim_opts"]["warm_start"]:
        try:
            w0 = np.loadtxt(os.path.join(export_path, 'w0.csv'))
            lam_x0 = np.loadtxt(os.path.join(export_path, 'lam_x0.csv'))
            lam_g0 = np.loadtxt(os.path.join(export_path, 'lam_g0.csv'))
        except IOError:
            print('\033[91m' + 'ERROR: Failed to load warm start files!' +
                  '\033[0m')
            sys.exit(1)

    # check warm start files
    if pars["optim_opts"]["warm_start"] and not len(w0) == len(lbw):
        print(
            '\033[91m' +
            'WARNING: Warm start files do not fit to the dimension of the NLP!'
            + '\033[0m')
        sys.exit(1)

    # create solver instance
    solver = ca.nlpsol("solver", "ipopt", nlp, opts)

    # ------------------------------------------------------------------------------------------------------------------
    # SOLVE NLP --------------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # start time measure
    t0 = time.perf_counter()

    # solve NLP
    if pars["optim_opts"]["warm_start"]:
        sol = solver(x0=w0,
                     lbx=lbw,
                     ubx=ubw,
                     lbg=lbg,
                     ubg=ubg,
                     lam_x0=lam_x0,
                     lam_g0=lam_g0)
    else:
        sol = solver(x0=w0, lbx=lbw, ubx=ubw, lbg=lbg, ubg=ubg)

    # end time measure
    tend = time.perf_counter()

    if solver.stats()['return_status'] != 'Solve_Succeeded':
        print('\033[91m' + 'ERROR: Optimization did not succeed!' + '\033[0m')
        sys.exit(1)

    # ------------------------------------------------------------------------------------------------------------------
    # EXTRACT SOLUTION -------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # helper function to extract solution for state variables, control variables, tire forces, time
    f_sol = ca.Function(
        'f_sol', [w], [x_opt, u_opt, tf_opt, dt_opt, ax_opt, ay_opt, ec_opt],
        ['w'],
        ['x_opt', 'u_opt', 'tf_opt', 'dt_opt', 'ax_opt', 'ay_opt', 'ec_opt'])

    if pars["pwr_params_mintime"]["pwr_behavior"]:

        machine.extract_sol(w=w, sol_states=sol['x'])
        inverter.extract_sol(w=w, sol_states=sol['x'])
        batt.extract_sol(w=w, sol_states=sol['x'])
        radiators.extract_sol(w=w, sol_states=sol['x'])

        # Store for convenient export
        pwr_comps = {
            "machine": machine,
            "inverter": inverter,
            "batt": batt,
            "radiators": radiators
        }
    else:

        pwr_comps = None

    # extract solution
    x_opt, u_opt, tf_opt, dt_opt, ax_opt, ay_opt, ec_opt = f_sol(sol['x'])

    # solution for state variables
    x_opt = np.reshape(x_opt, (N + 1, nx))

    # solution for control variables
    u_opt = np.reshape(u_opt, (N, nu))

    # solution for tire forces
    tf_opt = np.append(tf_opt[-12:], tf_opt[:])
    tf_opt = np.reshape(tf_opt, (N + 1, 12))

    # solution for time
    t_opt = np.hstack((0.0, np.cumsum(dt_opt)))

    # solution for acceleration
    ax_opt = np.append(ax_opt[-1], ax_opt)
    ay_opt = np.append(ay_opt[-1], ay_opt)
    atot_opt = np.sqrt(np.power(ax_opt, 2) + np.power(ay_opt, 2))

    # solution for energy consumption
    ec_opt_cum = np.hstack((0.0, np.cumsum(ec_opt))) / 3600.0

    # ------------------------------------------------------------------------------------------------------------------
    # EXPORT SOLUTION --------------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    # export data to CSVs
    opt_mintime_traj.src.export_mintime_solution.export_mintime_solution(
        file_path=export_path,
        pars=pars,
        s=s_opt,
        t=t_opt,
        x=x_opt,
        u=u_opt,
        tf=tf_opt,
        ax=ax_opt,
        ay=ay_opt,
        atot=atot_opt,
        w0=sol["x"],
        lam_x0=sol["lam_x"],
        lam_g0=sol["lam_g"],
        pwr=pwr_comps)

    # ------------------------------------------------------------------------------------------------------------------
    # PLOT & PRINT RESULTS ---------------------------------------------------------------------------------------------
    # ------------------------------------------------------------------------------------------------------------------

    if plot_debug:
        opt_mintime_traj.src.result_plots_mintime.result_plots_mintime(
            pars=pars,
            reftrack=reftrack,
            s=s_opt,
            t=t_opt,
            x=x_opt,
            u=u_opt,
            ax=ax_opt,
            ay=ay_opt,
            atot=atot_opt,
            tf=tf_opt,
            ec=ec_opt_cum,
            pwr=pwr_comps)

    if print_debug:
        print("INFO: Laptime: %.3fs" % t_opt[-1])
        print("INFO: NLP solving time: %.3fs" % (tend - t0))
        print("INFO: Maximum abs(ay): %.2fm/s2" % np.amax(ay_opt))
        print("INFO: Maximum ax: %.2fm/s2" % np.amax(ax_opt))
        print("INFO: Minimum ax: %.2fm/s2" % np.amin(ax_opt))
        print("INFO: Maximum total acc: %.2fm/s2" % np.amax(atot_opt))
        print('INFO: Energy consumption: %.3fWh' % ec_opt_cum[-1])

    return -x_opt[:-1, 3], x_opt[:-1, 0], reftrack, a_interp, normvectors
### Calculate P[n+1] with P[n] and P[n-1]
for n in range(1, N - 1):
    Legen_poly[n+1] = ((2*n+1) * np.poly1d([1, 0]) * Legen_poly[n] -
                       n * Legen_poly[n-1]) / (n+1)

### Print the Legendre polynomials
for n in range(0, N):
    print('P[' + str(n) + '] is')
    print(Legen_poly[n])
    print('\n')

### Indefinite integration of  P_n * P_m
for i in range(0, N):
    for j in range(0, N):
        Legen_poly_multp[i * N + j] = np.polyint(Legen_poly[i] * Legen_poly[j])

### Evaluate the definite integral from -1 to +1
for n in range(0, N**2):
    Legen_poly_inter[n] = Legen_poly_multp[n](1) - Legen_poly_multp[n](-1)

Legen_poly_inter = Legen_poly_inter.reshape((N, N))
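Since the Legendre polynomials are orthogonal on [-1, 1] with squared norm 2/(2n+1), the matrix built above should come out numerically diagonal. A quick sanity check, reusing N and Legen_poly_inter from the snippet above:

expected = np.diag([2.0 / (2 * n + 1) for n in range(N)])
assert np.allclose(Legen_poly_inter, expected, atol=1e-8)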

### 1.ii
for n in range(0, N):
    #    Legen_poly_deri[n] = np.polyder(Legen_poly[n])
    #    print Legen_poly_deri[n]
    Legen_poly_ii1_left[n] = -np.polyder(
        np.poly1d([-1, 0, 1]) * np.polyder(Legen_poly[n]))
    Legen_poly_ii1_right[n] = n * (n + 1) * Legen_poly[n]
    Legen_poly_ii1[n] = Legen_poly_ii1_left[n] - Legen_poly_ii1_right[n]
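Each P_n solves Legendre's differential equation -d/dx[(1 - x^2) P_n'(x)] = n(n + 1) P_n(x), so every residual Legen_poly_ii1[n] computed above should be the zero polynomial up to rounding:

for n in range(0, N):
    assert np.allclose(Legen_poly_ii1[n].coeffs, 0.0, atol=1e-8)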
for x in x59:
    y = next(x62)
    for z in y:
        print(z)

x63 = np.mean(x59)
print("mean = ", x63)
print('------------------------------')
x64 = np.var(x59)
print("var = ", x64)
print('------------------------------')
x65 = np.std(x59)
print("std = ", x65)
print('------------------------------')
x66 = np.median(x59)
print("median = ", x66)
print('------------------------------')

#numpy.polyval(p, x) => Evaluate a polynomial at specific values
# p(x) = ax^2 + bx + c, here p(x) = 2x^2 + x + 3
x67 = np.array([2, 1, 3])
print('x=2 => 2x^2+x+3 =', np.polyval(x67, 2))
print('------------------------------')
# numpy.polyder => Return the derivative of the specified order of a polynomial
print('np.polyder(x67) : ', np.polyder(x67))
print('------------------------------')
#numpy.polyint => Return an antiderivative (indefinite integral) of a polynomial
print('np.polyint(x67) : ', np.polyint(x67))
print('------------------------------')
Example #33
def getIntegral(p,interval):
    pint=np.polyint(p)
    return pint(interval[1])-pint(interval[0])
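A one-line usage example (mine): since np.polyint of a poly1d is itself a callable poly1d, integrating x^2 over [0, 1] is just

print(getIntegral(np.poly1d([1, 0, 0]), (0, 1)))   # -> 0.333...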
Example #34
    # Construct Lagrange polynomials to get the polynomial basis at the collocation point
    p = NP.poly1d([1])
    for r in range(d + 1):
        if r != j:
            p *= NP.poly1d([1, -tau_root[r]]) / (tau_root[j] - tau_root[r])

    # Evaluate the polynomial at the final time to get the coefficients of the continuity equation
    D[j] = p(1.0)

    # Evaluate the time derivative of the polynomial at all collocation points to get the coefficients of the continuity equation
    pder = NP.polyder(p)
    for r in range(d + 1):
        C[j, r] = pder(tau_root[r])

    # Evaluate the integral of the polynomial to get the coefficients of the quadrature function
    pint = NP.polyint(p)
    F[j] = pint(1.0)
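    # Why F holds quadrature weights: any polynomial q of degree <= d satisfies
    # q(tau) = sum_j q(tau_j) * l_j(tau), so integrating both sides over [0, 1]
    # gives int_0^1 q = sum_j F[j] * q(tau_j)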

# Control discretization
nk = 20

# End time
tf = 10.0

# Size of the finite elements
h = tf / nk

# All collocation time points
T = NP.zeros((nk, d + 1))
for k in range(nk):
    for j in range(d + 1):
Example #35
  for piece in ppoly:
    cur_xs = np.linspace(piece.left, piece.right,
                         int(num_points * (piece.right - piece.left) / (b - a)))
    cur_ys = np.polyval(piece.hypothesis, cur_xs)
    xs.extend(cur_xs)
    ys.extend(cur_ys)
    knots.append(piece.right)
  knots = knots[:-1]
  spline = scipy.interpolate.LSQUnivariateSpline(xs, ys, knots, bbox=(a,b), k=degree)
  return spline, xs, ys


def make_distribution(c, interval):
  # tuple parameters were removed in Python 3; unpack explicitly
  a, b = interval
  nonneg, _ = poly_nonnegative(c, (a, b))
  if not nonneg:
    raise ValueError('Polynomial is negative on the given interval.')
  C = np.polyint(c)
  vals = np.polyval(C, [a, b])
  mass = vals[1] - vals[0]
  return c / mass
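# Illustrative call (mine, assuming poly_nonnegative, defined elsewhere in this
# file, accepts the input): make_distribution(np.array([0.0, 1.0]), (0.0, 2.0))
# integrates the constant polynomial 1 to mass 2 and returns the uniform
# density coefficients [0.0, 0.5].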


def get_cdf(c, interval):
  a, b = interval
  C = np.polyint(np.squeeze(c))
  d = c.size
  C[d] = C[d] - np.polyval(C, [a])[0]
  return C


def sample_from(c, interval):
  a, b = interval
  C = get_cdf(c, (a, b))
  d = c.size
Example #36
    def mark(self, start: float, target_value: float):
        """
        Answer a Mark query: return "end" such that the value of the interval [start,end] is target_value.

        :param start: Location on cake where the calculation starts.
        :param target_value: required value for the piece [start,end]
        :return: the end of an interval with a value of target_value.
        If the value is too high - returns None.

        >>> a = PiecewiseLinearAgent([11,22,33,44],[1,2,0,-4])
        >>> a.mark(1, 55)
        3
        >>> a.mark(1.5, 44)
        2.992
        >>> a.mark(1, 66)
        3.242
        >>> a.mark(1.5, 55)
        3.236
        >>> a.mark(1, 99)
        4
        >>> a.mark(1, 100)
        >>> a.mark(1, 0)
        1.0
        >>> a = PiecewiseLinearAgent([2,2],[1,0],name="alice")
        >>> a.mark(0,1)
        0.562
        >>> a.mark(1,1)
        1.5
        >>> a.mark(1,2)
        2
        >>> a.mark(0,3)
        1.5
        >>> a.mark(0,6) # returns none since no such value exists
        >>> a.mark(0,0.2)
        0.128
        """
        # the cake to the left of 0 and to the right of length is considered worthless.
        start = max(0, start)
        if start >= self.length:
            return None  # value is too high

        if target_value < 0:
            raise ValueError(
                "target_value out of range (should be non-negative): {}".format(
                    target_value))

        start_floor = int(np.floor(start))
        if start_floor >= len(self.values):
            raise ValueError(
                "mark({},{}): start_floor ({}) is >= length of values ({})".
                format(start, target_value, start_floor, self.values))

        start_fraction = (start_floor + 1 -
                          start) if start > start_floor else 0.0
        current_value = self.values_integral[start_floor](start_fraction, 1)
        if current_value == target_value:
            return start_floor + 1
        elif current_value < target_value:
            return self.mark(start_floor + 1, target_value - current_value)
        else:
            inte_poly = np.polyint(self.piece_poly[start_floor])
            if inte_poly.order > 1:
                temp_poly = np.poly1d([target_value])
                target_poly = inte_poly - temp_poly
                for root in target_poly.r:
                    if 0 <= root <= 1:
                        return round(start_floor + root, 3)
            else:
                return round((target_value / current_value) + start_floor, 3)
        # Value is too high: return None
        return None
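The else-branch above isolates a reusable trick: np.polyint turns the piece's density into a value-so-far polynomial, and subtracting the target value reduces the mark query to root-finding on [0, 1]. A standalone sketch of just that step (my own, independent of the agent class):

import numpy as np

piece = np.poly1d([1.0, 2.0])            # density x + 2 on the unit piece
target = 1.0
F = np.polyint(piece)                    # F(t) = value of the piece [0, t]
candidates = (F - np.poly1d([target])).r
print([r.real for r in candidates if abs(r.imag) < 1e-12 and 0 <= r.real <= 1])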
Example #37
  # Construct Lagrange polynomials to get the polynomial basis at the collocation point
  p = NP.poly1d([1])
  for r in range(d+1):
    if r != j:
      p *= NP.poly1d([1, -tau_root[r]]) / (tau_root[j]-tau_root[r])
  
  # Evaluate the polynomial at the final time to get the coefficients of the continuity equation
  D[j] = p(1.0)

  # Evaluate the time derivative of the polynomial at all collocation points to get the coefficients of the continuity equation
  pder = NP.polyder(p)
  for r in range(d+1):
    C[j,r] = pder(tau_root[r])

  # Evaluate the integral of the polynomial to get the coefficients of the quadrature function
  pint = NP.polyint(p)
  F[j] = pint(1.0)

# Control discretization
nk = 20

# End time
tf = 10.0  

# Size of the finite elements
h = tf/nk

# All collocation time points
T = NP.zeros((nk,d+1))
for k in range(nk):
  for j in range(d+1):
Example #38
 def integ(self, m=1, k=0):
     return LocalPoly(np.polyint(self.coeffs, m=m, k=k) * self.step**m,
                      self.middle, self.step)
Example #39
if user_args.e:
    # Get experimental data
    directory = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    filename = directory + "/experimental_results.txt"
    with open(filename) as input_file:
        data = list(zip(*(line.strip().split('\t') for line in input_file)))
        data_name = data[0][0] + data[1][0] + data[2][0]
        exp_x = np.asfarray(data[0][1:])
        exp_y = np.asfarray(data[1][1:])
        # Error is given in % Rel Error
        exp_error = np.asfarray(data[2][1:])*exp_y/100.0

    print('\nLockwood Experimental')
    exp_fit = np.poly1d(np.polyfit(FMR, exp_y, poly))

    P = np.polyint(exp_fit)
    exp_int = P(FMR[-1])*CSDA_R
    print('Integral using polyint      = ', exp_int)

    x = np.linspace(0, FMR[-1], 1000)
    y = np.zeros(1000)
    for i in range(1000):
      y[i] = exp_fit(x[i])
    # plt.plot(x,y, color='b' )
    print('Integral using simpson rule = ', integrate.simps(y, x)*CSDA_R)

    # Plot the data
    line0,err0,arg3, = ax0.errorbar(FMR, exp_y, yerr=exp_error, label="Lockwood (Exp.)", fmt="-s", markersize=5 )


markers = ["--v","-.o",":^","--<","-.>",":+","--x","-.1",":2","--3","-.4",":8","--p","-.P",":*","--h","-.H",":X","--D","-.d"]
# sets y to be the entire column of data
y = info.returnData(columnNum)

# get the polynomial - this will be graphed
z = backend.getPolynomial(x, y, columnNum)
z1D = numpy.poly1d(z)

# the cPoint is the point at which the applicant is at.
# get the polynomial inverse - we will use the inverse to find the cPoint.
zInverse = backend.getPolynomialInverse(x, y, columnNum)
zInverse1D = numpy.poly1d(zInverse)

score = float(score)

# get the antiderivative
integral = numpy.polyint(z1D, 1)

# get cPoint
cPoint = numpy.polyval(zInverse1D, score)

# evaluate the antiderivative:

# get full area
fullArea = (numpy.polyval(
    integral, info.nonZeroCount(columnNum))) - (numpy.polyval(integral, 0))

# get partial area
partArea = (numpy.polyval(integral, cPoint)) - (numpy.polyval(integral, 0))

# get the percentage
percentage = partArea * 100 / fullArea
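In other words, the applicant's standing is the area ratio percentage = 100 * (integral of z1D from 0 to cPoint) / (integral of z1D from 0 to nonZeroCount), with both definite integrals read off the single antiderivative that numpy.polyint returned above.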
Example #41
while poly[n] == 0:
    del poly[n]
    n -= 1

# create V_1 ... V_n and bring them into a workable form
list_poly = []
list_poly.append(poly)
size = int(len(poly))
#u = None
#res = 0
count = 0
print(poly)
while count != size:
    all_poly = np.array(list_poly)
    #product_poly = np.poly1d(list_poly)  # convert the result into a polynomial
    local = np.polyint(all_poly[count])
    next_poly = np.delete(local, np.s_[size:])  # take the integral
    #flip_poly = np.flip(next_poly)
    #print(flip_poly)
    print(next_poly)
    list_poly.append(next_poly)
    count += 1
    list_poly[count]

"""---Task 3---"""
# create the matrices
all_poly = np.array(list_poly)
matrix_of_SCALAR = []
vector_res = []
vector_of_coef = []
for i in all_poly[1:]:
Example #42
print(numpy.dot(A, B))
print(numpy.cross(A, B))

A = numpy.array([[1, 2], [3, 4]])
B = numpy.array([[1, 2], [3, 4]])
A[0, :]
A[:, 0]
numpy.dot(A, B)

print(numpy.inner(A, B))
print(numpy.outer(A, B))

print(numpy.poly([-1, 1, 1, 10]))
print(numpy.poly([1, 1]))
print(numpy.roots([1, 0, -1]))
print(numpy.polyint([1, 1, 1]))  #find the integration
print(numpy.polyder([1, 1, 1, 1]))  #find the derivative
print(numpy.polyval([1, -2, 0, 2], 4))
print(numpy.polyfit([0, 1, -1, 2, -2], [0, 1, 1, 4, 4],
                    2))  #fit the value with specific order

p1 = numpy.poly1d([1, 2])
p2 = numpy.poly1d([9, 5, 4])
print(p1)
print(p2)
print(numpy.polyadd(p1, p2))
print(numpy.polysub(p1, p2))
print(numpy.polymul(p1, p2))
print(numpy.polydiv(p1, p2))

#https://docs.scipy.org/doc/numpy/reference/routines.linalg.html
Example #43
    # Construct Lagrange polynomials to get the polynomial basis at the collocation point
    p = np.poly1d([1])
    for r in range(d + 1):
        if r != j:
            p *= np.poly1d([1, -tau_root[r]]) / (tau_root[j] - tau_root[r])

    # Evaluate the polynomial at the final time to get the coefficients of the continuity equation
    D[j] = p(1.0)

    # Evaluate the time derivative of the polynomial at all collocation points to get the coefficients of the continuity equation
    pder = np.polyder(p)
    for r in range(d + 1):
        C[j, r] = pder(tau_root[r])

    # Evaluate the integral of the polynomial to get the coefficients of the quadrature function
    pint = np.polyint(p)
    B[j] = pint(1.0)


class ecosystem_optimization:
    def __init__(self,
                 mass_vector,
                 parameters,
                 spectral,
                 layers,
                 elements=1,
                 depth=20):
        self.layers = layers
        self.mass_vector = mass_vector
        self.strategy_matrix = np.vstack(
            [np.repeat(1 / depth, layers)] *
Example #44
def scb_analysis(filename, plot):
    import pandas as pd
    import numpy as np
    from scb_plot import scb_plot

    # Sketchy proprietary import
    # Skips lines with non-native encoding
    df = pd.read_csv(filename, header=None, skiprows=44)
    meta = pd.read_csv(filename,
                       header=None,
                       skiprows=12,
                       nrows=3,
                       usecols=[1])

    # Rename columns for EZ access
    heads = [
        'Time (Sec)', 'Load (kN)', 'Disp (mm)', 'Core Temp (C)',
        'Surf Temp (C)'
    ]
    df.columns = heads

    vals = meta.values
    diameter, thickness, notch = vals[0][0], vals[1][0], vals[2][0]

    idxmax = df['Load (kN)'].idxmax()
    pre = df.loc[0:idxmax]
    post_all = df.loc[idxmax:]

    # Find first index of value below 0.1 kN termination point
    end = post_all.loc[post_all['Load (kN)'] < 0.1].index[0]
    post = post_all.loc[:end]

    poly_pre = np.polyfit(pre['Disp (mm)'], pre['Load (kN)'], deg=4)
    poly_post = np.polyfit(post['Disp (mm)'], post['Load (kN)'], deg=5)

    # Integrate to get area under curve
    polyint_pre = np.polyint(poly_pre)
    polyint_post = np.polyint(poly_post)

    def poly3_def_int(coefficients, bottom, top):
        lower = np.polyval(coefficients, bottom)
        upper = np.polyval(coefficients, top)
        return upper - lower

    # Calculate definite integral and add pre- and post- peak areas
    max_load_disp = df.loc[idxmax, 'Disp (mm)']
    terminal_disp = post.iloc[-1]['Disp (mm)']
    pre_work = poly3_def_int(polyint_pre, 0, max_load_disp)
    post_work = poly3_def_int(polyint_post, max_load_disp, terminal_disp)

    # Calculate as many variables as possible so far
    WORK_OF_FRACTURE = pre_work + post_work
    AREA_OF_FRACTURE = (thickness * (diameter - notch))
    FRACTURE_ENERGY = WORK_OF_FRACTURE / AREA_OF_FRACTURE
    SECANT_STIFFNESS = df['Load (kN)'].max() / max_load_disp

    # Find 1st and second derivative
    der1 = np.polyder(poly_post)
    der2 = np.polyder(der1)

    # Find x,y of inflection x,y
    px = np.poly1d(der2)

    inflx = (px).roots
    infly = np.polyval(poly_post, inflx)

    # Calculate slope of inflection point tangent
    SLOPE = np.polyval(der1, inflx[2])
    y_intercept = infly[2] - (SLOPE * inflx[2])

    # Define inflection point equation
    infl_eqn = np.poly1d([SLOPE, y_intercept])

    # Finally, calculate flexibility index and critical displacement
    CRITICAL_DISPLACEMENT = (0.1 - y_intercept) / SLOPE
    FLEXIBILITY_INDEX = (FRACTURE_ENERGY / abs(SLOPE)) * 100000

    if plot:
        scb_plot(df, pre, post, poly_pre, poly_post, inflx, infly, infl_eqn,
                 FLEXIBILITY_INDEX, CRITICAL_DISPLACEMENT, filename)

    results = [
        filename, WORK_OF_FRACTURE, AREA_OF_FRACTURE, FRACTURE_ENERGY,
        SECANT_STIFFNESS, inflx[0], infly[0], SLOPE, CRITICAL_DISPLACEMENT,
        FLEXIBILITY_INDEX
    ]

    return results
    sleep(0.1)
    pbar = tqdm(total=T)

    # Initialize polynomial coefficients
    C_t = []

    # Initialize integral of p(\mu)
    I_t = []  # np.zeros(1)

    # Extract observations and rewards
    O_t = O_list[sim]
    R_t = R_list[sim]

    # Evaluate coefficients
    C_t.append(np.array([1]))
    poly_ind_int = np.polyint(C_t[0])
    poly_eval = np.polyval(poly_ind_int, [0, 1])
    I_t.append(np.diff(poly_eval))  # Integral of p_0

    # Evaluate observed RV conditional prior distributions p_1, ..., p_{T-1}
    for t in range(1, T + 1):

        # Set current agent variables
        agent.c_t = np.asarray(C_t[t - 1])  # coefficient
        agent.o_t = O_t[t - 1]  # observation  todo: here we should sample

        # Update polynomial coefficients
        agent.learn(R_t[t - 1])
        C_t.append(agent.c_t)

        # Compute integral over \mu
def bspline_penalty_matrix_optimized(
        linear_operator: LinearDifferentialOperator, basis: BSpline):

    coefs = linear_operator.constant_weights()
    if coefs is None:
        return NotImplemented

    nonzero = np.flatnonzero(coefs)

    # All derivatives above the order of the spline are effectively
    # zero
    nonzero = nonzero[nonzero < basis.order]

    if len(nonzero) == 0:
        return np.zeros((basis.n_basis, basis.n_basis))

    # We will only deal with one nonzero coefficient right now
    if len(nonzero) != 1:
        return NotImplemented

    derivative_degree = nonzero[0]

    if derivative_degree == basis.order - 1:
        # The derivative of the bsplines are constant in the intervals
        # defined between knots
        knots = np.array(basis.knots)
        mid_inter = (knots[1:] + knots[:-1]) / 2
        basis_deriv = basis.derivative(order=derivative_degree)
        constants = basis_deriv(mid_inter)[..., 0].T
        knots_intervals = np.diff(basis.knots)
        # Integration of product of constants
        return constants.T @ np.diag(knots_intervals) @ constants

    # We only deal with the case without zero length intervals
    # for now
    if np.any(np.diff(basis.knots) == 0):
        return NotImplemented

    # Compute exactly using the piecewise polynomial
    # representation of splines

    # Places m knots at the boundaries
    knots = basis._evaluation_knots()

    # c is used the select which spline the function
    # PPoly.from_spline below computes
    c = np.zeros(len(knots))

    # Initialise empty list to store the piecewise polynomials
    ppoly_lst = []

    no_0_intervals = np.where(np.diff(knots) > 0)[0]

    # For each basis gets its piecewise polynomial representation
    for i in range(basis.n_basis):

        # Write a 1 in c in the position of the spline
        # transformed in each iteration
        c[i] = 1

        # Gets the piecewise polynomial representation and gets
        # only the positions for no zero length intervals
        # This polynomial are defined relatively to the knots
        # meaning that the column i corresponds to the ith knot.
        # Let the ith knot be a
        # Then f(x) = pp(x - a)
        pp = PPoly.from_spline((knots, c, basis.order - 1))
        pp_coefs = pp.c[:, no_0_intervals]

        # We have the coefficients for each interval in coordinates
        # (x - a), so we will need to subtract a when computing the
        # definite integral
        ppoly_lst.append(pp_coefs)
        c[i] = 0

    # Now for each pair of basis computes the inner product after
    # applying the linear differential operator
    penalty_matrix = np.zeros((basis.n_basis, basis.n_basis))
    for interval in range(len(no_0_intervals)):
        for i in range(basis.n_basis):
            poly_i = np.trim_zeros(ppoly_lst[i][:, interval], 'f')
            if len(poly_i) <= derivative_degree:
                # if the order of the polynomial is less than or
                # equal to the derivative, the result of the
                # integral will be 0
                continue
            # indefinite integral
            derivative = polyder(poly_i, derivative_degree)
            square = polymul(derivative, derivative)
            integral = polyint(square)

            # definite integral
            penalty_matrix[i, i] += np.diff(
                polyval(
                    integral, basis.knots[interval:interval + 2] -
                    basis.knots[interval]))[0]

            for j in range(i + 1, basis.n_basis):
                poly_j = np.trim_zeros(ppoly_lst[j][:, interval], 'f')
                if len(poly_j) <= derivative_degree:
                    # if the order of the polynomial is less than
                    # or equal to the derivative, the result of
                    # the integral will be 0
                    continue
                # indefinite integral
                integral = polyint(
                    polymul(polyder(poly_i, derivative_degree),
                            polyder(poly_j, derivative_degree)))
                # definite integral
                penalty_matrix[i, j] += np.diff(
                    polyval(
                        integral, basis.knots[interval:interval + 2] -
                        basis.knots[interval]))[0]
                penalty_matrix[j, i] = penalty_matrix[i, j]
    return penalty_matrix
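The core inner-product step of the exact branch, isolated as a runnable sketch (mine, with made-up coefficients): for two local pieces p_i and p_j, the contribution on an interval of length h is the definite integral of the product of their k-th derivatives.

import numpy as np

p_i, p_j = np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])  # x^2 and x
k, h = 1, 0.5
integrand = np.polymul(np.polyder(p_i, k), np.polyder(p_j, k))
value = np.diff(np.polyval(np.polyint(integrand), [0.0, h]))[0]
print(value)   # int_0^0.5 (2x)(1) dx = 0.25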
Example #47
def compute_point_and_mat_extrapo(degree, mesh):
    """
###############################################################################
# NAME : compute_point_and_mat_extrapo
# DESC : function to compute the positions of the flux points and the sol 
#        points in a parametric cell. It will also compute the matrices for
#        extrapolation and derivation
# INPUT : degree = degree of the polynomial interpolation knowing that :
#                  number of solution points = degree + 1
#                  number of flux points = degree + 2
#         mesh   = coordinates of the cells in the mesh
# OUTPUT : sol_point
#          flux_point
#          mat_global_extrapolation
#          mat_global_d_flux_at_sol_point
#          mat_global_grad_border
#          global_R
#          global_sec_mem_rev_mat
###############################################################################"""

    n_cells = len(mesh) - 1
    n_solution_points = degree + 1
    n_flux_points = degree + 2

    #Compute the flux points and solution points
    flux_point = np.zeros(n_flux_points)
    solution_point = np.zeros(n_solution_points)

    #Flux points
    polynom_legendre = np.polynomial.legendre.Legendre.basis(degree)
    roots_legendre = np.polynomial.legendre.Legendre.roots(polynom_legendre)

    for i in range(0, degree):
        flux_point[i + 1] = roots_legendre[i]

    flux_point[0] = -1.
    flux_point[n_flux_points - 1] = 1.

    #Solution points
    polynom_chebyshev = np.polynomial.chebyshev.Chebyshev.basis(
        n_solution_points)
    solution_point = np.polynomial.chebyshev.Chebyshev.roots(polynom_chebyshev)

    #computing the flux_point and the sol_point in the iso cell
    #computing the legendre polynomial of degree p
    polynom_legendre = np.polynomial.legendre.Legendre.basis(n_flux_points - 2)
    roots = np.polynomial.legendre.Legendre.roots(polynom_legendre)
    flux_point[1:len(flux_point) - 1] = roots[:]
    flux_point[0] = -1.
    flux_point[len(flux_point) - 1] = 1.

    #building the derivative matrix to compute the derivative of the flux at
    #sol point

    #building the extrapolation matrix sol point toward flux point
    local_extrapolation_matrix = np.zeros((n_flux_points, n_solution_points))
    identity_matrix = np.identity(n_solution_points)
    for j in range(0, n_solution_points):

        polynom_lagrange = interpolate.lagrange(solution_point,
                                                identity_matrix[j])

        for i in range(0, n_flux_points):
            local_extrapolation_matrix[i, j] = polynom_lagrange(flux_point[i])

    local_derivative_matrix = np.zeros((n_solution_points, n_flux_points))
    identity_matrix = np.identity(n_flux_points)

    for j in range(0, n_flux_points):

        polynom_lagrange = interpolate.lagrange(flux_point, identity_matrix[j])
        polynom_derivative_lagrange = np.polyder(polynom_lagrange, 1)

        for i in range(0, n_solution_points):
            local_derivative_matrix[i, j] = polynom_derivative_lagrange(
                solution_point[i])

    #compute the global extrapolation matrix
    global_extrapolation_matrix = np.zeros(
        (n_cells * n_flux_points, n_cells * n_solution_points))

    for i in range(0, n_cells * n_flux_points):
        for j in range(0, n_solution_points):
            global_extrapolation_matrix[i, n_solution_points*math.floor(i/n_flux_points)+j] =\
            local_extrapolation_matrix[i%n_flux_points, j]

    #compute the global extrapolation matrix
    global_derivative_matrix = np.zeros(
        (n_cells * n_solution_points, n_cells * n_flux_points))

    for i in range(0, n_cells * n_solution_points):
        for j in range(0, n_flux_points):
            global_derivative_matrix[i, n_flux_points*math.floor(i/n_solution_points)+j] =\
            (1/(mesh[math.floor(i/n_solution_points)+1] - mesh[math.floor(i/n_solution_points)]))*\
            local_derivative_matrix[i%n_solution_points, j]

    #compute the vector to compute gradient of u at 1 and -1
    local_gradient_matrix = np.zeros((2, n_flux_points))
    identity_matrix = np.identity(degree + 2)

    for j in range(0, n_flux_points):

        polynom_lagrange = interpolate.lagrange(flux_point, identity_matrix[j])
        polynom_derivative_lagrange = np.polyder(polynom_lagrange, 1)

        for i in range(0, 2):
            if i == 0:
                local_gradient_matrix[i, j] = polynom_derivative_lagrange(-1)
            if i == 1:
                local_gradient_matrix[i, j] = polynom_derivative_lagrange(1)

    #compute the global mat_grad_border matrix
    global_gradient_matrix = np.zeros((2 * n_cells, n_cells * n_flux_points))
    for i in range(0, n_cells * 2):
        for j in range(0, n_flux_points):
            global_gradient_matrix[i, n_flux_points*math.floor(i/2)+j] =\
            local_gradient_matrix[i%2, j]

    #compute the local lifting matrix
    local_lifting_matrix = np.zeros((n_flux_points, n_flux_points))
    identity_matrix = np.identity(n_flux_points)
    for i in range(0, n_flux_points):

        polynom_line = interpolate.lagrange(flux_point, identity_matrix[i])

        for j in range(0, n_flux_points):
            polynom_column = interpolate.lagrange(flux_point,
                                                  identity_matrix[j])
            polynom_lagrange = np.polyint(
                np.polymul(polynom_line, polynom_column))
            local_lifting_matrix[
                i, j] = polynom_lagrange(1) - polynom_lagrange(-1)

    #compute the global lifting matrix
    global_lifting_matrix = np.zeros(
        (n_cells * n_flux_points, n_cells * n_flux_points))
    for i in range(0, n_cells * n_flux_points):
        for j in range(0, n_flux_points):
            global_lifting_matrix[i, n_flux_points*math.floor(i/n_flux_points)+j] =\
            (1/(mesh[math.floor(i/n_flux_points)+1]-mesh[math.floor(i/n_flux_points)]))*\
            local_lifting_matrix[i%n_flux_points, j]

    #compute the matrix to build the second member of the lifting equation
    loc_sec_mem_rev_mat = np.zeros((n_flux_points, 2))
    identity_matrix = np.identity(n_flux_points)
    for i in range(0, n_flux_points):
        polynom_line = interpolate.lagrange(flux_point, identity_matrix[i])
        loc_sec_mem_rev_mat[i, 0] = polynom_line(-1)
        loc_sec_mem_rev_mat[i, 1] = -polynom_line(1)

    #compute the global matrix to build the second member of the lifting equation
    global_sec_mem_rev_mat = np.zeros((n_flux_points * n_cells, 2 * n_cells))
    for i in range(0, n_flux_points * n_cells):
        for j in range(0, 2):
            global_sec_mem_rev_mat[i, 2*math.floor(i/n_flux_points)+j] =\
            loc_sec_mem_rev_mat[i%n_flux_points, j]

    return (solution_point, flux_point, global_extrapolation_matrix,
            global_derivative_matrix, global_gradient_matrix,
            global_lifting_matrix, global_sec_mem_rev_mat)
Example #48
A= list(map(int, input().split()))
B= list(map(int, input().split()))
A = numpy.array(A)
B = numpy.array(B)
print(numpy.inner(A,B))
print(numpy.outer(A,B))
 


y= list(map(float, input().split()))
x= int(input())
y= numpy.array(y)
print(numpy.polyval(y, x)) 
print(numpy.poly([-1, 1, 1, 10]))
print(numpy.roots([1, 0, -1]))
print(numpy.polyint([1, 1, 1]))
print(numpy.polyder([1, 1, 1, 1]))
print(numpy.polyval([1, -2, 0, 2], 4))
print(numpy.polyfit([0, 1, -1, 2, -2], [0, 1, 1, 4, 4], 2))


print(numpy.linalg.det([[1, 2], [2, 1]]))
vals, vecs = numpy.linalg.eig([[1, 2], [2, 1]])
print(numpy.linalg.inv([[1, 2], [2, 1]]))


n=int(input())
A = numpy.array([input().split() for _ in range(n)], float)
numpy.set_printoptions(legacy='1.13')
print(numpy.linalg.det(A))
Example #49
import numpy as np

def poly_area(x1, x2, p1, p2=None):
    # Unsigned integral of (p1 - p2) over [x1, x2].
    if p2 is None or np.size(p2) == 0:
        p2 = np.zeros(1)
    p = np.polyadd(p1, -np.asarray(p2))
    i = np.polyint(p)
    return np.abs(np.polyval(i, x2) - np.polyval(i, x1))
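
# Illustrative check (assumes poly_area as defined above): the region between
# y = x and y = x^2 on [0, 1] has area 1/6.
print(poly_area(0, 1, np.poly1d([1, 0]), np.poly1d([1, 0, 0])))  # ~0.1667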
Example #50
"""
polynomial and root
"""

import numpy as np
print(np.poly([-1,1])) # (x+1)(x-1)=x^2-1  ==> [1,0,-1]
print(np.poly([1,2])) # (x-1)(x-2)=x^2-3x+2  ==> [1,-3,2]
print (np.poly([1,2,3,4])) # (x-1)(x-2)(x-3)(x-4) ==> [1,-10,35,-50,24]

print (np.roots([1, 0, -1])) #x^2-1 => [-1,1]
print (np.roots([1,-3,2])) #x^2-3*x+2 =>[1,2]
print(np.roots([1,2,3,4,5])) #no real roots (complex output)

#Anti-Derivative of the polynomial.
print (np.polyint([1, 2])) #antiderivative of x+2 => [0.5, 2, 0]
print (np.polyint([3, 2,6])) #3x^2+2x+6

#derivative of poly
print (np.polyder([1,2,3,4])) #(x^3+2x^2+3x+4)' = 3x^2+4x+3 => [3,4,3]

#value of poly
print (np.polyval([1,2,3],1)) # x^2+2*x+3 (x=1) => 6
print (np.polyval([1,2,3],2)) # x^2+2*x+3 (x=2) =>11
print (np.polyval([1,2,3],3)) # x^2+2*x+3 (x=3) => 18
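
#aside: polyint and polyder invert one another, up to the integration
#constant that polyder discards
print (np.polyder(np.polyint([1, 2, 3]))) # [1. 2. 3.]
print (np.polyint(np.polyder([1, 2, 3]))) # [1. 2. 0.] constant term lost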



Example #51
import numpy as np

def calc(g):
    # Integrate the polynomial with coefficients g.c over the interval g.r.
    p = np.poly1d(g.c)
    p_i = np.polyint(p)  # antiderivative
    I = p_i(g.r[1]) - p_i(g.r[0])
    return [I]
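
# Usage sketch: calc only needs a coefficient array g.c and an interval g.r;
# the namedtuple below is a hypothetical stand-in for the caller's object.
from collections import namedtuple

Piece = namedtuple('Piece', ['c', 'r'])
print(calc(Piece(c=[1, 0, 0], r=(0, 1))))  # integral of x^2 on [0, 1] -> [0.333...]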
Example #52
import numpy as np
import matplotlib.pyplot as plt

def v(x):
    # stand-in for the snippet's undefined display helper
    print(x)

#2x+y+z=7
#3x-4y+5z=-8
#x+y+z=6
c = np.array([[2, 1, 1], [3, -4, 5], [1, 1, 1]])
d = np.array([7, -8, 6])
e = np.linalg.solve(c, d)
v(e)

#4x^3+3x^2-2x+10=0
ppar = [4, 3, -2, 10]
v(np.roots(ppar))

#y = 3x^2 + 1
p = np.poly1d([3, 0, 1])  #coefs from ax^2 + bx + c
derivative = np.polyder(p)
v(derivative)
integral = np.polyint(p)
v(integral)
xs = np.arange(-2, 2, 0.01)
plt.plot(xs, p(xs))  #plot y = 3x^2 + 1

xvals = np.arange(-2, 1, 0.01)  # Grid of 0.01 spacing from -2 to 1
newyvals = 1 - 0.5 * xvals**2  # Evaluate quadratic approximation on xvals
plt.plot(xvals, newyvals, 'r--')  # Create line plot with red dashed line
plt.xlabel('Input')
plt.ylabel('Function values')
plt.show()  # Show the figure

xvals = np.arange(-5, 5, 0.1)  # Grid of 0.1 spacing from -5 to 5
newyvals = -8 - 2 * xvals + xvals**2  # Evaluate quadratic approximation on xvals
fig, ax = plt.subplots()
ax.plot(xvals, newyvals, 'r--')  # Create line plot with red dashed line
ax.spines['left'].set_position('zero')
Example #53
def old_chirp(t, f0=0, t1=1, f1=100, method='linear', phi=0, qshape=None):
    """Frequency-swept cosine generator.

    Parameters
    ----------
    t : ndarray
        Times at which to evaluate the waveform.
    f0 : float or ndarray, optional
        Frequency (in Hz) of the waveform at time 0.  If `f0` is an
        ndarray, it specifies the frequency change as a polynomial in
        `t` (see Notes below).
    t1 : float, optional
        Time at which `f1` is specified.
    f1 : float, optional
        Frequency (in Hz) of the waveform at time `t1`.
    method : {'linear', 'quadratic', 'logarithmic'}, optional
        Kind of frequency sweep.
    phi : float
        Phase offset, in degrees.
    qshape : {'convex', 'concave'}
        If method is 'quadratic', `qshape` specifies its shape.

    Notes
    -----
    If `f0` is an array, it forms the coefficients of a polynomial in
    `t` (see `numpy.polyval`). The polynomial determines the waveform
    frequency change in time.  In this case, the values of `f1`, `t1`,
    `method`, and `qshape` are ignored.

    This function is deprecated.  It will be removed in SciPy version 0.9.0.
    It exists so that in version 0.8.0 the new chirp function can call this
    function to preserve the old behavior of the quadratic chirp.
    """
    warnings.warn("The function old_chirp is deprecated, and will be removed in "
                    "SciPy 0.9", DeprecationWarning)
    # Convert to radians.
    phi *= pi / 180
    if size(f0) > 1:
        # We were given a polynomial.
        return cos(2*pi*polyval(polyint(f0),t)+phi)
    if method in ['linear','lin','li']:
        beta = (f1-f0)/t1
        phase_angle = 2*pi * (f0*t + 0.5*beta*t*t)
    elif method in ['quadratic','quad','q']:
        if qshape == 'concave':
            mxf = max(f0,f1)
            mnf = min(f0,f1)
            f1,f0 = mxf, mnf
        elif qshape == 'convex':
            mxf = max(f0,f1)
            mnf = min(f0,f1)
            f1,f0 = mnf, mxf
        else:
            raise ValueError("qshape must be either 'concave' or 'convex' but "
                "a value of %r was given." % qshape)
        beta = (f1-f0)/t1/t1
        phase_angle = 2*pi * (f0*t + beta*t*t*t/3)
    elif method in ['logarithmic','log','lo']:
        if f1 <= f0:
            raise ValueError(
                "For a logarithmic sweep, f1=%f must be larger than f0=%f."
                % (f1, f0))
        beta = log10(f1-f0)/t1
        phase_angle = 2*pi * (f0*t + (pow(10,beta*t)-1)/(beta*log(10)))
    else:
        raise ValueError("method must be 'linear', 'quadratic', or "
            "'logarithmic' but a value of %r was given." % method)

    return cos(phase_angle + phi)
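
# Aside (not SciPy API): the polynomial branch above is the only one that
# needs polyint -- the phase is 2*pi times the antiderivative of the
# frequency polynomial. A self-contained sketch of just that branch:
import numpy as np

def poly_chirp(t, freq_coeffs, phi_deg=0.0):
    # phase(t) = 2*pi * integral of f(t) dt
    phase = 2 * np.pi * np.polyval(np.polyint(freq_coeffs), t)
    return np.cos(phase + np.deg2rad(phi_deg))

t = np.linspace(0, 1, 8)
print(poly_chirp(t, [100, 10]))  # instantaneous frequency f(t) = 100*t + 10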
Example #54
 def s(theta):
     # integral of torque wrt theta
     pI = np.polyint(p)
     return np.polyval(pI, theta)
Example #55
import numpy as np

def scalar_product(a_poly: np.poly1d, b_poly: np.poly1d) -> float:
    """Find the scalar product of two polynomials."""
    # multiply the polynomials and take the antiderivative
    integral = np.polyint(np.polymul(a_poly, b_poly))
    # evaluate the definite integral from 0 to 1
    return integral(1) - integral(0)
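
# Illustrative check (not from the original): this is the L2 inner product
# on [0, 1].
print(scalar_product(np.poly1d([1]), np.poly1d([1, 0])))     # integral of x   = 0.5
print(scalar_product(np.poly1d([1, 0]), np.poly1d([1, 0])))  # integral of x^2 = 1/3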
Example #56
 def gen_integrals(self, k=0):
     # Antiderivative of each basis polynomial.
     tmppol = [np.polyint(poly) for poly in self.poly]
     # Antiderivatives evaluated at every root: tmp_ints[pos, pol].
     tmp_ints = np.array([[pol(pos) for pol in tmppol] for pos in self.roots])
     # Definite integrals from the first root to each later root,
     self.int_out = np.array([row - tmp_ints[0] for row in tmp_ints[1:]])
     # and from each earlier root to the last root.
     self.int_in = np.array([tmp_ints[-1] - row for row in tmp_ints[:-1]])
     return