Code example #1
File: ar.py Project: idiap/ssp
def ARLineSpectra(ar):
    """
    Convert AR coeffs to LSPs

    From wikipedia:
    A palindromic polynomial (i.e., P) of odd degree has -1 as a root.
    An antipalindromic polynomial (i.e., Q) has 1 as a root.
    An antipalindromic polynomial of even degree has -1 and 1 as roots
    """
    order = ar.shape[-1]
    ret = np.zeros(ar.shape)
    for a, o in core.refiter([ar, ret], core.newshape(ar.shape)):
        p = np.ones((order+2))
        q = np.ones((order+2))
        q[-1] = -1.0
        for i in range(order):
            p[i+1] = -a[i] - a[order-i-1]
            q[i+1] = -a[i] + a[order-i-1]
        pr = np.roots(p)
        qr = np.roots(q)

        j = 0
        an = np.ndarray((order+2))
        for i in range(len(pr)):
            if np.imag(pr[i]) >= 0.0:
                an[j] = np.angle(pr[i])
                j += 1
            if np.imag(qr[i]) >= 0.0:
                an[j] = np.angle(qr[i])
                j += 1
        # The angle list (an) will always contain both 0 and pi; they
        # will move to the ends after the sort
        o[...] = np.sort(an)[1:-1]
    return ret
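The following is a small standalone numpy sketch (not part of the ssp project, and bypassing its core.refiter helper) illustrating the P/Q construction used above for a single stable second-order AR coefficient vector; because the AR polynomial is minimum phase, the roots of both the palindromic and antipalindromic polynomials should lie on the unit circle:

import numpy as np

# hypothetical stable AR coefficients (prediction convention)
a = np.array([0.5, -0.25])
order = len(a)
p = np.ones(order + 2)
q = np.ones(order + 2)
q[-1] = -1.0
for i in range(order):
    p[i + 1] = -a[i] - a[order - i - 1]
    q[i + 1] = -a[i] + a[order - i - 1]
print(np.abs(np.roots(p)))  # all magnitudes ~1.0
print(np.abs(np.roots(q)))  # all magnitudes ~1.0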
Code example #2
File: metric.py Project: j-dr/bigbrother
    def splitBimodal(self, x, y, largepoly=30):
        p = np.polyfit(x, y, largepoly) # polynomial coefficients for fit

        extrema = np.roots(np.polyder(p))
        extrema = extrema[np.isreal(extrema)]
        extrema = extrema[(extrema - x[1]) * (x[-2] - extrema) > 0] # exclude the endpoints due to false maxima during fitting
        try:
            root_vals = [sum([p[::-1][i]*(root**i) for i in range(len(p))]) for root in extrema]
            peaks = extrema[np.argpartition(root_vals, -2)][-2:] # find two peaks of bimodal distribution

            mid, = np.where((x - peaks[0]) * (peaks[1] - x) > 0)  # want data points between the peaks
        except:
            warnings.warn("Peak finding failed!")
            return None

        try:
            p_mid = np.polyfit(x[mid], y[mid], 2) # fit middle section to a parabola
            midpoint = np.roots(np.polyder(p_mid))[0]
        except:
            warnings.warn("Polynomial fit between peaks of distribution poorly conditioned. Falling back on using the minimum! May result in inaccurate split determination.")
            if len(mid) == 0:
                return None

            midx = np.argmin(y[mid])
            midpoint = x[mid][midx]

        return midpoint
Code example #3
File: utils.py Project: rferdman/pypsr
def polyxval(poly_coeffs, y, x1=None, x2=None, warn=False):
     if (x1==None or x2==None):
          print "Must assign range [x1, x2] in which to search for x values."
          return None

     if(x1==x2):
          print "x1 must not equal x2."
          return None

     if (x1 > x2):
          temp_x = x1
          x1 = x2
          x2 = temp_x

# Suppress warnings (default)
     if (warn==False):
          warnings.simplefilter('ignore', np.RankWarning)

     poly_coeffs_y = poly_coeffs
     # subtract y-value from zeroth order coefficient (i.e. no x's)
     poly_coeffs_y[len(poly_coeffs_y)-1] -= y
     re_roots = \
         np.roots(poly_coeffs_y)[np.roots(poly_coeffs_y).imag == 0.].real

# restrict solution to range [x1, x2]
     x_val = re_roots[(re_roots >= x1) & (re_roots <= x2)]

     return x_val
Code example #4
File: wavelets.py Project: 258073127/MissionPlanner
def daub(p):
    """
    The coefficients for the FIR low-pass filter producing Daubechies wavelets.

    p>=1 gives the order of the zero at f=1/2.
    There are 2p filter coefficients.

    Parameters
    ----------
    p : int
        Order of the zero at f=1/2, can have values from 1 to 34.

    """
    sqrt = np.sqrt
    if p < 1:
        raise ValueError("p must be at least 1.")
    if p==1:
        c = 1/sqrt(2)
        return np.array([c,c])
    elif p==2:
        f = sqrt(2)/8
        c = sqrt(3)
        return f*np.array([1+c,3+c,3-c,1-c])
    elif p==3:
        tmp  = 12*sqrt(10)
        z1 = 1.5 + sqrt(15+tmp)/6 - 1j*(sqrt(15)+sqrt(tmp-15))/6
        z1c = np.conj(z1)
        f = sqrt(2)/8
        d0 = np.real((1-z1)*(1-z1c))
        a0 = np.real(z1*z1c)
        a1 = 2*np.real(z1)
        return f/d0*np.array([a0, 3*a0-a1, 3*a0-3*a1+1, a0-3*a1+3, 3-a1, 1])
    elif p<35:
        # construct polynomial and factor it
        if p<35:
            P = [comb(p-1+k,k,exact=1) for k in range(p)][::-1]
            yj = np.roots(P)
        else:  # try different polynomial --- needs work
            P = [comb(p-1+k,k,exact=1)/4.0**k for k in range(p)][::-1]
            yj = np.roots(P) / 4
        # for each root, compute two z roots, select the one with |z|>1
        # Build up final polynomial
        c = np.poly1d([1,1])**p
        q = np.poly1d([1])
        for k in range(p-1):
            yval = yj[k]
            part = 2*sqrt(yval*(yval-1))
            const = 1-2*yval
            z1 = const + part
            if (abs(z1)) < 1:
                z1 = const - part
            q = q * [1,-z1]

        q = c * np.real(q)
        # Normalize result
        q = q / np.sum(q) * sqrt(2)
        return q.c[::-1]
    else:
        raise ValueError("Polynomial factorization does not work "
              "well for p too large.")
Code example #5
def fit_edge_hist(bins, counts, fwhm_guess=10.0):
    if len(bins) == len(counts)+1: bins = bins[:-1]+0.5*(bins[1]-bins[0]) # convert bin edges to bin centers if necessary
    pfit = np.polyfit(bins, counts, 3)
    edgeGuess = np.roots(np.polyder(pfit,2))
    try:
        preGuessX, postGuessX = np.sort(np.roots(np.polyder(pfit,1)))
    except:
        raise ValueError("failed to generate guesses")
    use = bins>(edgeGuess+2*fwhm_guess)
    if np.sum(use)>4:
        pfit2 = np.polyfit(bins[use], counts[use],1)
        slope_guess = pfit2[0]
    else:
        slope_guess=1
    pGuess = np.array([edgeGuess, np.polyval(pfit,preGuessX), np.polyval(pfit,postGuessX),fwhm_guess,slope_guess],dtype='float64')

    try:
        pOut = curve_fit(edge_model, bins, counts, pGuess)
    except:
        return (0,0,0,0,0,0)
    (edgeCenter, preHeight, postHeight, fwhm, bgSlope) = pOut[0]
    model_counts = edge_model(bins, edgeCenter, preHeight, postHeight, fwhm, bgSlope)
    num_degree_of_freedom = float(len(bins)-1-5) # num points - 1 - number of fitted parameters
    chi2 = np.sum(((counts - model_counts)**2)/model_counts)/num_degree_of_freedom
    return (edgeCenter, preHeight, postHeight, fwhm, bgSlope, chi2)
Code example #6
File: midify.py Project: jyt109/speech_density
def lpc_to_lsf(all_lpc):
    if len(all_lpc.shape) < 2:
        all_lpc = all_lpc[None]
    order = all_lpc.shape[1] - 1
    all_lsf = np.zeros((len(all_lpc), order))
    for i in range(len(all_lpc)):
        lpc = all_lpc[i]
        lpc1 = np.append(lpc, 0)
        lpc2 = lpc1[::-1]
        sum_filt = lpc1 + lpc2
        diff_filt = lpc1 - lpc2

        if order % 2 != 0:
            deconv_diff, _ = sg.deconvolve(diff_filt, [1, 0, -1])
            deconv_sum = sum_filt
        else:
            deconv_diff, _ = sg.deconvolve(diff_filt, [1, -1])
            deconv_sum, _ = sg.deconvolve(sum_filt, [1, 1])

        roots_diff = np.roots(deconv_diff)
        roots_sum = np.roots(deconv_sum)
        angle_diff = np.angle(roots_diff[::2])
        angle_sum = np.angle(roots_sum[::2])
        lsf = np.sort(np.hstack((angle_diff, angle_sum)))
        if len(lsf) != 0:
            all_lsf[i] = lsf
    return np.squeeze(all_lsf)
Code example #7
def verify_roots_of_generated_polynomial_on_unit_circle(num_of_lamda, lamda_lst):

    # we need to flip the coefficients left to right, because the
    # numpy.roots function expects the coefficients in the following format:
    # p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]

    # numpy.fliplr requires a 2D array; for a 1D array we need the following trick
    lamda_new = np.fliplr([lamda_lst])[0]

    roots = np.roots(lamda_new)

    # get the absolute of the roots to see if they are on unit circle
    roots_abs = np.absolute(roots)

    # since leeya polynomial requires one symmetry, that is p[0] = p[n], p[1] = p[n-1], etc...
    # above polynomial will not meet this symmetry, let's just create a symmetric version

    lamda_symmetric = np.concatenate([lamda_new, lamda_lst])
    roots_symmetric = np.roots(lamda_symmetric)

    # get the absolute of the roots to see if they are on unit circle
    roots_symmetric_abs = np.absolute(roots_symmetric)

    # print "num_of_lamda", num_of_lamda,  'roots_abs =', roots_abs
    print "num_of_lamda", num_of_lamda,  'roots_symmetric_abs =', roots_symmetric_abs
    # print "num_of_lamda", num_of_lamda,  'roots_symmetric =', roots_symmetric

    return roots, roots_abs, roots_symmetric, roots_symmetric_abs
Code example #8
File: lazy_lpc.py Project: ARK1988/audiolazy
def lsf(fir_filt):
  """
  Find the Line Spectral Frequencies (LSF) from a given FIR filter.

  Parameters
  ----------
  fir_filt :
    An LTI FIR filter as a LinearFilter object.

  Returns
  -------
  A tuple with all LSFs in rad/sample, alternating from the forward prediction
  and backward prediction filters, starting with the lowest LSF value.

  """
  den = fir_filt.denominator
  if len(den) != 1:
    raise ValueError("Filter has feedback")
  elif den[0] != 1: # So we don't have to worry with the denominator anymore
    fir_filt /= den[0]

  from numpy import roots
  rev_filt = ZFilter(fir_filt.numerator[::-1]) * z ** -1
  P = fir_filt + rev_filt
  Q = fir_filt - rev_filt
  roots_p = roots(P.numerator[::-1])
  roots_q = roots(Q.numerator[::-1])
  lsf_p = sorted(phase(roots_p))
  lsf_q = sorted(phase(roots_q))
  return reduce(operator.concat, xzip(*sorted([lsf_p, lsf_q])), tuple())
Code example #9
def test_calculateTF():
	"""Test function for calculateTF()"""
	from ._utils import cplxpair
	ABCD = [[1.000000000000000, 0., 0., 0.044408783846879, -0.044408783846879],
	        [0.999036450096481, 0.997109907515262, -0.005777399147297, 0., 0.499759089304780],
	        [0.499759089304780, 0.999036450096481, 0.997109907515262,  0., -0.260002096136488],
	        [0,                 0,                 1.000000000000000,  0, -0.796730400347216]]
	ABCD = np.array(ABCD)
	ntf, stf = calculateTF(ABCD)
	ntf_zeros, ntf_poles = np.roots(ntf.num), np.roots(ntf.den)
	stf_zeros, stf_poles = np.roots(stf.num), np.roots(stf.den)
	mntf_poles = np.array((1.498975311463384, 1.102565142679772, 0.132677264750882))
	mntf_zeros = np.array((0.997109907515262 + 0.075972576202904j, 
	                       0.997109907515262 - 0.075972576202904j, 
	                       1.000000000000000 + 0.000000000000000j)
	                      )
	mstf_zeros = np.array((-0.999999999999996,))
	mstf_poles = np.array((1.498975311463384, 1.102565142679772, 0.132677264750882))
	# for some reason, sometimes the zeros are in different order.
	ntf_zeros, mntf_zeros = cplxpair(ntf_zeros), cplxpair(mntf_zeros)
	stf_zeros, mstf_zeros = cplxpair(stf_zeros), cplxpair(mstf_zeros)
	ntf_poles, mntf_poles = cplxpair(ntf_poles), cplxpair(mntf_poles)
	stf_poles, mstf_poles = cplxpair(stf_poles), cplxpair(mstf_poles)
	assert np.allclose(ntf_zeros, mntf_zeros, rtol=1e-5, atol=1e-8)
	assert np.allclose(ntf_poles, mntf_poles, rtol=1e-5, atol=1e-8)
	assert np.allclose(stf_zeros, mstf_zeros, rtol=1e-5, atol=1e-8)
	assert np.allclose(stf_poles, mstf_poles, rtol=1e-5, atol=1e-8)
Code example #10
File: bode_asymptotic.py Project: thetdg/SNaP
def plot_range(num, den):

    # The corner frequencies
    zero = sort(abs(roots(num)))    
    pole = sort(abs(roots(den)))
    
    # Calculate the minimum and maximum corner frequencies needed
    if len(pole) == 0:
        corner_min = zero[0]
        corner_max = zero[-1]
    
    elif len(zero) == 0:
        corner_min = pole[0]
        corner_max = pole[-1]
        
    elif len(zero) > 0 and len(pole) > 0:
        corner_min = min(zero[0], pole[0])
        corner_max = max(zero[-1], pole[-1]) 
    
    else:
        corner_min, corner_max = 0.1, 10
    
    # start from 2 decades lower than the lowest corner 
    # and end at 2 decades above the highest corner
    freq_range = [10 ** (floor(log10(corner_min)) - 1), 
                   10 ** (floor(log10(corner_max)) + 2)]
    
    return freq_range
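A usage sketch for plot_range (assuming the module's numpy imports, e.g. from numpy import roots, sort, floor, log10): a zero at s = -2 with poles at s = -1 and s = -10 gives corner frequencies 1 and 10, so the returned range runs from one decade below the lowest corner to two decades above the highest.

from numpy import roots, sort, floor, log10

num = [1, 2]         # zero at s = -2
den = [1, 11, 10]    # poles at s = -1 and s = -10
print(plot_range(num, den))   # roughly [0.1, 1000.0]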
Code example #11
    def setUp(self):
        ABCD = [[1.0, 0.0, 0.0, 0.044408783846879, -0.044408783846879],
                [0.999036450096481, 0.997109907515262, -0.005777399147297,
                 0.0, 0.499759089304780],
                [0.499759089304780, 0.999036450096481, 0.997109907515262,
                 0.0, -0.260002096136488],
                [0.0, 0.0, 1.0,  0.0, -0.796730400347216]]
        ABCD = np.array(ABCD)
        (ntf, stf) = ds.calculateTF(ABCD)
        (ntf_zeros, ntf_poles) = (np.roots(ntf.num), np.roots(ntf.den))
        (stf_zeros, stf_poles) = (np.roots(stf.num), np.roots(stf.den))
        mntf_poles = np.array((1.498975311463384, 1.102565142679772,
                               0.132677264750882))
        mntf_zeros = np.array((0.997109907515262 + 0.075972576202904j,
                               0.997109907515262 - 0.075972576202904j,
                               1.000000000000000 + 0.000000000000000j))
        mstf_zeros = np.array((-0.999999999999996,))
        mstf_poles = np.array((1.498975311463384, 1.102565142679772,
                               0.132677264750882))

        # for some reason, sometimes the zeros are in different order.
        (self.ntf_zeros, self.mntf_zeros) = (cplxpair(ntf_zeros),
                                             cplxpair(mntf_zeros))
        (self.stf_zeros, self.mstf_zeros) = (cplxpair(stf_zeros),
                                             cplxpair(mstf_zeros))
        (self.ntf_poles, self.mntf_poles) = (cplxpair(ntf_poles),
                                             cplxpair(mntf_poles))
        (self.stf_poles, self.mstf_poles) = (cplxpair(stf_poles),
                                             cplxpair(mstf_poles))
Code example #12
File: utils.py Project: rferdman/pypsr
def real_roots(poly_coeffs, x1=None, x2=None, warn=False):

# Suppress warnings (default)
     if (warn==False):
          warnings.simplefilter('ignore', np.RankWarning)

# Evaluate roots, keeping only the real parts or those without an imaginary component
     re_roots = \
         np.roots(poly_coeffs)[np.roots(poly_coeffs).imag == 0.].real

# Go through limit possibilities, returning the appropriate values
# If no limits were given then return all real roots
     if (x1==None and x2==None):
          return re_roots
# The following are cases where either or both limits are given
     elif (x2==None):  # If only lower limit was given
          return re_roots[(re_roots >= x1)]
     elif (x1==None):  # If only upper limit was given
          return re_roots[(re_roots <= x2)]
     else:             # If both limits are given
          # Check that x1 < x2 and fix if necessary
          if (x1 > x2):
               temp_x = x1
               x1 = x2
               x2 = temp_x
          return re_roots[(re_roots >= x1) & (re_roots <= x2)]
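A minimal usage sketch for real_roots (assuming numpy as np and the warnings module are imported as in the original utils.py): the real roots of x**2 - 4 are -2 and 2, and passing x1=0 keeps only the non-negative one.

import warnings
import numpy as np

print(real_roots([1.0, 0.0, -4.0]))          # both real roots: -2 and 2
print(real_roots([1.0, 0.0, -4.0], x1=0.0))  # only the root >= 0: [2.]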
Code example #13
File: filter_design.py Project: epaxon/nengo
def tf2zpk(b, a):
    """Return zero, pole, gain (z,p,k) representation from a numerator,
    denominator representation of a linear filter.

    Parameters
    ----------
    b : ndarray
        Numerator polynomial.
    a : ndarray
        Denominator polynomial.

    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.

    Notes
    -----
    If some values of `b` are too close to 0, they are removed. In that case,
    a BadCoefficients warning is emitted.

    """
    b, a = normalize(b, a)
    b = (b + 0.0) / a[0]
    a = (a + 0.0) / a[0]
    k = b[0]
    b /= b[0]
    z = roots(b)
    p = roots(a)
    return z, p, k
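This mirrors scipy.signal.tf2zpk, so a quick sanity check can be run against the library version (a sketch, not part of the nengo module): a filter with numerator z - 1 and denominator z - 0.5 has a zero at 1, a pole at 0.5, and unit gain.

from scipy.signal import tf2zpk

z, p, k = tf2zpk([1.0, -1.0], [1.0, -0.5])
print(z, p, k)   # [1.] [0.5] 1.0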
Code example #14
def isstable(b,a,ftype='digital'):
    """Determine whether IIR filter (b,a) is stable
    
    Parameters
    ----------
        b: ndarray
            filter numerator coefficients
        a: ndarray
            filter denominator coefficients
        ftype: string
            type of filter (`digital` or `analog`)
    Returns
    -------
        stable: bool
            whether filter is stable or not
                
    """

    if ftype=='digital':
        v = np.roots(a)
        if np.any(np.abs(v)>1.0):
            return False
        else:
            return True
    elif ftype=='analog':
        v = np.roots(a)
        if np.any(np.real(v)<0):
            return False
        else:
            return True
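A minimal check of isstable (assuming numpy as np): a digital filter with its pole at z = 0.5 is stable, while one with a pole at z = 1.5 is not.

import numpy as np

print(isstable([1.0], [1.0, -0.5]))   # pole at 0.5 -> True
print(isstable([1.0], [1.0, -1.5]))   # pole at 1.5 -> False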
Code example #15
File: bode_asymptotic.py Project: thetdg/SNaP
def asymptote(num, den):

    # create a Python list for the zeros and the poles of the system
    zero = list(sort(abs(roots(num))))    
    pole = list(sort(abs(roots(den))))
    
    #calculate the low frequency gain -- type 0 system
    lf_gain = 20 * log10(abs((num[-1] / den[-1])))
    
    # create an empty matrix to contain the corner frequencies and
    # the corresponding slope indicator (+1 or -1)
    corners = zeros((len(zero) + len(pole) + 2, 2))
    
    starting_freq, end_freq = plot_range(num, den)
    corners[0] = [starting_freq, 0]
    corners[-1] = [end_freq, 0]
    
    # take the first elements from the list of poles and zeros
    # compare them and assign the slope indicator
    # delete the corresponding value from the original list of poles and zeros
    for count in range(len(zero) + len(pole)): 
            
        if len(zero) > 0:
            a = zero[0]
        else:
            a = inf
            
        if len(pole) > 0:
            b = pole[0]
        else:
            b = inf
        
        c = min(a, b)
                
        if c == a:
            corners[count + 1] = [c, 1]
            if len(zero) > 0:
                zero.pop(0)

        if c == b:
            corners[count + 1] = [c, -1]
            if len(pole) > 0:
                pole.pop(0)
         
    # now calculate the gains at the corners using 
    # gain = +/- 20log10(upper_corner / lower_corner) 
    asymptotic_gain = zeros_like(corners)   
    asymptotic_gain[0, 1] = lf_gain
    asymptotic_gain[1, 1] = lf_gain
    
    gain = lf_gain
    multiplier = cumsum(corners[:, 1])
    for k in range(2, len(corners)):    
        gain += multiplier[k-1] * 20 * log10(corners[k, 0] / corners[k-1, 0])
        asymptotic_gain[k, 1] = gain
        
    asymptotic_gain[:, 0] = corners[:, 0]
                   
    return asymptotic_gain
Code example #16
File: linear_prediction.py Project: rtrhd/spectrum
def poly2lsf(a):
    """Prediction polynomial to line spectral frequencies.

    Converts the prediction polynomial specified by ``a`` into the
    corresponding line spectral frequencies (LSF), normalizing the
    prediction polynomial by A(1).

    .. doctest::

        >>> from spectrum import poly2lsf
        >>> a = [1.0000,  0.6149, 0.9899, 0.0000 ,0.0031, -0.0082]
        >>> lsf = poly2lsf(a)
        >>> lsf =  array([0.7842, 1.5605, 1.8776, 1.8984, 2.3593])

    .. seealso:: lsf2poly, poly2rc, poly2qc, rc2is
    """

    #Line spectral frequencies are not defined for complex polynomials.

    # Normalize the polynomial

    a = numpy.array(a)
    if a[0] != 1:
        a/=a[0]

    if max(numpy.abs(numpy.roots(a))) >= 1.0:
        error('The polynomial must have all roots inside of the unit circle.');


    # Form the sum and difference filters

    p  = len(a)-1   # The leading one in the polynomial is not used
    a1 = numpy.concatenate((a, numpy.array([0])))
    a2 = a1[-1::-1]
    P1 = a1 - a2        # Difference filter
    Q1 = a1 + a2        # Sum Filter

    # If order is even, remove the known root at z = 1 for P1 and z = -1 for Q1
    # If odd, remove both the roots from P1

    if p%2: # Odd order
        P, r = deconvolve(P1,[1, 0 ,-1])
        Q = Q1
    else:          # Even order
        P, r = deconvolve(P1, [1, -1])
        Q, r = deconvolve(Q1, [1,  1])

    rP  = numpy.roots(P)
    rQ  = numpy.roots(Q)

    aP  = numpy.angle(rP[1::2])
    aQ  = numpy.angle(rQ[1::2])

    lsf = sorted(numpy.concatenate((-aP,-aQ)))

    return lsf
Code example #17
def zplane(b,a,filename=None):
    """Plot the complex z-plane given a transfer function.
    """

    # get a figure/plot
    ax = plt.subplot(111)

    # create the unit circle
    uc = patches.Circle((0,0), radius=1, fill=False,
                        color='black', ls='dashed')
    ax.add_patch(uc)

    # If the coefficients are greater than 1, normalize them
    if np.max(b) > 1:
        kn = np.max(b)
        b = b/float(kn)
    else:
        kn = 1

    if np.max(a) > 1:
        kd = np.max(a)
        a = a/float(kd)
    else:
        kd = 1
        
    # Get the poles and zeros
    p = np.roots(a)
    z = np.roots(b)
    k = kn/float(kd)
    
    # Plot the zeros and set marker properties    
    t1 = plt.plot(z.real, z.imag, 'go', ms=10)
    plt.setp( t1, markersize=10.0, markeredgewidth=1.0,
              markeredgecolor='k', markerfacecolor='g')

    # Plot the poles and set marker properties
    t2 = plt.plot(p.real, p.imag, 'rx', ms=10)
    plt.setp( t2, markersize=12.0, markeredgewidth=3.0,
              markeredgecolor='r', markerfacecolor='r')

    ax.spines['left'].set_position('center')
    ax.spines['bottom'].set_position('center')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)

    # set the ticks
    r = 1.5; plt.axis('scaled'); plt.axis([-r, r, -r, r])
    ticks = [-1, -.5, .5, 1]; plt.xticks(ticks); plt.yticks(ticks)

    if filename is None:
        plt.show()
    else:
        plt.savefig(filename)
    

    return z, p, k
Code example #18
def InitializeCalculations(output):
    """This functions calculates parameters from the input"""
    # SYSTEM
    if output['series'] is False:	
    	output['system'] = 'F' + "{0:.2f}".format(output['BASEfraction']).replace('.','_') + 'D1_8V' + \
                           '0'*(len(str(output['version']))%2) + str(output['version'])
        output['systemfolder'] = output['system']
    # BASE PARTICLES	
    output['BASEparticlevolume'] = (4.0/3.0) * np.pi * (output['BASEsigmacorrected']/2.0)**3	
    output['BASEdiffusion'] = (output['ReferenceT']*output['Boltzmann']) / (6.0*np.pi*output['viscosity']*(0.5*output['BASEsigma']))
    output['BASEtau'] = (output['BASEsigma']**2) / output['BASEdiffusion']	
    output['eta'] = output['viscosity']*(output['BASEsigma']**3)/(output['Boltzmann']*output['ReferenceT']*output['BASEtau'])
    output['BASEfriction'] = 0.5*6.0*output['BASEsigmacorrected']*np.pi*output['eta'];
    if output['series'] is False:
        if output['basegenerator'] == 'Lbox' or output['InitialConfiguration'] == 'random':
            output['BOXvolume'] = output['BOXsize_x']*output['BOXsize_y']*output['BOXsize_z']
            TotalParticleVolume = output['BOXvolume'] * output['BASEfraction']
            output['BASEparticles'] = int(np.round(TotalParticleVolume / output['BASEparticlevolume'],0))
        elif output['InitialConfiguration'] == 'BCC' or output['InitialConfiguration'] == 'FCC':	
            output['BOXsize'] = (output['BASEparticlevolume']*output['BASEparticles']/output['BASEfraction'])**(1/3.0)
            output['BOXsize_x'] = output['BOXsize']
            output['BOXsize_y'] = output['BOXsize']
            output['BOXsize_z'] = output['BOXsize']
            output['BOXvolume'] = output['BOXsize_x']*output['BOXsize_y']*output['BOXsize_z']			
    if output['InitialConfiguration'] == 'BCC' and output['basegenerator'] == 'Nparticles':
        roots = np.roots([2,0,0,-1.0*output['BASEparticles']])[::-1] # Nparticles of the basis crystal along one axis
        m = roots[0]
        output['m'] = int(round(m.real,0))
        output['n'] = output['m']-1 # number of unit cells
        output['a'] = output['BOXsize'] / output['m']
    elif output['InitialConfiguration'] == 'FCC': 
        roots = np.roots([4,-6,3,-output['BASEparticles']]) # number of particles of the basis crystal along one axis
        m = roots[0]
        output['m'] = int(round(m.real,0))
        output['n'] = output['m']-1 # number of unit cells
        output['a'] = output['BOXsize'] / output['m']
    elif output['InitialConfiguration'] == 'BCC' and output['basegenerator'] == 'Lbox':
        # for a box with ratio's x,y = 1 and z = 0.5
        output['m'] = int(np.round(np.power(output['BASEparticles'],(1/3.0)),0))
        output['n'] = output['m']-1 # number of unit cells
        output['a'] = output['BOXsize'] / output['m']
    if output['method'] == 'wigner':
        output['ALLparticles'] = output['BASEparticles'] 
    # INTERSTITIALS
    elif output['method'] == 'interstitial':		
        output['INTERsigmacorrected'] = output['INTERsigma'] / output['BASEsigma']	
        output['INTERfriction'] = 0.5*6.0*output['INTERsigmacorrected']*np.pi*output['eta']
        output['INTERdiffusion'] = (output['ReferenceT']*output['Boltzmann']) / (6.0*np.pi*output['viscosity']*(0.5*output['INTERsigma']))
        output['INTERtau'] = (output['INTERsigma']**2) / output['INTERdiffusion']
        output['IBepsilon'] = 0.5*(output['BASEepsilon'] + output['INTERepsilon'])
        output['IBkappa'] = 0.5*(output['BASEkappa'] + output['INTERkappa'])
        if output['InitialConfiguration'] == 'BCC':
            output['INTERsites'] = 12*output['m']*output['n']*output['n']
            output['INTERparticles'] = int(output['INTERsites']*output['INTERfraction'])
        output['ALLparticles'] = output['BASEparticles'] + output['INTERparticles']
    return output
Code example #19
File: plot_thermal.py Project: tdengg/pylastic
	def free_ene(self, trange):
		self.get_data()
		#self.get_E0()
		#ax = plt.subplot(111)
		ndic = collections.OrderedDict(sorted(self.__dic.items()))	#sort dictionary
		minF = []
		minl = []
		for temp in trange:
			xdata = []
			ydata = []
			i=0
			for out in ndic:
				xdata.append(out)
				ind = self.__dic[out]['T'].index(temp)
				
				ydata.append(self.__dic[out]['F'][ind]/self.conv + self.__E0[i] + 13.5)
				i+=1
			#polyfit:
			coeff = np.polyfit(xdata,ydata,3)
			p = np.poly1d(coeff)
			polyx = np.linspace(min(xdata),max(xdata),1000)
			
			#ax.plot(xdata,ydata,'+')
			#ax.plot(polyx,p(polyx))
			minl.append(np.roots(p.deriv())[1])
			minF.append(p(np.roots(p.deriv())[1]))
			
			#polyfit F-T
			coeff = np.polyfit(minl,minF,21)
			p = np.poly1d(coeff)
			polyx = np.linspace(min(minl),max(minl),1000)
			
			#ax.plot(polyx,p(polyx))
			#ax.plot(minl,minF,'o')
		
		
		
		
		#polyfit thermal expansion:
		
		coeff = np.polyfit(trange,minl,4)
		p = np.poly1d(coeff)
		polyx = np.linspace(min(trange),max(trange),1000)
		
		#ax1 = plt.plot(polyx,p(polyx))
		#ax1 = plt.plot(trange,minl,'o')
		
		#thermal expansion/T
		
		alpha = []
		for i in range(len(trange)-1):
			alpha.append((minl[i+1]-minl[i])/(trange[i+1]-trange[i])/minl[0]*10**6.)
		plt.plot(trange[:-1], alpha, label = self._lname, lw=2., ls=self.style)
		self.__alpha = alpha
		self.__minl = minl
		self.__minF = minF
Code example #20
File: filtertool.py Project: zinka/arraytool
def dual_band(F, P, E, eps, eps_R=1, x1=0.5, map_type=1):
    r"""
    Function to give modified F, P, and E polynomials after lowpass 
    prototype dual band transformation.
    
    :param F:       Polynomial F, i.e., numerator of S11 (in s-domain)
    :param P:       Polynomial P, i.e., numerator of S21 (in s-domain)
    :param E:       polynomial E is the denominator of S11 and S21 (in s-domain)
    :param eps:     Constant term associated with S21
    :param eps_R:   Constant term associated with S11
    :param x1:
    :param map_type:
                  
    :rtype:    
    """

    if (map_type == 1) or (map_type == 2):

        s = sp.Symbol("s")
        if map_type == 1:
            a = -2j / (1 - x1 ** 2)
            b = -1j * (1 + x1 ** 2) / (1 - x1 ** 2)
        elif map_type == 2:
            a = 2j / (1 - x1 ** 2)
            b = 1j * (1 + x1 ** 2) / (1 - x1 ** 2)
        s1 = a * s ** 2 + b

        F = sp.Poly(F.ravel().tolist(), s)
        F1 = sp.simplify(F.subs(s, s1))
        F1 = sp.Poly(F1, s).all_coeffs()
        F1 = I_to_i(F1)

        E = sp.Poly(E.ravel().tolist(), s)
        E1 = sp.simplify(E.subs(s, s1))
        E1 = sp.Poly(E1, s).all_coeffs()
        E1 = I_to_i(E1)

        P = sp.Poly(P.ravel().tolist(), s)
        P1 = sp.simplify(P.subs(s, s1))
        P1 = sp.Poly(P1, s).all_coeffs()
        P1 = I_to_i(P1)

    elif map_type == 3:

        F_roots = np.roots(F.ravel().tolist())
        P_roots = np.roots(P.ravel().tolist())
        F1_roots = Lee_roots_map(F_roots, x1)
        P1_roots = Lee_roots_map(P_roots, x1)
        P1_roots = np.concatenate((P1_roots, np.array([0, 0])))
        F1 = np.poly(F1_roots)
        P1 = np.poly(P1_roots)
        F1 = np.reshape(F1, (len(F1), -1))
        P1 = np.reshape(P1, (len(P1), -1))
        E1 = poly_E(eps, eps_R, F1, P1)[0]

    return F1, P1, E1
Code example #21
File: CARMAFast.py Project: kobyafrank/kali
def checkParams(aList=None,bList=None):
	if aList is None:
		raise ValueError('#CAR > 0')
	p = len(aList)

	if bList is None:
		raise ValueError('#CMA > 0')
	q = len(bList)-1

	hasUniqueEigenValues=1
	isStable=1
	isInvertible=1
	isNotRedundant=1
	hasPosSigma=1

	CARPoly=list()
	CARPoly.append(1.0)
	for i in xrange(p):
		CARPoly.append(aList[i])
	CARRoots=roots(CARPoly)
	#print 'C-AR Roots: ' + str([rootVal for rootVal in CARRoots])
	if (len(CARRoots)!=len(set(CARRoots))):
		hasUniqueEigenValues=0
	for CARRoot in CARRoots:
		if (CARRoot.real>=0.0):
			isStable=0
	#print 'isStable: %d'%(isStable)

	isInvertible=1
	CMAPoly=list()
	for i in xrange(q + 1):
		CMAPoly.append(bList[i])
	CMAPoly.reverse()
	CMARoots=roots(CMAPoly)
	#print 'C-MA Roots: ' + str([rootVal for rootVal in CMARoots])
	if (len(CMARoots)!=len(set(CMARoots))):
		hasUniqueEigenValues=0
	for CMARoot in CMARoots:
		if (CMARoot>0.0):
			isInvertible=0
	#print 'isInvertible: %d'%(isInvertible)

	isNotRedundant=1
	for CARRoot in CARRoots:
		for CMARoot in CMARoots:
			if (CARRoot==CMARoot):
				isNotRedundant=0

	if (bList[0] <= 0.0):
		hasPosSigma = 0

	retVal = isStable*isInvertible*isNotRedundant*hasUniqueEigenValues*hasPosSigma
	#print 'retVal: %d'%(retVal)

	return retVal
Code example #22
File: PolyCalibration.py Project: sebalander/sebaPhD
def radialUndistort(rpp, k, quot=False, der=False):
    '''
    takes distorted radius and returns the radius undistorted
    optionally it returns the undistortion quotient rp = rpp * q
    '''
    # polynomial coeffs, grade 7
    # # (k1,k2,p1,p2[,k3[,k4,k5,k6[,s1,s2,s3,s4[,τx,τy]]]])
    k.shape = -1
    poly = [[k[4], # k3
             0,
             k[1], # k2
             0,
             k[0], # k1
             0,
             1,
             -r] for r in rpp]
    
    # calculate roots
    rootsPoly = array([roots(p) for p in poly])
    
    # return flag, True if there is a suitable (real AND positive) solution
    rPRB = isreal(rootsPoly) & (0 <= real(rootsPoly))  # real Positive Real Bool
    retVal = any(rPRB, axis=1)
    
    rp = empty_like(rpp)
    
    if any(~ retVal): # if at least one case of non solution
        # calculate extrema of the polynomial
        rExtrema = roots([7*k[4], 0, 5*k[1], 0, 3*k[0], 0, 1])
        # select real extrema in positive side, keep smallest
        rRealPos = min(rExtrema.real[isreal(rExtrema) & (0<=rExtrema.real)])
        # assign to problematic values
        rp[~retVal] = rRealPos
    
    # choose minimum positive roots
    rp[retVal] = [min(rootsPoly[i, rPRB[i]].real)
                   for i in arange(rpp.shape[0])[retVal]]
    
    if der:
        # derivative of the forward (distortion) model
        q, dQdP, dQdK = radialDistort(rp, k, der=True)

        if quot:
            return q, retVal, dQdP, dQdK
        else:
            return rp, retVal, dQdP, dQdK
    else:
        if quot:
            return rp / rpp, retVal
        else:
            return rp, retVal
Code example #23
def radialUndistort(rpp, k, quot=False, der=False):
    '''
    takes distorted radius and returns the radius undistorted
    optionally it returns the distortion quotient rpp = rp * q
    '''
    # polynomial coeffs, grade 9
    # # (k1,k2,p1,p2[,k3[,k4,k5,k6[,s1,s2,s3,s4[,τx,τy]]]])
    
    k.shape = -1
    poly = [[k[3], 0, k[2], 0, k[1], 0, k[0], 0, 1, -r] for r in rpp]
    
    # calculate roots
    rootsPoly = array([roots(p) for p in poly])
    
    # return flag, True if there is a suitable (real AND positive) solution
    rPRB = isreal(rootsPoly) & (0 <= real(rootsPoly))  # real Positive Real Bool
    retVal = any(rPRB, axis=1)
    
    thetap = empty_like(rpp)
    
    if any(~retVal): # if at least one case of non solution
        # calculate extrema of the polynomial
        thExtrema = roots([9*k[3], 0, 7*k[2], 0, 5*k[1], 0, 3*k[0], 0, 1])
        # select real extrema in positive side, keep smallest
        thRealPos = min(thExtrema.real[isreal(thExtrema) & (0<=thExtrema.real)])
        # assign to problematic values
        thetap[~retVal] = thRealPos
    
    # choose minimum positive roots
    thetap[retVal] = [min(rootsPoly[i, rPRB[i]].real)
                   for i in arange(rpp.shape[0])[retVal]]
    
    rp = abs(tan(thetap))  # correct negative values
    # if theta angle is greater than pi/2, retVal=False
    retVal[thetap >= pi/2] = False
    
    if der:
        # derivative of the forward (distortion) model
        q, dQdP, dQdK = radialDistort(rp, k, quot, der)
        
        if quot:
            return q, retVal, dQdP, dQdK
        else:
            return rp, retVal, dQdP, dQdK
    else:
        if quot:
            # returns q
            return rpp / rp, retVal
        else:
            return rp, retVal
Code example #24
def intersect_point(s1, s2, s3, r):

    if s1[2] == s2[2] == s3[2]:
        a = 2*(s3[0] - s1[0])
        b = 2*(s3[1] - s1[1])
        c = r[0]**2 - r[2]**2 - s1[0]**2 - s1[1]**2 + s3[0]**2 + s3[1]**2
        d = 2*(s3[0] - s2[0])
        e = 2*(s3[1] - s2[1])
        f = r[1]**2 - r[2]**2 - s2[0]**2 - s2[1]**2 + s3[0]**2 + s3[1]**2

        t1 = (a*e - b*d)

        x = (c*e - b*f) / t1
        y = (a*f - c*d) / t1

        da = 1
        db = -2*s1[2]
        dc = s1[2]**2 - r[0]**2 + (x - s1[0])**2 + (y - s1[1])**2

        z = np.roots([da, db, dc])

        return np.array([x, x]), np.array([y, y]), z

    else:
        d31 = s3 - s1
        d32 = s3 - s2

        n1 = np.dot(s1, s1)
        n2 = np.dot(s2, s2)
        n3 = np.dot(s3, s3)

        q = r[0]**2 - r[2]**2 - n1 + n3
        p = r[1]**2 - r[2]**2 - n2 + n3

        t1 = d31[0]*d32[2] - d31[2]*d32[0]
        t2 = 2*d32[2]

        a4 = (d31[2]*d32[1] - d31[1]*d32[2]) / t1
        a5 = -(d31[2]*p - d32[2]*q) / (2*t1)
        a6 = (-2*d32[0]*a4 - 2*d32[1]) / t2
        a7 = (p - 2*d32[0]*a5) / t2

        a = a4**2 + 1 + a6**2
        b = 2*a4*(a5 - s1[0]) - 2*s1[1] + 2*a6*(a7 - s1[2])
        c = a5*(a5 - 2*s1[0]) + a7*(a7 - 2*s1[2]) + n1 - r[0]**2

        y = np.roots([a, b, c])
        x = a4*y+a5
        z = a6*y+a7

        return x, y, z
Code example #25
	def projection(self, x_start, y_start, x_tmp, y_tmp, x_end, y_end, lc):
		"""Computes the coordinates of a point
		by projection onto the segment [(x_tmp, y_tmp), (x_end, y_end)]
		such that the distance between (x_start, y_start) and the new point
		is the given characteristic length.

		Arguments
		---------
		x_start, y_start -- coordinates of the starting point.
		x_tmp, y_tmp -- coordinates of the intermediate point.
		x_end, y_end -- coordinates of the ending point.
		lc -- characteristic length.

		Returns
		-------
		x_target, y_target -- coordinates of the projected point.
		"""
		tol = 1.0E-06
		if abs(y_end-y_tmp) >= tol:
			# solve for y
			# coefficients of the second-order polynomial
			a = (x_end-x_tmp)**2 + (y_end-y_tmp)**2
			b = 2.0*( (x_end-x_tmp)*( y_tmp*(x_start-x_end) 
									+ y_end*(x_tmp-x_start) ) 
					- y_start*(y_end-y_tmp)**2 )
			c = (y_start**2-lc**2)*(y_end-y_tmp)**2 \
				+ (y_tmp*(x_start-x_end) + y_end*(x_tmp-x_start))**2
			# solve the second-order polynomial: ay^2 + by + c = 0
			y = np.roots([a, b, c])
			# test if the point belongs to the segment
			test = (y_tmp <= y[0] <= y_end or y_end <= y[0] <= y_tmp)
			y_target = (y[0] if test else y[1])
			x_target = x_tmp + (x_end-x_tmp)/(y_end-y_tmp)*(y_target-y_tmp)
		else:
			# solve for x
			# coefficients of the second-order polynomial
			a = (x_end-x_tmp)**2 + (y_end-y_tmp)**2
			b = 2.0*( (x_end-x_tmp)*(y_tmp-y_start)*(y_end-y_tmp) 
					 - x_start*(x_end-x_tmp)**2 
					 - x_tmp*(x_end-x_tmp)**2 )
			c = (x_end-x_tmp)**2*((y_tmp-y_start)**2+x_start**2-lc**2) \
				+ x_tmp**2*(y_end-y_tmp)**2 \
				- 2*x_tmp*(x_end-x_tmp)*(y_tmp-y_start)*(y_end-y_tmp)
			# solve the second-order polynomial: ax^2 + bx + c = 0
			x = np.roots([a, b, c])
			# test if the point belongs to the segment
			test = (x_tmp <= x[0] <= x_end or x_end <= x[0] <= x_tmp)
			x_target = (x[0] if test else x[1])
			y_target = y_tmp + (y_end-y_tmp)/(x_end-x_tmp)*(x_target-x_tmp)
		return x_target, y_target
Code example #26
def critical_point(beta, gamma_init, ctrl_max, tol_crit, bm_value):
    g = gamma_init
    coeff_x = [1, 3 * g - 1, beta ** 2 * (1 + g) ** 4 + 3 * g * (g - 1), (g - 3) * g ** 2, -g ** 3]
    r = np.roots(coeff_x)
    for i in range(len(r)):
        if r[i] == np.conjugate(r[i]):
            r[i] = r[i].real
            if (r[i] > 0) & (r[i] < 1):
                a = r[i]
                a = a.real
                break
    ctrl = 0
    db = np.abs(balance(a, g, beta))
    dder = np.abs(balance_der(a, g, beta))
    while (db > tol_crit) | (dder > tol_crit):
        if (
            ctrl < ctrl_max
        ):  ## A guard to avoid getting stuck indefinitely in case the recursion does not work.
            coeff_g = [
                beta ** 2,
                2 * beta ** 2 * (2 * a - 1) - (1 - a) * alpha(a),
                beta ** 2 * (2 * a - 1) ** 2 - a * (1 - a) * alpha(a),
            ]
            r_g = np.roots(coeff_g)
            for i in range(len(r_g)):
                if r_g[i] == np.conjugate(r_g[i]):
                    r_g[i] = r_g[i].real
                    if (r_g[i] > 0) & (r_g[i] < 1):
                        g = r_g[i]
                        g = g.real
                        break
            coeff_x = [1, 3 * g - 1, beta ** 2 * (1 + g) ** 4 + 3 * g * (g - 1), (g - 3) * g ** 2, -g ** 3]
            r = np.roots(coeff_x)
            for i in range(len(r)):
                if r[i] == np.conjugate(r[i]):
                    r[i] = r[i].real
                    if (r[i] > 0) & (r[i] < 1):
                        a = r[i]
                        a = a.real
                        break
            db = np.abs(balance(a, g, beta))
            dder = np.abs(balance_der(a, g, beta))
            g_look = g
            ctrl += 1
        else:
            break
    if both_mechs == 1:
        g = np.sqrt(1 + g) - 1

    return g, db, dder
Code example #27
def Poly_Zeros_T(Poly_z_K, Poly_p_K, Poly_z_G, Poly_p_G):
    """Given the polynomial expansion in the denominator and numerator of the controller function K and G
    then this function return s the poles and zeros of the closed loop transfer function in terms of reference signal

    the arrays for the input must range from the highest order of the polynomial to the lowest"""

    Poly_z = numpy.polymul(Poly_z_K, Poly_z_G)
    Poly_p = numpy.polyadd(numpy.polymul(Poly_p_K, Poly_z_G), numpy.polymul(Poly_p_K, Poly_p_G))

    # return the poles and zeros of T
    Zeros = numpy.roots(Poly_z)
    Poles = numpy.roots(Poly_p)

    return Poles, Zeros
Code example #28
File: gfiltpak.py Project: gnarayan81/PythonMath
def gtf2zp(B, A = np.array([1])):
	k = 1.0
	if B[0] != 1:
		k *= B[0]
		B /= k
	
	if A[0] != 1:
		k /= A[0]
		A /= k
	
	z = np.roots(B)
	p = np.roots(A)
	
	return z, p, k
Code example #29
File: shock.py Project: jadelord/caeroc
    def get_M_1(self, M_2=None, p2_p1=None, rho2_rho1=None, T2_T1=None,
            p02_p01=None, p2_p01=None, **kwargs):
        """
        Computes the Mach number when one of the arguments is specified

        """
        try:
            g = self.gamma
        except KeyError:
            g = kwargs['gamma']

        if p2_p1 is not None:
            M_1 = np.sqrt((p2_p1 - 1) * (g + 1.) / 2. /  g + 1.)
        elif rho2_rho1 is not None:
            M_1 = np.sqrt(2. * rho2_rho1 / (g + 1. - rho2_rho1 * (g - 1.)))
        elif T2_T1 is not None:
            a = 2. * g * (g - 1.)
            b = 4. * g - (g - 1.) * (g - 1.)- T2_T1 * (g + 1.) * (g + 1.)
            c = -2. * (g - 1.)
            M_1, M_11 = np.roots([a, b, c])
        elif p02_p01 is not None:
            raise NotImplementedError
        elif p2_p01 is not None:
            raise NotImplementedError
        elif 'M' in kwargs.keys():
            return kwargs['M']
        else:
            logger.error('Insufficient data to calculate Mach number')

        return M_1
Code example #30
File: curvefit.py Project: kwheeler27/smbanalyze
def MMS(F, Lp, Lc, F0, K):
    "Modified Marko-Siggia model as a function of force"
    f = float(F - F0) * Lp / kT(parameters["T"])
    inverted_roots = roots([1.0, f - 0.75, 0.0, -0.25])
    root_index = int(f >= 0.75) * 2
    root_of_inverted_MS = real(inverted_roots[root_index])
    return Lc * (1 - root_of_inverted_MS + (F - F0) / float(K))
Code example #31
                  0.0, -3 * P.m1 * P.g / 4 / (.25 * P.m1 + P.m2),
                  -P.b / (.25 * P.m1 + P.m2), 0.0
              ],
              [
                  0.0,
                  3 * (P.m1 + P.m2) * P.g / 2 / (.25 * P.m1 + P.m2) / P.ell,
                  3 * P.b / 2 / (.25 * P.m1 + P.m2) / P.ell, 0.0
              ]])
B = np.array([[0.0], [0.0], [1 / (.25 * P.m1 + P.m2)],
              [-3.0 / 2 / (.25 * P.m1 + P.m2) / P.ell]])

C = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]])

# gain calculation
wn_th = 2.2 / tr_theta  # natural frequency for angle
wn_z = 2.2 / tr_z  # natural frequency for position
des_char_poly = np.convolve([1, 2 * zeta_z * wn_z, wn_z**2],
                            [1, 2 * zeta_th * wn_th, wn_th**2])
des_poles = np.roots(des_char_poly)

# Compute the gains if the system is controllable
if np.linalg.matrix_rank(cnt.ctrb(A, B)) != 4:
    print("The system is not controllable")
else:
    K = cnt.acker(A, B, des_poles)
    Cr = np.array([[1.0, 0.0, 0.0, 0.0]])
    kr = -1.0 / (Cr * np.linalg.inv(A - B @ K) @ B)

print('K: ', K)
print('kr: ', kr)
Code example #32
def arma_acovf(ar, ma, nobs=10, sigma2=1, dtype=None):
    """
    Theoretical autocovariances of stationary ARMA processes

    Parameters
    ----------
    ar : array_like, 1d
        The coefficients for autoregressive lag polynomial, including zero lag.
    ma : array_like, 1d
        The coefficients for moving-average lag polynomial, including zero lag.
    nobs : int
        The number of terms (lags plus zero lag) to include in returned acovf.
    sigma2 : float
        Variance of the innovation term.

    Returns
    -------
    ndarray
        The autocovariance of ARMA process given by ar, ma.

    See Also
    --------
    arma_acf : Autocorrelation function for ARMA processes.
    acovf : Sample autocovariance estimation.

    References
    ----------
    .. [*] Brockwell, Peter J., and Richard A. Davis. 2009. Time Series:
        Theory and Methods. 2nd ed. 1991. New York, NY: Springer.
    """
    if dtype is None:
        dtype = np.common_type(np.array(ar), np.array(ma), np.array(sigma2))

    p = len(ar) - 1
    q = len(ma) - 1
    m = max(p, q) + 1

    if sigma2.real < 0:
        raise ValueError('Must have positive innovation variance.')

    # Short-circuit for trivial corner-case
    if p == q == 0:
        out = np.zeros(nobs, dtype=dtype)
        out[0] = sigma2
        return out
    elif p > 0 and np.max(np.abs(np.roots(ar))) >= 1:
        raise ValueError(NONSTATIONARY_ERROR)

    # Get the moving average representation coefficients that we need
    ma_coeffs = arma2ma(ar, ma, lags=m)

    # Solve for the first m autocovariances via the linear system
    # described by (BD, eq. 3.3.8)
    A = np.zeros((m, m), dtype=dtype)
    b = np.zeros((m, 1), dtype=dtype)
    # We need a zero-right-padded version of ar params
    tmp_ar = np.zeros(m, dtype=dtype)
    tmp_ar[:p + 1] = ar
    for k in range(m):
        A[k, :(k + 1)] = tmp_ar[:(k + 1)][::-1]
        A[k, 1:m - k] += tmp_ar[(k + 1):m]
        b[k] = sigma2 * np.dot(ma[k:q + 1], ma_coeffs[:max((q + 1 - k), 0)])
    acovf = np.zeros(max(nobs, m), dtype=dtype)
    try:
        acovf[:m] = np.linalg.solve(A, b)[:, 0]
    except np.linalg.LinAlgError:
        raise ValueError(NONSTATIONARY_ERROR)

    # Iteratively apply (BD, eq. 3.3.9) to solve for remaining autocovariances
    if nobs > m:
        zi = signal.lfiltic([1], ar, acovf[:m:][::-1])
        acovf[m:] = signal.lfilter([1],
                                   ar,
                                   np.zeros(nobs - m, dtype=dtype),
                                   zi=zi)[0]

    return acovf[:nobs]
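This matches the statsmodels implementation (statsmodels.tsa.arima_process.arma_acovf). As a usage sketch (assuming statsmodels is installed): an AR(1) process with coefficient 0.5 and unit innovation variance has theoretical autocovariances sigma2 * 0.5**k / (1 - 0.5**2).

import numpy as np
from statsmodels.tsa.arima_process import arma_acovf

acov = arma_acovf(np.array([1.0, -0.5]), np.array([1.0]), nobs=3)
print(acov)   # approximately [1.3333, 0.6667, 0.3333]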
Code example #33
File: System.py Project: Wardi0/Ideal-Gas
    def time_of_collision(self, object_1, object_2):
        """
        Returns the time after which these objects will collide next, returning np.infty if they will not collide
        on their current trajectories

        object_1 - int type value representing the index of the colliding particle in self.particles
        object_2 - int or str type value representing the index of the colliding particle in self.particles or the
                   dimension of the constraining wall
        """
        particle_1 = self.particles[object_1]

        # Check if object_2 is one of the container walls
        if type(object_2) == str:
            dimension, side = object_2.split('.')
            dimension = int(dimension) - 1
            if side == 'Min':
                coordinate = particle_1.radius
            else:
                coordinate = self.box[dimension] - particle_1.radius
            # In case the particle is already touching the wall and has 0 velocity
            if coordinate == particle_1.position[dimension]:
                return 0
            # To avoid division by zero errors
            elif particle_1.velocity[dimension] == 0:
                return np.infty

            else:
                time = (coordinate - particle_1.position[dimension]
                        ) / particle_1.velocity[dimension]
            if time > 0:
                return time
            else:
                return np.infty

        else:
            particle_2 = self.particles[object_2]

            # Check if both particles have 0 velocity
            if particle_1.velocity.magnitude(
            ) == 0 and particle_2.velocity.magnitude() == 0:
                return np.infty

            velocity_difference = particle_2.velocity - particle_1.velocity
            position_difference = particle_2.position - particle_1.position

            # Define the quadratic coefficients to find the collision time
            a = 0
            b = 0
            c = -(particle_1.radius + particle_2.radius)**2
            for D in range(self.dimensions):
                a += velocity_difference[D]**2
                b += 2 * velocity_difference[D] * position_difference[D]
                c += position_difference[D]**2
            roots = np.roots([a, b, c])

            # Find the smallest positive and real root if applicable
            if type(roots[0]) == np.float64:
                if (roots[0] < 0) != (roots[1] < 0):
                    return max(roots)
                elif (roots[0] > 0) and (roots[1] > 0):
                    return min(roots)
            return np.infty
Code example #34
@author: Hrithik
"""

import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

#if using termux
import subprocess
import shlex
#end if

gain = 50
num = [0, 0, 50, 150]
char_eq = [1, 6, 58, 150]
zeros = np.roots(num)
poles = np.roots(char_eq)

gain_2 = (poles[0]) * (poles[1])  # second order approximated transfer function
num_2 = [1]
zeros_2 = np.roots(num_2)
poles_2 = np.array([poles[0], poles[1]])

system = signal.lti(zeros, poles, np.array([gain]))
system_2 = signal.lti(zeros_2, poles_2, np.array([gain_2]))

T, yout = signal.step(system)  # step response without approximation
T_2, yout_2 = signal.step(system_2)  # step response with approximation

plt.plot(T, yout, label="Without approximation")
plt.plot(T_2, yout_2, 'g', label="With approximation")
Code example #35
def CreateBoundaryNew(h, n):
    coeff = [1.5, 0.0, h, 1.0]
    a = np.roots(coeff)
    x = numpy.linspace(-max(a) * 0.999, max(a) * 0.999, int(n))
    x_ret, y_ret = CreateBoundary(x, h)
    return x_ret, y_ret
Code example #36
File: regression.py Project: leetoby1215/mbed12
import matplotlib.pyplot as plt
import numpy as np

Ts = 30
# signal interval
end = 150
# signal end point
n = int(end / Ts) + 1
x = np.linspace(0, end, num=n)  # signal vector

#TODO: revise this array to your results
y = np.array([0.000, 5.582, 10.926, 14.355, 16.189, 16.827])  # speed vector
z = np.polyfit(x, y,
               2)  # Least squares polynomial fit, and return the coefficients.

goal = 0  # target speed; set this to 7, for example, to make the servo run at 7 cm/sec
# equation : z[0]*x^2 + z[1]*x + z[2] = goal
z[2] -= goal  # z[0]*x^2 + z[1]*x + z[2] - goal = 0
result = np.roots(
    z)  # Return the roots of a polynomial with coefficients given

# output the correct one
if (0 <= result[0]) and (result[0] <= end):
    print(result[0])
else:
    print(result[1])
Code example #37
import numpy as np
import matplotlib.pyplot as plt
coeffs = [1, -9, 20]  #given condition x*x-9x+20<=0
M = np.roots(coeffs)  #s2<x<s1
print(np.roots(coeffs))
s1 = M.T @ np.array([1, 0])  #s1=5
s2 = M.T @ np.array([0, 1])  #s2=4
cur_x = 1  # The algorithm starts at x=1
gamma = 0.01  # step size multiplier
precision = 0.00001
previous_step_size = 1
max_iters = 10000  # maximum number of iterations
iters = 0  #iteration counter

df = lambda x: 6 * x**2 - 30 * x + 36
while (previous_step_size > precision) & (iters < max_iters):
    prev_x = cur_x
    cur_x += gamma * df(prev_x)
    previous_step_size = abs(cur_x - prev_x)
    iters += 1

print("f'(x)=0 at", cur_x)
k1 = cur_x

cur_x = s2  # The algorithm starts at x=s2 (=4)
gamma = 0.01  # step size multiplier
precision = 0.00001
previous_step_size = 1
max_iters = 10000  # maximum number of iterations
iters = 0  #iteration counter
Code example #38
def analysis(data_org, col_name):
    """
    """
    y_org = data_org[col_name]
    # =============================================================================
    # Find Peak(s)
    # =============================================================================
    peaks, properties = find_peaks(y_org, width=10)
    gutter, properties_gutter = find_peaks(-y_org, width=2)

    img_save_path = './imgs/plot_with_peak_' + col_name + '.png'
    plt.figure()
    plt.plot(y_org)
    plt.plot(peaks, y_org[peaks], "x")
    plt.plot(np.zeros_like(y_org), "--", color="gray")

    plt.savefig(img_save_path, dpi=300)
    print('')
    print(f"Image saved to: \"{img_save_path}\"")
    # plt.show()
    plt.close()

    x_arr = np.array(x)

    peaks_index = peaks
    # peak_x = x[peaks_index[0]]
    peak_x = x_arr[peaks].mean()
    print(f"peak_x: {peak_x}")

    # =============================================================================
    # Poly Fit
    # =============================================================================
    deg = 10
    z = np.polyfit(x, y_org, deg, rcond=None, full=False, w=None, cov=False)

    p1 = np.poly1d(z)
    # print(f"p1: {p1}")
    y_fit = p1(x)

    z2 = np.polyfit(x, y_org, 1, rcond=None, full=False, w=None, cov=False)
    p2 = np.poly1d(z2)
    # print(f"p2: {p2}")
    y_line = p2(x)

    pp1 = np.polynomial.Polynomial.fit(x, y_org, deg)
    pp2 = np.polynomial.Polynomial.fit(x, y_org, 1)
    x_e = (pp1 - pp2).roots()

    x_e_gt_peak = x_e[(x_e > peak_x)]
    x_e_lt_peak = x_e[(x_e < peak_x)]

    # print("\n***"*3)
    # print(f"x_e_gt_peak: {x_e_gt_peak}")
    # print(f"x_e_lt_peak: {x_e_lt_peak}")
    # print("\n***"*3)

    x_left = x_e_lt_peak[-1]
    x_right = x_e_gt_peak[0]

    print(f"\nleft: {x_left}, peak: {peak_x}, right{x_right}")

    ## Get polynomial changing points
    # y = np.polyval(p1, x)
    Q = np.polyder(p1)  # f'
    xs = np.roots(Q)  # get the root of polynomial
    xs = xs[(xs > x[0]) & (xs < x[-1])]

    # print("\n********"*3)
    # print(f"  Diff 2nd: {y_d2}")
    # print(f"  xs: {xs}")

    Q2 = np.polyder(Q)  # f''
    y_d2 = np.polyval(Q2, xs)
    # is_gutter = [y_d2 > 0]
    is_gutter = np.array(y_d2 > 0)
    print(f"is_gutter: {is_gutter}")

    xs = np.array(xs)
    # xs = xs[tuple(is_gutter)]
    xs = xs[is_gutter]

    x_gutter_gt_peak = xs[(xs > peak_x)][0]
    x_gutter_lt_peak = xs[(xs < peak_x)][-1]

    xs = [x_gutter_lt_peak, x_gutter_gt_peak]
    ys = np.polyval(p1, xs)

    z_support = np.polyfit(xs,
                           ys,
                           1,
                           rcond=None,
                           full=False,
                           w=None,
                           cov=False)
    p_support = np.poly1d(z_support)
    y_support = p_support(x)

    # =============================================================================
    # Plot
    # =============================================================================
    img_save_path = './imgs/poly_fit_' + col_name + '.png'
    plt.figure()
    plt.plot(x, y_org, '*', label='original values')
    plt.plot(x, y_fit, 'r', label='polyfit values')
    plt.plot(x, y_line, 'yellow', label='fit line')
    plt.plot(x, y_support, 'grey', label='support line')
    plt.plot(xs, ys, "ro")
    plt.legend()

    plt.savefig(img_save_path, dpi=300)
    print('')
    print(f"Image saved to: \"{img_save_path}\"")
    # plt.show()
    plt.close()

    # =============================================================================
    # Calculate Peak Height
    # =============================================================================
    peaks_value = np.array(y_org[peaks])
    peaks_value_fit = np.array(y_fit[peaks])
    support_line_value = y_support[peaks_index]

    peak_height_org = peaks_value - support_line_value
    peak_height_fit = peaks_value_fit - support_line_value
    print("")
    print("----------------------------------------------")
    print(f"Peak Height (Orginal Data): {peak_height_org}")
    print(f"Peak Height (Curve Fitted Data): {peak_height_fit}")
    print("----------------------------------------------")

    cor1 = [xs[0], ys[0]]
    cor2 = [xs[1], ys[1]]
    cor3 = [x[peaks[0]], y_org[peaks[0]]]

    # a = distance(cor1, cor2)
    # b = distance(cor1, cor3)
    # c = distance(cor2, cor3)
    peak_area = calc_area(cor1, cor2, cor3)

    print("")
    print("----------------------------------------------")
    print(f"Peak Area: {peak_area}")
    print("----------------------------------------------")

    return peak_height_org, peak_height_fit, peak_area
Code example #39
plano = 200

r = 300

k = np.array([255, 141, 0])

amb = np.array([85,47,0])

L = np.array([-1,-1,-0.5])

scene = Image.new('RGB', size, color = 'black')

for i in range(size[0]):
	for j in range(size[1]):
		sigma = np.array([i,j,plano] - o)
		coef = [np.linalg.norm(sigma)*np.linalg.norm(sigma),2*np.dot(sigma,o-c),np.linalg.norm(o-c)*np.linalg.norm(o-c) - r*r]
		raiz = np.roots(coef)[1]
		if np.isreal(raiz) and raiz >= 0:
			normal = (raiz*sigma + o - c)/np.linalg.norm(raiz*sigma + o - c)
			L = L/np.linalg.norm(L)
			cos = np.dot(normal,L)
			if cos < 0:
				cos = 0
			cor = cos*k + amb
			scene.putpixel((i,j),pixcolor(cor))


imshow(scene)
show()
Code example #40
File: pcaGPU.py Project: Parall-UD/sallfus
def fusion_images(multispectral, panchromatic, save_image=False, savepath=None, timeCondition=True):
    end = 0
    start = 0

    # Check that both images meet the required conditions
    if multispectral.shape[2] == 3:
        print('The Multispectral image has '+str(multispectral.shape[2])+' channels and size of '+str(multispectral.shape[0])+'x'+str(multispectral.shape[1]))
    else:
        sys.exit('The first image is not multispectral')

    if len(panchromatic.shape) == 2:
        print(' The Panchromatic image has a size of '+str(panchromatic.shape[0])+'x'+str(panchromatic.shape[1]))
    else:
        sys.exit('The second image is not panchromatic')

    size_rgb = multispectral.shape

    # Block size definition
    BLOCK_SIZE = 32


    # Convert to float32 and split the multispectral image into its RGB bands
    m_host = multispectral.astype(np.float32)
    r_host = m_host[:,:,0].astype(np.float32)
    g_host = m_host[:,:,1].astype(np.float32)
    b_host = m_host[:,:,2].astype(np.float32)
    size_rgb = multispectral.shape
    # Convert the panchromatic image to float32
    panchromatic_host = panchromatic.astype(np.float32)


    # Start the execution timer
    start=time.time()

    # Copy the host arrays to the device
    r_gpu = gpuarray.to_gpu(r_host)
    g_gpu = gpuarray.to_gpu(g_host)
    b_gpu = gpuarray.to_gpu(b_host)
    p_gpu = gpuarray.to_gpu(panchromatic_host)

    # Compute the mean of each band on the GPU
    mean_r_gpu = misc.mean(r_gpu)
    mean_g_gpu = misc.mean(g_gpu)
    mean_b_gpu = misc.mean(b_gpu)

    # Get the number of bands
    n_bands = size_rgb[2]

    # Allocate memory on the GPU
    r_gpu_subs = gpuarray.zeros_like(r_gpu,np.float32)
    g_gpu_subs = gpuarray.zeros_like(g_gpu,np.float32)
    b_gpu_subs = gpuarray.zeros_like(b_gpu,np.float32)

    # Subtract each band's mean from every pixel of that band
    substract( r_gpu, mean_r_gpu.get(), r_gpu_subs)
    substract( g_gpu, mean_g_gpu.get(), g_gpu_subs)
    substract( b_gpu, mean_b_gpu.get(), b_gpu_subs)

    # Split each mean-subtracted band into square submatrices of the block size
    r_subs_split = split(r_gpu_subs.get(),BLOCK_SIZE,BLOCK_SIZE)
    g_subs_split = split(g_gpu_subs.get(),BLOCK_SIZE,BLOCK_SIZE)
    b_subs_split = split(b_gpu_subs.get(),BLOCK_SIZE,BLOCK_SIZE)

    # Compute the variance-covariance matrix
    mat_var_cov = varianza_cov(r_subs_split,g_subs_split,b_subs_split)

    # Coefficient used for the orthogonal diagonalization
    coefficient = 1.0/((size_rgb[0]*size_rgb[1])-1)

    # Orthogonally diagonalized matrix
    ortogonal_matrix = mat_var_cov*coefficient

    # Compute the traces of successive powers of the initial orthogonal matrix
    polynomial_trace = successive_powers(ortogonal_matrix)


    # Compute the coefficients of the characteristic polynomial
    characteristic_polynomial = polynomial_coefficients(polynomial_trace, ortogonal_matrix)

    # Get the roots of the characteristic polynomial (the eigenvalues)
    characteristic_polynomial_roots = np.roots(np.insert(characteristic_polynomial,0,1))


    # The eigenvalues appear on the diagonal of the matrix eigenvalues_mat
    eigenvalues_mat = np.diag(characteristic_polynomial_roots)


    # Eigenvectors for each eigenvalue
    eigenvectors_mat = -1*ortogonal_matrix[1:n_bands,0]

    # Compute the normalized eigenvectors
    # Each eigenvector is a column of the matrix mat_ortogonal_base
    mat_ortogonal_base, q_matrix = eigenvectors_norm(eigenvalues_mat, ortogonal_matrix, eigenvectors_mat)
    q_matrix_list = q_matrix.tolist()
    q_matrix_cpu = np.array(q_matrix_list).astype(np.float32)
    w1 = q_matrix_cpu[0,:]
    w2 = (-1)*q_matrix_cpu[1,:]
    w3 = q_matrix_cpu[2,:]
    eigenvectors = np.array((w1,w2,w3))

    # Compute the inverse of the eigenvector matrix
    inv_eigenvectors = la.inv(eigenvectors)
    inv_list = inv_eigenvectors.tolist()
    inv_eigenvector_cpu = np.array(inv_list).astype(np.float32)

    # Split the original bands into submatrices of the block size
    r_subs_split_cp = split(r_host,BLOCK_SIZE,BLOCK_SIZE)
    g_subs_split_cp = split(g_host,BLOCK_SIZE,BLOCK_SIZE)
    b_subs_split_cp = split(b_host,BLOCK_SIZE,BLOCK_SIZE)

    # Compute the principal components from the original bands and the eigenvectors
    pc_1,pc_2,pc_3 = componentes_principales_original(r_subs_split_cp,g_subs_split_cp,b_subs_split_cp,q_matrix_cpu,r_host.shape[0], BLOCK_SIZE)

    # Split the panchromatic image and principal components 2 and 3 into submatrices of the block size
    p_subs_split_nb = split(panchromatic_host,BLOCK_SIZE,BLOCK_SIZE)
    pc_2_subs_split_nb = split(pc_2,BLOCK_SIZE,BLOCK_SIZE)
    pc_3_subs_split_nb = split(pc_3,BLOCK_SIZE,BLOCK_SIZE)

    # Compute the new components from the panchromatic band, original principal components 2 and 3, and the inverse of the eigenvector matrix
    nb1,nb2,nb3 = componentes_principales_panchromartic(p_subs_split_nb,pc_2_subs_split_nb,pc_3_subs_split_nb,inv_eigenvector_cpu,r_host.shape[0], BLOCK_SIZE)

    nb11 = nb1.astype(np.float32)
    nb22 = nb2.astype(np.float32)
    nb33 = nb3.astype(np.float32)


    nb11_gpu = gpuarray.to_gpu(nb11)
    nb22_gpu = gpuarray.to_gpu(nb22)
    nb33_gpu = gpuarray.to_gpu(nb33)

    # Allocate memory for the matrices resulting from the adjustment
    nb111_gpu = gpuarray.empty_like(nb11_gpu)
    nb222_gpu = gpuarray.empty_like(nb22_gpu)
    nb333_gpu = gpuarray.empty_like(nb33_gpu)

    # Adjust pixel values that are below 0, on the GPU
    negative_adjustment(nb11_gpu,nb111_gpu)
    negative_adjustment(nb22_gpu,nb222_gpu)
    negative_adjustment(nb33_gpu,nb333_gpu)

    nb111_cpu = nb111_gpu.get().astype(np.uint8)
    nb222_cpu = nb222_gpu.get().astype(np.uint8)
    nb333_cpu = nb333_gpu.get().astype(np.uint8)


    end = time.time()


    fusioned_image=np.stack((nb111_cpu,nb222_cpu,nb333_cpu),axis=2);
    if(save_image):
        # Save the resulting image according to the third parameter given on the script's command line
        if(savepath != None):
            t = skimage.io.imsave(savepath+'/pcagpu_image.tif',fusioned_image, plugin='tifffile')
        else:
            t = skimage.io.imsave('pcagpu_image.tif',fusioned_image, plugin='tifffile')
    # Execution time of the fusion on the GPU
    time_calculated = (end-start)
    if(timeCondition):
        return {"image": fusioned_image, "time" :  time_calculated}
    else:
        return fusioned_image
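
# For reference: the eigen-decomposition that the code above builds by hand
# (traces of successive powers -> characteristic polynomial -> np.roots) can be
# cross-checked against numpy's direct routine on any 3x3 covariance matrix.
# A minimal sketch with a made-up symmetric matrix (not part of the original file):
import numpy as np

cov = np.array([[4.0, 2.0, 0.5],
                [2.0, 3.0, 1.0],
                [0.5, 1.0, 2.0]])
poly_roots = np.roots(np.poly(cov))      # characteristic-polynomial route, as above
eigvals, eigvecs = np.linalg.eig(cov)    # direct route
print(np.sort(poly_roots), np.sort(eigvals))  # the two agree to numerical precision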
コード例 #41
0
# In[1]:

import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')

# ## Example
#
# We study the function $f(x) = x^2 + x - 6$.

# In[2]:

x = np.linspace(-4, 4, 50)
f = lambda x: x**2 + x - 6

xr = np.roots([1, 1, -6])
print('Roots: x1 = {:f}, x2 = {:f}'.format(xr[0], xr[1]))

# iteration function for the fixed-point method
g1 = lambda x: 6 - x**2

plt.plot(x, f(x), label='$f(x)$')
plt.plot(x, x, 'k--', label='$y=x$')
plt.plot(x, 0 * x, 'c-.', label='$y=0$')
plt.plot(x, g1(x), 'r--', label='$g_1(x)$')

plt.axvline(-3, -5, 10, color='m')
plt.axvline(2, -5, 10, color='m')
plt.legend(loc='best')
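
# A minimal sketch (not in the original notebook) of the fixed-point iteration
# that g1 illustrates: x_{k+1} = g1(x_k). The fixed points of g1 are exactly the
# roots of f, but the iteration only converges where |g1'(x)| < 1; at x = 2 we
# have |g1'(2)| = 4, so iterates started nearby drift away from the root.

x_k = 1.9  # hypothetical starting guess near the root x = 2
for _ in range(4):
    x_k = g1(x_k)
    print(x_k)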

# ## Example
コード例 #42
0
ファイル: models.py プロジェクト: bc-jmoork/SFND
import math
import numpy as np

a = 5.  # in m/sec2
v = 30.  # in km/h
d = 25.  # in m
v = 30 * 1000 / (60.0 * 60.0)  # convert 30 km/h to m/s
tlc_cam = math.sqrt(2 * d / (3 * a))
print(tlc_cam)

coeff = [a / 2, v, -d]
roots = (np.roots(coeff))
print(roots)
print(pow(roots[0], 2) * a / 2 + roots[0] * v - d)
print(pow(roots[1], 2) * a / 2 + roots[1] * v - d)

tlc_cvm = (d / (v))
print(tlc_cvm)
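
# The quadratic above is d = v*t + (a/2)*t**2, the constant-acceleration model.
# Its two roots have a negative product, so exactly one is positive, and that one
# is the physically meaningful time-to-crossing. Illustrative selection
# (`tlc_accel` is a name introduced here, not from the original):
tlc_accel = max(roots.real)
print(tlc_accel)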
コード例 #43
0
def class_D(event_list,
            particle_keys,
            sqrt_s134,
            sqrt_s256,
            sqrt_s13,
            sqrt_s25,
            m1=0,
            m2=0,
            m3=-1,
            m4=-1,
            m5=-1,
            m6=-1):
    """
    Gets the two missing momenta for the following topology:
                   ╭────────── missing (p1)
     x1 ╲    s_13 ╱╰────────── visible (p3)
         ╲       ╱──────────── visible (p4)
          ╲     ╱ s_134
           █████
          ╱     ╲ s_256
         ╱       ╲──────────── visible (p6)
     x2 ╱    s_25 ╲╭────────── visible (p5)
                   ╰────────── missing (p2)
    m3, ..., m6 can be specified, if -1 they are calculated
    """
    solutions = []
    if not isinstance(event_list, list):
        if isinstance(event_list, Event):
            event_list = [event_list]
        else:
            raise TypeError("'event_list' should be a list of Event objects")

    for event in event_list:
        p3 = event[particle_keys[2]]
        p4 = event[particle_keys[3]]
        p5 = event[particle_keys[4]]
        p6 = event[particle_keys[5]]
        p_branches = FourMomentum(0.0, 0.0, 0.0, 0.0)
        for k in event.particles:
            if event.is_visible(k):
                p_branches += event[k]

        [event.add_particle(key, Particle(0, 1, FourMomentum())) for key in particle_keys[:2] \
            if not key in event.particles]

        m3 = set_mass(m3, p3)
        m4 = set_mass(m4, p4)
        m5 = set_mass(m5, p5)
        m6 = set_mass(m6, p6)

        s13 = sqrt_s13**2
        s25 = sqrt_s25**2
        s134 = sqrt_s134**2
        s256 = sqrt_s256**2

        R13 = (s13 - m3**2 - m1**2) / 2.0
        R25 = (s25 - m5**2 - m2**2) / 2.0
        R134 = (s134 - m1**2 - m3**2 - m4**2 - 2 * (p3 * p4)) / 2.0
        R256 = (s256 - m2**2 - m5**2 - m6**2 - 2 * (p5 * p6)) / 2.0
        R14 = R134 - R13
        R26 = R256 - R25

        # First define coefficients for vector equations:
        # p1 = a1 + b1*E1 + c1*E2 / p2 = a2 + b2*E2 + c2*E1
        # Numbers from SymPy
        PF = 1/(p3.x*p4.z*p5.y*p6.z - p3.x*p4.z*p5.z*p6.y - \
            p3.y*p4.z*p5.x*p6.z + p3.y*p4.z*p5.z*p6.x - \
            p3.z*p4.x*p5.y*p6.z + p3.z*p4.x*p5.z*p6.y + \
            p3.z*p4.y*p5.x*p6.z - p3.z*p4.y*p5.z*p6.x)
        a1 = PF*np.array(
            [-(R13*p4.z*p5.y*p6.z - R13*p4.z*p5.z*p6.y - R14*p3.z*p5.y*p6.z + \
            R14*p3.z*p5.z*p6.y + R25*p3.y*p4.z*p6.z - R25*p3.z*p4.y*p6.z - \
            R26*p3.y*p4.z*p5.z + R26*p3.z*p4.y*p5.z - p3.y*p4.z*p5.x*p6.z*p_branches.x - \
            p3.y*p4.z*p5.y*p6.z*p_branches.y + p3.y*p4.z*p5.z*p6.x*p_branches.x + \
            p3.y*p4.z*p5.z*p6.y*p_branches.y + p3.z*p4.y*p5.x*p6.z*p_branches.x + \
            p3.z*p4.y*p5.y*p6.z*p_branches.y - p3.z*p4.y*p5.z*p6.x*p_branches.x - \
            p3.z*p4.y*p5.z*p6.y*p_branches.y),
            (R13*p4.z*p5.x*p6.z - R13*p4.z*p5.z*p6.x - R14*p3.z*p5.x*p6.z + \
            R14*p3.z*p5.z*p6.x + R25*p3.x*p4.z*p6.z - R25*p3.z*p4.x*p6.z - \
            R26*p3.x*p4.z*p5.z + R26*p3.z*p4.x*p5.z - p3.x*p4.z*p5.x*p6.z*p_branches.x - \
            p3.x*p4.z*p5.y*p6.z*p_branches.y + p3.x*p4.z*p5.z*p6.x*p_branches.x + \
            p3.x*p4.z*p5.z*p6.y*p_branches.y + p3.z*p4.x*p5.x*p6.z*p_branches.x + \
            p3.z*p4.x*p5.y*p6.z*p_branches.y - p3.z*p4.x*p5.z*p6.x*p_branches.x - \
            p3.z*p4.x*p5.z*p6.y*p_branches.y),
            (R13*p4.x*p5.y*p6.z - R13*p4.x*p5.z*p6.y - R13*p4.y*p5.x*p6.z + R13*p4.y*p5.z*p6.x - \
            R14*p3.x*p5.y*p6.z + R14*p3.x*p5.z*p6.y + R14*p3.y*p5.x*p6.z - R14*p3.y*p5.z*p6.x - \
            R25*p3.x*p4.y*p6.z + R25*p3.y*p4.x*p6.z + R26*p3.x*p4.y*p5.z - R26*p3.y*p4.x*p5.z + \
            p3.x*p4.y*p5.x*p6.z*p_branches.x + p3.x*p4.y*p5.y*p6.z*p_branches.y - \
            p3.x*p4.y*p5.z*p6.x*p_branches.x - p3.x*p4.y*p5.z*p6.y*p_branches.y - \
            p3.y*p4.x*p5.x*p6.z*p_branches.x - p3.y*p4.x*p5.y*p6.z*p_branches.y + \
            p3.y*p4.x*p5.z*p6.x*p_branches.x + p3.y*p4.x*p5.z*p6.y*p_branches.y)],
            dtype=float
        )
        b1 = PF*np.array(
            [-(-p3.E*p4.z*p5.y*p6.z + p3.E*p4.z*p5.z*p6.y + p3.z*p4.E*p5.y*p6.z - p3.z*p4.E*p5.z*p6.y),
            -p3.E*p4.z*p5.x*p6.z + p3.E*p4.z*p5.z*p6.x + p3.z*p4.E*p5.x*p6.z - p3.z*p4.E*p5.z*p6.x,
            (-p3.E*p4.x*p5.y*p6.z + p3.E*p4.x*p5.z*p6.y + p3.E*p4.y*p5.x*p6.z - p3.E*p4.y*p5.z*p6.x + \
            p3.x*p4.E*p5.y*p6.z - p3.x*p4.E*p5.z*p6.y - p3.y*p4.E*p5.x*p6.z + p3.y*p4.E*p5.z*p6.x)],
            dtype=float
        )
        c1 = PF * np.array([
            -(-p3.y * p4.z * p5.E * p6.z + p3.y * p4.z * p5.z * p6.E +
              p3.z * p4.y * p5.E * p6.z - p3.z * p4.y * p5.z * p6.E),
            -p3.x * p4.z * p5.E * p6.z + p3.x * p4.z * p5.z * p6.E +
            p3.z * p4.x * p5.E * p6.z - p3.z * p4.x * p5.z * p6.E,
            (p3.x * p4.y * p5.E * p6.z - p3.x * p4.y * p5.z * p6.E -
             p3.y * p4.x * p5.E * p6.z + p3.y * p4.x * p5.z * p6.E)
        ],
                           dtype=float)
        a2 = PF*np.array(
            [R13*p4.z*p5.y*p6.z - R13*p4.z*p5.z*p6.y - R14*p3.z*p5.y*p6.z + R14*p3.z*p5.z*p6.y + \
            R25*p3.y*p4.z*p6.z - R25*p3.z*p4.y*p6.z - R26*p3.y*p4.z*p5.z + R26*p3.z*p4.y*p5.z - \
            p3.x*p4.z*p5.y*p6.z*p_branches.x + p3.x*p4.z*p5.z*p6.y*p_branches.x - \
            p3.y*p4.z*p5.y*p6.z*p_branches.y + p3.y*p4.z*p5.z*p6.y*p_branches.y + \
            p3.z*p4.x*p5.y*p6.z*p_branches.x - p3.z*p4.x*p5.z*p6.y*p_branches.x + \
            p3.z*p4.y*p5.y*p6.z*p_branches.y - p3.z*p4.y*p5.z*p6.y*p_branches.y,
            -(R13*p4.z*p5.x*p6.z - R13*p4.z*p5.z*p6.x - R14*p3.z*p5.x*p6.z + R14*p3.z*p5.z*p6.x + \
            R25*p3.x*p4.z*p6.z - R25*p3.z*p4.x*p6.z - R26*p3.x*p4.z*p5.z + R26*p3.z*p4.x*p5.z - \
            p3.x*p4.z*p5.x*p6.z*p_branches.x + p3.x*p4.z*p5.z*p6.x*p_branches.x - \
            p3.y*p4.z*p5.x*p6.z*p_branches.y + p3.y*p4.z*p5.z*p6.x*p_branches.y + \
            p3.z*p4.x*p5.x*p6.z*p_branches.x - p3.z*p4.x*p5.z*p6.x*p_branches.x + \
            p3.z*p4.y*p5.x*p6.z*p_branches.y - p3.z*p4.y*p5.z*p6.x*p_branches.y),
            R13*p4.z*p5.x*p6.y - R13*p4.z*p5.y*p6.x - R14*p3.z*p5.x*p6.y + R14*p3.z*p5.y*p6.x + \
            R25*p3.x*p4.z*p6.y - R25*p3.y*p4.z*p6.x - R25*p3.z*p4.x*p6.y + R25*p3.z*p4.y*p6.x - \
            R26*p3.x*p4.z*p5.y + R26*p3.y*p4.z*p5.x + R26*p3.z*p4.x*p5.y - R26*p3.z*p4.y*p5.x - \
            p3.x*p4.z*p5.x*p6.y*p_branches.x + p3.x*p4.z*p5.y*p6.x*p_branches.x - \
            p3.y*p4.z*p5.x*p6.y*p_branches.y + p3.y*p4.z*p5.y*p6.x*p_branches.y + \
            p3.z*p4.x*p5.x*p6.y*p_branches.x - p3.z*p4.x*p5.y*p6.x*p_branches.x + \
            p3.z*p4.y*p5.x*p6.y*p_branches.y - p3.z*p4.y*p5.y*p6.x*p_branches.y],
            dtype=float
        )
        b2 = PF * np.array([
            -p3.E * p4.z * p5.y * p6.z + p3.E * p4.z * p5.z * p6.y +
            p3.z * p4.E * p5.y * p6.z - p3.z * p4.E * p5.z * p6.y,
            -(-p3.E * p4.z * p5.x * p6.z + p3.E * p4.z * p5.z * p6.x +
              p3.z * p4.E * p5.x * p6.z - p3.z * p4.E * p5.z * p6.x),
            -p3.E * p4.z * p5.x * p6.y + p3.E * p4.z * p5.y * p6.x +
            p3.z * p4.E * p5.x * p6.y - p3.z * p4.E * p5.y * p6.x
        ],
                           dtype=float)
        c2 = PF*np.array(
            [-p3.y*p4.z*p5.E*p6.z + p3.y*p4.z*p5.z*p6.E + p3.z*p4.y*p5.E*p6.z - p3.z*p4.y*p5.z*p6.E,
            -(-p3.x*p4.z*p5.E*p6.z + p3.x*p4.z*p5.z*p6.E + p3.z*p4.x*p5.E*p6.z - p3.z*p4.x*p5.z*p6.E),
            -p3.x*p4.z*p5.E*p6.y + p3.x*p4.z*p5.y*p6.E + p3.y*p4.z*p5.E*p6.x - p3.y*p4.z*p5.x*p6.E + \
            p3.z*p4.x*p5.E*p6.y - p3.z*p4.x*p5.y*p6.E - p3.z*p4.y*p5.E*p6.x + p3.z*p4.y*p5.x*p6.E],
            dtype=float
        )

        # Enforcing the mass-shell equation for p1:
        # E1^2 = m1^2 + p1·p1
        # 0 = A*E1^2 + (B0 + B1*E2)*E1 + C0 + C1*E2 + C2*E2^2
        A = np.dot(b1, b1) - 1
        B0 = 2 * np.dot(a1, b1)
        B1 = 2 * np.dot(b1, c1)
        C0 = np.dot(a1, a1) + m1**2
        C1 = 2 * np.dot(a1, c1)
        C2 = np.dot(c1, c1)
        # 0 = D*E^2 + (F0 + F1*E1)*E2 + G0 + G1*E1 + G2*E1^2
        D = np.dot(c2, c2) - 1
        F0 = 2 * np.dot(a2, c2)
        F1 = 2 * np.dot(b2, c2)
        G0 = np.dot(a2, a2) + m2**2
        G1 = 2 * np.dot(a2, b2)
        G2 = np.dot(b2, b2)
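        # Eliminating E1 between the two quadratics above leaves a single quartic
        # in E2; its coefficients (generated symbolically, like the vector
        # coefficients earlier) are written out below.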

        E2_sols = np.roots(np.nan_to_num([(-A**2*D**2 + A*(B1*D*F1 + 2*C2*D*G2 - C2*F1**2) + \
            G2*(-B1**2*D + B1*C2*F1 - C2**2*G2))/A**2,
            (-2*A**2*D*F0 + A*(B0*D*F1 + B1*D*G1 + B1*F0*F1 + 2*C1*D*G2 - C1*F1**2 + \
            2*C2*F0*G2 - 2*C2*F1*G1) + G2*(-2*B0*B1*D + B0*C2*F1 - B1**2*F0 + B1*C1*F1 + \
            B1*C2*G1 - 2*C1*C2*G2))/A**2,
            (-A**2*(2*D*G0 + F0**2) + A*(B0*D*G1 + B0*F0*F1 + B1*F0*G1 + B1*F1*G0 + \
            2*C0*D*G2 - C0*F1**2 + 2*C1*F0*G2 - 2*C1*F1*G1 + 2*C2*G0*G2 - C2*G1**2) + \
            G2*(-B0**2*D - 2*B0*B1*F0 + B0*C1*F1 + B0*C2*G1 - B1**2*G0 + B1*C0*F1 + B1*C1*G1 - \
            2*C0*C2*G2 - C1**2*G2))/A**2,
            (-2*A**2*F0*G0 + A*(B0*F0*G1 + B0*F1*G0 + B1*G0*G1 + 2*C0*F0*G2 - 2*C0*F1*G1 + \
            2*C1*G0*G2 - C1*G1**2) + G2*(-B0**2*F0 - 2*B0*B1*G0 + B0*C0*F1 + B0*C1*G1 + B1*C0*G1 - 2*C0*C1*G2))/A**2,
            (-A**2*G0**2 + A*(B0*G0*G1 + 2*C0*G0*G2 - C0*G1**2) + G2*(-B0**2*G0 + B0*C0*G1 - C0**2*G2))/A**2]))
        E2_sols = remove_complex(E2_sols)

        # For each E2, there will be 2 p1's, one will have the wrong mass
        for E2_sol in E2_sols:
            if not valid_energy(E2_sol):
                continue
            for pm in [-1, 1]:
                E1_sol = (-B0 - B1 * E2_sol + pm * (
                    ((B0 + B1 * E2_sol)**2 - 4 * A *
                     (C0 + C1 * E2_sol + C2 * E2_sol**2))**0.5)) / (2 * A)
                p1_sol = FourMomentum(E1_sol,
                                      *(a1 + E1_sol * b1 + E2_sol * c1))
                if abs(p1_sol.m2 -
                       m1**2) > FP_TOLERANCE or not valid_energy(E1_sol):
                    continue
                p2_sol = FourMomentum(E2_sol,
                                      *(a2 + E1_sol * b2 + E2_sol * c2))
                if abs(p2_sol.m2 - m2**2) > FP_TOLERANCE:
                    continue
                x1 = (p1_sol.E + p1_sol.z + p_branches.E + p_branches.z +
                      p2_sol.E + p2_sol.z) / event.sqrt_s
                x2 = (p1_sol.E - p1_sol.z + p_branches.E - p_branches.z +
                      p2_sol.E - p2_sol.z) / event.sqrt_s
                if x1 < 0.0 or x2 < 0.0 or x1 > 1.0 or x2 > 1.0:
                    continue
                output_event = deepcopy(event)
                output_event.set_momentum(particle_keys[0], p1_sol)
                output_event.set_momentum(particle_keys[1], p2_sol)
                output_event.x1 = x1
                output_event.x2 = x2
                solutions.append(output_event)

    return solutions
コード例 #44
0
ファイル: task3.py プロジェクト: mohamadamoud/GroupHWS3
         color='red',
         linestyle='dashed')

ax1.set_title(r'$\alpha= 1$')
plt.legend()

fig2 = plt.figure()
ax2 = fig2.gca(projection='3d')

A1 = np.zeros(int(33e3))
A2 = np.zeros(int(33e3))
X = np.zeros(int(33e3))
ctr = 0
for a1 in np.arange(-4, 4, 0.03):
    for a2 in np.arange(-4, 4, 0.03):
        rrr = np.roots([-1, 0, a2, a1])
        #rrr = np.roots([a1,a2,0,-1])

        # np.round (unlike the builtin round) also handles complex roots
        xd0 = np.round(rrr[0], 5)
        xd1 = np.round(rrr[1], 5)
        xd2 = np.round(rrr[2], 5)
        if (np.isreal(xd0) and np.isreal(xd1) and np.isreal(xd2)):

            A1[ctr] = a1
            A1[ctr + 1] = a1
            A1[ctr + 2] = a1

            A2[ctr] = a2
            A2[ctr + 1] = a2
            A2[ctr + 2] = a2
コード例 #45
0
    prev_saida = y
    prev_entrada = input
    output_data[i] = y

# Write the output data to a file
with open("sai_sweep_mm_4.pcm", "wb") as output:
    for x in output_data:
        output.write(x)
output.close()

# Zero: wc*z + wc
# Pole: Fl*z + wc*z + wc - Fl
zero = [0, wc, wc]
polo = [0, (Fl + wc), (wc - Fl)]

z = np.roots(zero)
p = np.roots(polo)
print(z, p)
#zplane(z,p)

# Name and size of the window to be opened
matp.figure(u"Gráfico da transformada Z", figsize=(15, 8))

# Plot the graphs
matp.subplot(411)
matp.title(u"Gráfico input")
matp.xlabel("amostra")
matp.ylabel("Amplitude")
matp.grid(1)
matp.plot(data)
コード例 #46
0
    def _parabolic_fit(self, pseudodensity, superscript, atomi):
        """ Optimize phi using parabolic fitting.
            This is used in step 3 (for phi_A^I) and in steps 4-6 (for phi_A^II).
            
            --- Inputs ---
            phiA            Either phi_A^I or phi_A^II
            superscript     1 when calculating phi_I (STEP 3)
                            2 when calculating phi_II (STEPS 4-6)
            atomi           Index of target atom as in ccData object
            
            Refer to Figure S1 and S3 for an overview.
        """
        # Set update methods for phi_A^I or phi_A^II
        if superscript == 1:

            def update(pdens, bigPhi, atomi):
                return self._update_phiai(pdens, bigPhi, atomi)

        elif superscript == 2:

            def update(pdens, bigPhi, atomi):
                return self._update_phiaii(pseudodensity, bigPhi, atomi)

        # lowerbigPhi is bigPhi that yields biggest negative phi.
        # upperbigPhi is bigPhi that yields smallest positive phi.
        # The point here is to find two phi values that are closest to zero (from positive side
        # and negative side respectively).
        self._candidates_phi[atomi] = numpy.array(self._candidates_phi[atomi],
                                                  dtype=float)
        self._candidates_bigPhi[atomi] = numpy.array(
            self._candidates_bigPhi[atomi], dtype=float)
        if numpy.count_nonzero(self._candidates_phi[atomi] < 0) > 0:
            # If there is at least one candidate phi that is negative
            lower_ind = numpy.where(
                self._candidates_phi[atomi] == self._candidates_phi[atomi][
                    self._candidates_phi[atomi] < 0].max())[0][0]
            lowerbigPhi = self._candidates_bigPhi[atomi][lower_ind]
            lowerphi = self._candidates_phi[atomi][lower_ind]
        else:  # assign some large negative number otherwise
            lowerbigPhi = -numpy.inf
            lowerphi = -numpy.inf
        if numpy.count_nonzero(self._candidates_phi[atomi] > 0) > 0:
            # If there is at least one candidate phi that is positive
            upper_ind = numpy.where(
                self._candidates_phi[atomi] == self._candidates_phi[atomi][
                    self._candidates_phi[atomi] > 0].min())[0][0]
            upperbigPhi = self._candidates_bigPhi[atomi][upper_ind]
            upperphi = self._candidates_phi[atomi][upper_ind]
        else:  # assign some large positive number otherwise
            upperbigPhi = numpy.inf
            upperphi = numpy.inf

        for iteration in range(self.max_iteration):
            # Flow diagram on Figure S1 in doi: 10.1039/c6ra04656h details the procedure.
            # Find midpoint between positive bigPhi that yields phi closest to zero and negative
            # bigPhi closest to zero. Then, evaluate phi.
            # This can be thought as linear fitting compared to parabolic fitting below.
            midbigPhi = (lowerbigPhi + upperbigPhi) / 2.0
            midphi = update(pseudodensity, midbigPhi, atomi)[0]
            # Exit conditions -- if any of three phi values are within the convergence level.
            if abs(lowerphi) < self.convergence_level:
                return lowerbigPhi
            elif abs(upperphi) < self.convergence_level:
                return upperbigPhi
            elif abs(midphi) < self.convergence_level:
                return midbigPhi

            # Parabolic fitting as described on Figure S1 in doi: 10.1039/c6ra04656h
            # Type casting here converts from size 1 numpy.ndarray to float
            xpts = numpy.array(
                [float(lowerbigPhi),
                 float(midbigPhi),
                 float(upperbigPhi)],
                dtype=float)
            ypts = numpy.array(
                [float(lowerphi),
                 float(midphi),
                 float(upperphi)], dtype=float)
            fit = numpy.polyfit(xpts, ypts, 2)
            roots = numpy.roots(
                fit)  # max two roots (bigPhi) from parabolic fitting

            # Find phi for two bigPhis that were obtained from parabolic fitting.
            belowphi = update(pseudodensity, roots.min(), atomi)[0]
            abovephi = update(pseudodensity, roots.max(), atomi)[0]

            # If phi values from parabolically fitted bigPhis lie within the convergence level,
            # exit the iterative algorithm.
            if abs(abovephi) < self.convergence_level:
                return roots.min()
            elif abs(belowphi) < self.convergence_level:
                return roots.max()
            else:
                # Otherwise, corrected phi value is obtained in a way that cuts the numerical
                # search domain in half in each iteration.
                if 3 * abs(abovephi) < abs(belowphi):
                    corbigPhi = roots.max() - 2.0 * abovephi * (
                        roots.max() - roots.min()) / (abovephi - belowphi)
                elif 3 * abs(belowphi) < abs(abovephi):
                    corbigPhi = roots.min() - 2.0 * belowphi * (
                        roots.max() - roots.min()) / (abovephi - belowphi)
                else:
                    corbigPhi = (roots.max() + roots.min()) / 2.0
                # New candidates of phi and bigPhi are determined as bigPhi yielding largest
                # negative phi and bigPhi yielding smallest positive phi. This is analogous to how
                # the first candidate phi values are evaluated.
                corphi = update(pseudodensity, corbigPhi, atomi)[0]
                self._candidates_bigPhi[atomi] = numpy.array(
                    [
                        lowerbigPhi,
                        midbigPhi,
                        upperbigPhi,
                        roots.max(),
                        roots.min(),
                        corbigPhi,
                    ],
                    dtype=float,
                )
                self._candidates_phi[atomi] = numpy.array(
                    [lowerphi, midphi, upperphi, abovephi, belowphi, corphi],
                    dtype=float)

                # Set new upperphi and lowerphi
                lower_ind = numpy.where(
                    self._candidates_phi[atomi] == self._candidates_phi[atomi][
                        self._candidates_phi[atomi] < 0].max())[0][0]
                upper_ind = numpy.where(
                    self._candidates_phi[atomi] == self._candidates_phi[atomi][
                        self._candidates_phi[atomi] > 0].min())[0][0]

                lowerphi = self._candidates_phi[atomi][lower_ind]
                upperphi = self._candidates_phi[atomi][upper_ind]

                # If new lowerphi or upperphi values are within convergence level, exit the
                # iterative algorithm. Otherwise, start new linear/parabolic fitting.
                if abs(lowerphi) < self.convergence_level:
                    return self._candidates_bigPhi[atomi][lower_ind]
                elif abs(upperphi) < self.convergence_level:
                    return self._candidates_bigPhi[atomi][upper_ind]
                else:
                    # Fitting needs to continue in this case.
                    lowerbigPhi = self._candidates_bigPhi[atomi][lower_ind]
                    lowerphi = self._candidates_phi[atomi][lower_ind]
                    upperbigPhi = self._candidates_bigPhi[atomi][upper_ind]
                    upperphi = self._candidates_phi[atomi][upper_ind]

        # Raise Exception if convergence is not achieved within max_iteration.
        raise ConvergenceError("Iterative conditioning failed to converge.")
コード例 #47
0
#
# Find the roots of $P(x) = 3x^3 + 7x^2 - 36x + 20$.

# #### Solution

# To make things clear, we first place the coefficients of $P(x)$ in an _array_ called `p`.

# In[2]:

p = np.array([3, 7, -36, 20])

# Next, we do:

# In[3]:

x = np.roots(p)

# We can print the roots as follows:

# In[4]:

for i, v in enumerate(x):
    print(f'Root {i}: {v}')

# ### `polyval`
#
# We can use numpy's `polyval` function to evaluate $P(x)$ at $x = x_0$. Let us verify whether the roots found above really satisfy the given polynomial.

# In[5]:

for i in x:
    # truncated in the original snippet; the natural continuation evaluates P at each root
    print(np.polyval(p, i))
コード例 #48
0
def run_sparse(GG, CC, W, d, n=2, fS=1.0, xunits='Hz'):
    print("=" * 40)
    print(f"run_sparse: n={n} fS={fS}")

    numerator_coeffs, denominator_coeffs = interpolate(GG, CC, W, d, n, fS)

    def f(x):
        s = np.format_float_positional(x, 5, trim='-')
        if s[-1] == '.':
            s = s[:-1]
        if s == "-0":
            s = "0"
        return s

    def cf(z):
        if type(z) is complex or type(z) is np.complex128:
            sRe = f(z.real)
            sIm = f(z.imag)
            if sIm == "0":
                return sRe
            elif sRe == "0":
                return f"{sIm}j"
            elif z.imag < 0:
                return f"{sRe}{sIm}j"
            else:
                return f"{sRe}+{sIm}j"
        else:
            return f(z)

    def p(a):
        return '(' + (' '.join(cf(z) for z in a)) + ')'

    def p_factored(a):
        return ''.join(f'(s-({cf(-z)}))' for z in a)

    def convert_to_poly(k, a):
        c = np.array([k])
        # (s - p)*c
        for z in a:
            c = z * np.block([c, np.zeros(
                (1, ))]) + np.block([np.zeros((1, )), c])
        return c

    numerator_coeffs = reduce_poly(numerator_coeffs)
    denominator_coeffs = reduce_poly(denominator_coeffs)
    print(f"DFT Interpolated: {p(numerator_coeffs)} {p(denominator_coeffs)}")
    plot_transfer_function(1,
                           numerator_coeffs,
                           1,
                           denominator_coeffs,
                           xunits=xunits)

    n_roots = np.roots(numerator_coeffs[::-1])
    d_roots = np.roots(denominator_coeffs[::-1])

    def pc(k, negative_roots):
        print(cf(k), p_factored(negative_roots))

    pc(numerator_coeffs[-1], (-x for x in n_roots))
    pc(denominator_coeffs[-1], (-x for x in d_roots))

    print("=" * 40)

    n_k, n_a = numerator(GG.A, CC.A, W, d)
    pc(n_k, n_a)
    d_k, d_a = denominator(GG.A, CC.A, W, d)
    pc(d_k, d_a)

    print(
        f"QZ Regenerated: {p(convert_to_poly( n_k, n_a))} {p(convert_to_poly( d_k, d_a))}"
    )
コード例 #49
0
    def get_safe_action(self, observation, action, environment):
        flag = True
        landmark_near = []
        for i, landmark in enumerate(environment.world.landmarks[0:-1]):
            dist = np.sqrt(np.sum(np.square(environment.world.policy_agents[0].state.p_pos - landmark.state.p_pos))) \
                   - (environment.world.policy_agents[0].size + landmark.size) - 0.044
            if dist <= 0:
                landmark_near.append(landmark)
                flag = False
        if flag:
            return action, False

        x = observation[1]
        y = observation[2]
        V = observation[0]
        theta = observation[3]
        omega = observation[4]
        # print(theta)
        d_omega = action[3] - action[4]
        dt = environment.world.dt
        a1 = x + V * np.cos(theta) * dt + theta * V * np.sin(theta) * dt
        b1 = -V * np.sin(theta) * dt
        a2 = y + V * np.sin(theta) * dt - theta * V * np.cos(theta) * dt
        b2 = V * np.cos(theta) * dt
        c1 = a1 + b1 * theta
        d1 = b1 * dt
        c2 = a2 + b2 * theta
        d2 = b2 * dt
        e1 = -2 * x * c1
        f1 = -2 * x * d1
        e2 = -2 * y * c2
        f2 = -2 * y * d2

        flag = True
        for _, landmark in enumerate(landmark_near):
            self.R = landmark.size + 0.5 * environment.world.policy_agents[
                0].size
            x0 = landmark.state.p_pos[0]
            y0 = landmark.state.p_pos[1]
            landmark0 = landmark

            g = d1 * d1 + d2 * d2
            h = 2 * c1 * d1 + 2 * c2 * d2 + f1 + f2
            i = c1 * c1 + c2 * c2 + e1 + e2 + np.square(landmark0.state.p_pos[0]) + \
                np.square(landmark0.state.p_pos[1])
            lower_c = np.square(landmark0.size + 0.01)
            upper_c = inf
            A = landmark0.state.p_pos[0] - x
            B = landmark0.state.p_pos[1] - y
            C = - (landmark0.state.p_pos[0]) * x \
                + np.square(x) \
                - (landmark0.state.p_pos[1]) * y \
                + np.square(y)

            # solve x3
            self.x0 = np.array([x, y])
            self.x1 = np.array([x0, y0])
            args = [
                np.square(self.x1[1] - self.x0[1]) +
                np.square(self.x1[0] - self.x0[0]),
                2 * (self.x1[1] - self.x0[1]) * np.square(self.R),
                np.square(np.square(self.R)) -
                np.square(self.R) * np.square(self.x1[0] - self.x0[0])
            ]
            root = np.roots(args)
            y3_0 = root[0] + self.x1[1]
            y3_1 = root[1] + self.x1[1]
            x3_0 = (-(y3_0 - self.x1[1]) * (self.x1[1] - self.x0[1]) - np.square(self.R))/(self.x1[0] - self.x0[0]) + \
                    self.x1[0]
            x3_1 = (-(y3_1 - self.x1[1]) * (self.x1[1] - self.x0[1]) - np.square(self.R)) / (self.x1[0] - self.x0[0]) + \
                    self.x1[0]

            x3 = np.array([x3_0, y3_0])
            temp1 = ((y - y0) * c1 - (x - x0) * c2 - y * x0 + y0 * x) \
                    * ((y - y0) * x3[0] - (x - x0) * x3[1] - y * x0 + y0 * x)
            x3 = np.array([x3_1, y3_1])
            x3 = np.array([x3_0, y3_0]) if temp1 > 0 else np.array(
                [x3_1, y3_1])
            x3[0] = 1.1 * x3[0] - 0.1 * self.x1[0]
            x3[1] = 1.1 * x3[1] - 0.1 * self.x1[1]
            '''print(((y - y0) * c1 - (x - x0) * d1 - y * x0 + y0 * x)
                  * ((y - y0) * x3[0] - (x - x0) * x3[1] - y * x0 + y0 * x))
            print((x3[0] - self.x0[0]) * (x3[0] - self.x1[0]) + (x3[1] - self.x0[1]) * (x3[1] - self.x1[1]))
            print((x3[0] - self.x1[0]) * (x3[0] - self.x1[0]) + (x3[1] - self.x1[1]) * (x3[1] - self.x1[1]) - self.R * self.R)'''
            temp2 = ((x3[1] - self.x0[1]) * self.x1[0] - (x3[0] - self.x0[0]) * self.x1[1] + self.x0[1] * x3[0] - self.x0[0] * x3[1]) \
                    * ((x3[1] - self.x0[1]) * c1 - (x3[0] - self.x0[0]) * c2 + self.x0[1] * x3[0] - self.x0[0] * x3[1])
            k1 = temp2 * (x3[1] - self.x0[1])
            k2 = temp2 * (x3[0] - self.x0[0])
            k3 = temp2 * (self.x0[1] * x3[0] - self.x0[0] * x3[1])
            dist = np.sqrt(np.sum(np.square(environment.world.landmarks[-1].state.p_pos - landmark.state.p_pos))) \
                   - (environment.world.landmarks[-1].size + 1.2 * landmark.size)
            if temp2 > 0:
                flag = False
                break
        if flag:
            return action, False
        self.num_call = self.num_call + 1

        # Make a MOSEK environment
        with mosek.Env() as env:
            # Attach a printer to the environment
            # env.set_Stream(mosek.streamtype.log, streamprinter)

            # Create a task
            with env.Task(0, 0) as task:
                # task.set_Stream(mosek.streamtype.log, streamprinter)
                # Set up and input bounds and linear coefficients
                bkc = [mosek.boundkey.up]
                blc = [-inf]
                buc = [-k3 - k1 * c1 + k2 * c2]
                numvar = 1
                bkx = [mosek.boundkey.fr] * numvar
                blx = [-inf] * numvar
                bux = [inf] * numvar
                temp = 0.12
                c = [-2.0 * omega - 2 * dt * d_omega]
                asub = [[0]]
                aval = [[k1 * d1 - k2 * d2]]

                numvar = len(bkx)
                numcon = len(bkc)

                # Append 'numcon' empty constraints.
                # The constraints will initially have no bounds.
                task.appendcons(numcon)

                # Append 'numvar' variables.
                # The variables will initially be fixed at zero (x=0).
                task.appendvars(numvar)

                for j in range(numvar):
                    # Set the linear term c_j in the objective.
                    task.putcj(j, c[j])
                    # Set the bounds on variable j
                    # blx[j] <= x_j <= bux[j]
                    task.putvarbound(j, bkx[j], blx[j], bux[j])
                    # Input column j of A
                    task.putacol(
                        j,  # Variable (column) index.
                        # Row index of non-zeros in column j.
                        asub[j],
                        aval[j])  # Non-zero Values of column j.
                for i in range(numcon):
                    task.putconbound(i, bkc[i], blc[i], buc[i])

                # Set up and input quadratic objective
                qsubi = [0]
                qsubj = [0]
                qval = [2.0]

                task.putqobj(qsubi, qsubj, qval)

                # Input the objective sense (minimize/maximize)
                task.putobjsense(mosek.objsense.minimize)

                # Optimize
                task.optimize()
                # Print a summary containing information
                # about the solution for debugging purposes
                # task.solutionsummary(mosek.streamtype.msg)

                prosta = task.getprosta(mosek.soltype.itr)
                solsta = task.getsolsta(mosek.soltype.itr)

                # Output a solution
                xx = [0.] * numvar
                task.getxx(mosek.soltype.itr, xx)
                '''if solsta == mosek.solsta.optimal:
                    print("Optimal solution: %s" % xx)
                elif solsta == mosek.solsta.dual_infeas_cer:
                    print("Primal or dual infeasibility.\n")
                elif solsta == mosek.solsta.prim_infeas_cer:
                    print("Primal or dual infeasibility.\n")
                elif mosek.solsta.unknown:
                    print("Unknown solution status")
                else:
                    print("Other solution status")'''

                xx = (xx[0] - omega) / dt
                if xx > temp:
                    xx = temp
                if xx < -temp:
                    xx = -temp

                if np.abs(xx / 0.12 - d_omega) < 0.02:
                    return action, False

                delta_action = xx / 0.12 - d_omega
                action[3] = action[3] + delta_action / 2
                action[4] = action[4] - delta_action / 2

                # temp = action[3] - action[4]
                '''action[3] = + xx[0]/2
                action[4] = - xx[0]/2'''
                return action, True
コード例 #50
0
 def test_roots(self):
     assert_array_equal(np.roots([1, 0, 0]), [0, 0])
コード例 #51
0
ファイル: financial.py プロジェクト: JamesHe1990/eeg-site
def irr(values):
    """
    Return the Internal Rate of Return (IRR).

    This is the "average" periodically compounded rate of return
    that gives a net present value of 0.0; for a more complete explanation,
    see Notes below.

    Parameters
    ----------
    values : array_like, shape(N,)
        Input cash flows per time period.  By convention, net "deposits"
        are negative and net "withdrawals" are positive.  Thus, for
        example, at least the first element of `values`, which represents
        the initial investment, will typically be negative.

    Returns
    -------
    out : float
        Internal Rate of Return for periodic input values.

    Notes
    -----
    The IRR is perhaps best understood through an example (illustrated
    using np.irr in the Examples section below).  Suppose one invests 100
    units and then makes the following withdrawals at regular (fixed)
    intervals: 39, 59, 55, 20.  Assuming the ending value is 0, one's 100
    unit investment yields 173 units; however, due to the combination of
    compounding and the periodic withdrawals, the "average" rate of return
    is neither simply 0.73/4 nor (1.73)^0.25-1.  Rather, it is the solution
    (for :math:`r`) of the equation:

    .. math:: -100 + \\frac{39}{1+r} + \\frac{59}{(1+r)^2}
     + \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0

    In general, for `values` :math:`= [v_0, v_1, ... v_M]`,
    irr is the solution of the equation: [G]_

    .. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0

    References
    ----------
    .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
       Addison-Wesley, 2003, pg. 348.

    Examples
    --------
    >>> round(irr([-100, 39, 59, 55, 20]), 5)
    0.28095
    >>> round(irr([-100, 0, 0, 74]), 5)
    -0.0955
    >>> round(irr([-100, 100, 0, -7]), 5)
    -0.0833
    >>> round(irr([-100, 100, 0, 7]), 5)
    0.06206
    >>> round(irr([-5, 10.5, 1, -8, 1]), 5)
    0.0886

    (Compare with the Example given for numpy.lib.financial.npv)

    """
    res = np.roots(values[::-1])
    mask = (res.imag == 0) & (res.real > 0)
    if not mask.any():
        return np.nan
    res = res[mask].real
    # NPV(rate) = 0 can have more than one solution so we return
    # only the solution closest to zero.
    rate = 1.0 / res - 1
    rate = rate.item(np.argmin(np.abs(rate)))
    return rate
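
# A small illustration (not part of the library code) of why np.roots applies:
# with x = 1/(1+r), the NPV condition sum_t v_t/(1+r)**t = 0 becomes the plain
# polynomial sum_t v_t*x**t = 0, whose highest-degree-first coefficients are
# exactly values[::-1].
demo_values = [-100, 39, 59, 55, 20]
demo_x = np.roots(demo_values[::-1])                          # candidate values of 1/(1+r)
demo_x = demo_x[(demo_x.imag == 0) & (demo_x.real > 0)].real
print(1.0 / demo_x - 1)                                       # ~0.28095, as in the docstring example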
コード例 #52
0
def reactive_flow_solve_dispersion(k_guess, sigma_guess, par, verbose=False):
    # input parameters:
    #   k -- horizontal wavenumber (required)
    #   sigma_guess -- guess at the eigenvalue (optional)
    #   par -- parameter structure (optional)

    sa = SA()

    if type(k_guess) is not np.ndarray:
        # solving for growthrate sigma at a fixed value of wavenumber k
        solve_for_sigma = True
        sa.k = k_guess
        if sigma_guess is None:
            sigma_guess = np.logspace(-1.0, 1.0, 100)
        if type(sigma_guess) is not np.ndarray:
            sigma_guess = np.asarray([sigma_guess])
    else:
        # solving for wavenumber k at a fixed value of growthrate sigma
        solve_for_sigma = False
        sa.sigma = sigma_guess if type(sigma_guess) is not np.ndarray else sigma_guess[0]

    if par.F is None:
        par.F = np.power(1.0 + par.S * par.M * (1.0 + par.G), 1.0 / par.n)

    sigma = np.zeros_like(sigma_guess)
    k = np.zeros_like(k_guess)
    if solve_for_sigma:
        # solve eigenvalue problem to find growth rate of fastest-growing mode
        res = np.zeros_like(sigma_guess)
        exitflag = np.zeros_like(sigma_guess)
        converged = np.zeros_like(sigma_guess)
        problem_sigma = lambda s: zero_by_sigma_or_wavenumber(s, sa.k, par)
        for j in range(len(sigma_guess)):
            [sigma[j], infodict, exitflag[j], _] = fsolve(problem_sigma, sigma_guess[j], 
                                                          full_output=True, xtol=1.e-14)
            res[j] = infodict["fvec"]
            converged[j] = exitflag[j] == 1 and np.abs(res[j]) < par.tol
            if par.largeDa:
                m = np.roots(characteristic_polynomial(sa.k, sigma[j], par))
                converged[j] = converged[j] and np.pi / 2 < np.abs(np.imag(m[0])) < np.pi
            if par.plot and converged[j] == 1:
                eig = form_eigenfunction(sa.k, sigma[j], par)
                # plt.plot(np.linspace(0.0, 1.0, par.nz), np.real(eig.P))
    else:
        # solve eigenvalue problem to find wavenumber of mode
        problem_wavenumber = lambda s: zero_by_sigma_or_wavenumber(sa.sigma, s, par)
        res = np.zeros_like(k_guess)
        exitflag = np.zeros_like(k_guess)
        converged = np.zeros_like(k_guess)
        for j in range(len(k_guess)):
            [k[j], infodict, exitflag[j], _] = fsolve(problem_wavenumber, k_guess[j], full_output=True, xtol=1.e-14)
            res[j] = infodict["fvec"]
            converged[j] = exitflag[j] == 1 or abs(res[j]) < par.tol
            if par.largeDa:
                m = np.roots(characteristic_polynomial(k[j], sa.sigma, par))
                converged[j] = converged[j] and np.pi / 2 < np.abs(np.imag(m[0])) < np.pi
            if par.plot and converged[j] == 1:
                eig = form_eigenfunction(k[j], sa.sigma, par)
                plt.plot(np.linspace(0.0, 1.0, par.nz), np.real(eig.P))

        # [converged', exitflag', log10(abs(res'))];
    none_converged = not np.sum(converged)

    # handle failure to find solution
    if none_converged:
        if verbose:
            print(f'FAILURE: no solution found for k={k_guess}')
        sa.sigma = np.nan
        sa.k = np.nan
        sa.m = [np.nan, np.nan, np.nan]
        sa.eig.P = np.nan
        sa.flag = False
        return sa
    elif solve_for_sigma:
        sa.sigma = np.amax(sigma[converged != 0])
    else:
        sa.k = np.amax(k[converged != 0])

    sa.m = np.roots(characteristic_polynomial(sa.k, sa.sigma, par))

    # form and check eigenfunction
    sa.eig = form_eigenfunction(sa.k, sa.sigma, par)
    gP = np.gradient(sa.eig.P)

    if (gP < 0).any() and par.bc_type != 1:
        sa.flag = False
        if verbose:
            print(f'FAILURE: non-monotonic eigenfunction for k={sa.k}, sigma={sa.sigma}')
    else:
        sa.flag = True
        if verbose:
            print(f'SUCCESS: monotonic eigenfunction for k={sa.k}, sigma={sa.sigma}')

    if par.plot:
        plt.plot(np.linspace(0, 1, par.nz), np.real(sa.eig.P), linewidth=2)

    return sa
コード例 #53
0
def update_temporal_cvxpy(y, g, sn, A=None, **kwargs):
    """
    spatial:
    (d, f), (u, p), (d), (d, u)
    (d, f), (p), (d), (d)
    trace:
    (u, f), (u, p), (u)
    (f), (p), ()
    """
    # get_parameters
    sparse_penal = kwargs.get('sparse_penal')
    max_iters = kwargs.get('max_iters')
    use_cons = kwargs.get('use_cons', False)
    scs = kwargs.get('scs_fallback')
    # conform variables to generalize multiple unit case
    if y.ndim < 2:
        y = y.reshape((1, -1))
    if g.ndim < 2:
        g = g.reshape((1, -1))
    sn = np.atleast_1d(sn)
    if A is not None:
        if A.ndim < 2:
            A = A.reshape((-1, 1))
    # get count of frames and units
    _T = y.shape[-1]
    _u = g.shape[0]
    if A is not None:
        _d = A.shape[0]
    # construct G matrix and decay vector per unit
    dc_vec = np.zeros((_u, _T))
    G_ls = []
    for cur_u in range(_u):
        cur_g = g[cur_u, :]
        # construct first column and row
        cur_c, cur_r = np.zeros(_T), np.zeros(_T)
        cur_c[0] = 1
        cur_r[0] = 1
        cur_c[1:len(cur_g) + 1] = -cur_g
        # update G with toeplitz matrix
        G_ls.append(cvx.Constant(dia_matrix(toeplitz(cur_c, cur_r))))
        # update dc_vec
        cur_gr = np.roots(cur_c)
        dc_vec[cur_u, :] = np.max(cur_gr.real)**np.arange(_T)
    # get noise threshold
    thres_sn = sn * np.sqrt(_T)
    # construct variables
    b = cvx.Variable(_u)  # baseline fluorescence per unit
    c0 = cvx.Variable(_u)  # initial fluorescence per unit
    c = cvx.Variable((_u, _T))  # calcium trace per unit
    s = cvx.vstack([G_ls[u] * c[u, :]
                    for u in range(_u)])  # spike train per unit
    # residual noise per unit
    if A is not None:
        sig = cvx.vstack([(A * c)[px, :] + (A * b)[px, :] +
                          (A * cvx.diag(c0) * dc_vec)[px, :]
                          for px in range(_d)])
        noise = y - sig
    else:
        sig = cvx.vstack(
            [c[u, :] + b[u] + c0[u] * dc_vec[u, :] for u in range(_u)])
        noise = y - sig
    noise = cvx.vstack(
        [cvx.norm(noise[i, :], 2) for i in range(noise.shape[0])])
    # construct constraints
    cons = []
    cons.append(b >= np.min(y, axis=-1))  # baseline larger than minimum
    cons.append(c0 >= 0)  # initial fluorescence larger than 0
    cons.append(s >= 0)  # spike train non-negativity
    # noise constraints
    cons_noise = [noise[i] <= thres_sn[i] for i in range(thres_sn.shape[0])]
    try:
        obj = cvx.Minimize(cvx.sum(cvx.norm(s, 1, axis=1)))
        prob = cvx.Problem(obj, cons + cons_noise)
        if use_cons:
            _ = prob.solve(solver='ECOS')
        if not (prob.status == 'optimal'
                or prob.status == 'optimal_inaccurate'):
            if use_cons:
                warnings.warn("constrained version of problem infeasible")
            raise ValueError
    except (ValueError, cvx.SolverError):
        lam = sn * sparse_penal / sn.shape[
            0]  # hacky correction for near-linear relationship between sparsity and number of concurrently updated units
        obj = cvx.Minimize(
            cvx.sum(cvx.sum(noise, axis=1) + lam * cvx.norm(s, 1, axis=1)))
        prob = cvx.Problem(obj, cons)
        try:
            _ = prob.solve(solver='ECOS', max_iters=max_iters)
            if prob.status in ["infeasible", "unbounded", None]:
                raise ValueError
        except (cvx.SolverError, ValueError):
            try:
                if scs:
                    _ = prob.solve(solver='SCS', max_iters=200)
                if prob.status in ["infeasible", "unbounded", None]:
                    raise ValueError
            except (cvx.SolverError, ValueError):
                warnings.warn(
                    "problem status is {}, returning null".format(prob.status),
                    RuntimeWarning)
                return np.full((5, c.shape[0], c.shape[1]), np.nan).squeeze()
    if not (prob.status == 'optimal'):
        warnings.warn("problem solved sub-optimally", RuntimeWarning)
    try:
        return np.stack(
            np.broadcast_arrays(c.value, s.value, b.value.reshape((-1, 1)),
                                c0.value.reshape((-1, 1)), dc_vec)).squeeze()
    except:
        set_trace()
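
# A minimal sketch (separate from the function above) of the AR bookkeeping it
# relies on: for an AR(1) trace c_t = g*c_{t-1} + s_t, the banded Toeplitz matrix
# with first column [1, -g, 0, ...] recovers the spike train as s = G @ c, and the
# decay vector is root**arange(T) with root taken from np.roots([1, -g]).
import numpy as np
from scipy.linalg import toeplitz

T, g_ar = 6, 0.8
s_true = np.array([1.0, 0.0, 0.0, 0.5, 0.0, 0.0])
c_trace = np.zeros(T)
for t in range(T):
    c_trace[t] = (g_ar * c_trace[t - 1] if t else 0.0) + s_true[t]
col = np.zeros(T)
col[0], col[1] = 1.0, -g_ar
G = toeplitz(col, np.r_[1.0, np.zeros(T - 1)])
print(G @ c_trace)                          # recovers s_true
print(np.max(np.roots([1.0, -g_ar]).real))  # 0.8, the decay used for dc_vec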
コード例 #54
0
    while abs(check) > 0.000005:  # check whether the convergence criterion is fulfilled

        y1 = y1_new
        y2 = y2_new
        P = Pneu

       # Coefficients of the EoS model equation
        A = a * P / (R**2 * T**2)
        B = b * P / (R * T)

       # Cubic equation for the liquid phase
        c = [1, -(1-B), A - 2 * B - 3 * B**2, -(A * B - B**2 - B**3)]


       # Roots finds the three solutions for the cubic equation
        r = np.roots(c)

        Zliq = min(r) # liquid compressibility factor is the smallest solution

       # a and b for the gas phase
        bg = y1 * b1 + y2 * b2
        ag = y1**2 * a11 + 2 * a12 * y1 * y2 + y2**2 * a22

        A = ag * P / (R**2 * T**2)
        B = bg * P / (R * T)
        # Cubic equation
        c[0] = 1
        c[1] = -(1 - B)
        c[2] = A - 2 * B - 3 * B**2
        c[3] = -(A * B - B**2 - B**3)
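        # The snippet is cut off here; in a typical cubic-EoS flash routine the
        # next step solves this gas-phase cubic as well and keeps the largest real
        # root as the gas compressibility factor.  A hedged sketch, not from the
        # original source:
        #     r = np.roots(c)
        #     Zgas = max(r[np.isreal(r)].real)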
コード例 #55
0
ファイル: bpg.py プロジェクト: lascauje/byosp
def t_star(coeff: np.ndarray) -> float:
    """See p.21 (6.10) next."""
    return np.roots([np.linalg.norm(coeff)**2, 0, 1, -1])[-1].real
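
# A quick sanity check (not part of the module): t_star takes the last root of the
# cubic ||coeff||**2 * t**3 + t - 1 = 0 and drops its imaginary part, so it is
# meaningful when that root is the cubic's unique real solution.
coeff = np.array([0.6, 0.8])                        # ||coeff|| = 1, so the cubic is t**3 + t - 1 = 0
t = t_star(coeff)
print(t, np.linalg.norm(coeff)**2 * t**3 + t - 1)   # residual ~0 if the real root was picked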
コード例 #56
0
def generate_velocity_profile(env: Env, path: path_t) -> np.ndarray:
    """

    :param env: Env
    :param path: [n x 2] float
    :return: [n-1] float
    """
    # TODO: Many repeated calculations, store value
    # e.g.
    # if a > foo(a): a = foo(a)
    # if a > (a_max := foo(a)): a = a_max)

    v = np.zeros(len(path) - 1)
    deltav = np.zeros(len(path) - 1)
    theta = np.zeros(len(path) - 1)
    deltav[0] = 0.0
    v[0] = env.state[3]

    x, y = path[:, 0], path[:, 1]

    for xx in range(1, len(path) - 1):
        params = env.vel_params
        m = params.m
        mu = params.mu
        rolling_friction = params.rolling_friction

        # TODO: What are these magic numbers? g and __ Cd ?
        normal_reaction = m * 9.81 * np.cos(theta[xx]) + 0.96552 * np.square(v[xx])
        d = 4.0
        u = v[xx - 1]

        a = np.sqrt(np.square(x[xx] - x[xx - 1]) +
                    np.square(y[xx] - y[xx - 1]))
        # assert (a == np.linalg.norm(path[xx] - path[xx - 1]))

        b = np.sqrt(np.square(x[xx] - x[xx + 1]) +
                    np.square(y[xx] - y[xx + 1]))
        # assert (b == np.linalg.norm(path[xx] - path[xx + 1]))

        c = np.sqrt(np.square(x[xx + 1] - x[xx - 1]) +
                    np.square(y[xx + 1] - y[xx - 1]))
        # assert (c == np.linalg.norm(path[xx + 1] - path[xx - 1]))

        if -1e-9 <= (a + b + c) * (a + b - c) * (a + c - b) * (b + c - a) <= 0:  # colinear
            radius = 1000000000000000000.0
        else:
            radius = a * b * c / (np.sqrt((a + b + c) * (a + b - c) * (a + c - b) * (b + c - a)))

        possible_velocities = np.roots(
            [
                np.square(m / radius) + np.square(m) / (np.square(d) * 4) - np.square(0.96552),
                0,
                -2 * np.square(m) * 9.81 * np.sin(theta[xx]) / radius +
                m * (rolling_friction + 0.445 * np.square(u) - m * np.square(u) / (2 * d)) / d
                - 2 * 0.96552 * mu * m * 9.81 * np.cos(theta[xx]),
                0,
                np.square(m * 9.81 * np.sin(theta[xx])) +
                np.square(rolling_friction + 0.445 * np.square(u) - m * np.square(u) / (2 * d))
                - np.square(mu * m * 9.81 * np.cos(theta[xx]))
            ]
        )
        possible_velocities = np.sort(possible_velocities)
        delta = possible_velocities[3] - v[xx - 1]
        # assert (delta == np.max(possible_velocities) - v[xx - 1])

        if delta >= (np.sqrt(
                np.square(v[xx - 1]) - 2 * d * (rolling_friction + 0.445 * np.square(v[xx - 1]) - 1550.0) / m) - v[xx - 1]):
            deltav[xx] = (np.sqrt(np.square(v[xx - 1]) - 2 * d * (rolling_friction + 0.445 * np.square(v[xx - 1]) - 1550.0) / m) - v[xx - 1])
        else:
            if delta < (
                    np.sqrt(np.square(v[xx - 1]) - 2 * d * (rolling_friction + 0.445 * np.square(v[xx - 1])) / m) -
                    v[xx - 1]):
                b = 2
                for changes in range(1, b):
                    u = np.sqrt(np.square(np.sqrt(
                        radius * (mu * normal_reaction / m + 9.81 * np.sin(theta[xx])))) - 2 * d * (
                                        rolling_friction + 0.445 * np.square(v[xx - 1])) / m)
                    # TODO: we are changing the previous deltas, but not the previous velocities !?
                    for i in range(1, changes + 1):
                        deltav[xx - changes + i - 1] = max(deltav[xx - changes + i - 1] - (v[xx - 1] - u) / changes,
                                                           (np.sqrt(np.square(v[xx - 1]) - 2 * d * (
                                                                   rolling_friction + 0.445 * np.square(
                                                               v[xx - 1])) / m) - v[xx - 1]))
                    # find delta again
                    if delta < (np.sqrt(
                            np.square(v[xx - 1]) - 2 * d * (rolling_friction + 0.445 * np.square(v[xx - 1])) / m) -
                                v[xx - 1]):
                        if xx > (b - 1):
                            b += 1
                    else:
                        if delta >= (np.sqrt(np.square(v[xx - 1]) - 2 * d * (
                                rolling_friction + 0.445 * np.square(v[xx - 1]) - 1550.0) / m) - v[xx - 1]):
                            deltav[xx] = (np.sqrt(np.square(v[xx - 1]) - 2 * d * (
                                    rolling_friction + 0.445 * np.square(v[xx - 1]) - 1550.0) / m) - v[xx - 1])
                        else:
                            deltav[xx] = delta
            else:
                deltav[xx] = delta

        v[xx] = v[xx - 1] + deltav[xx]

    return v
コード例 #57
0
def calibration1(size, am):
    '''
    Gets the size of the desk and the points on the photomatrix.
    Returns the 3D coordinates of the lens in space, L, and the components of the lens's optical axis, f.
    The norm of f approximately gives the distance between the lens and the photomatrix.
    '''

    x1 = am[0][0]  #necessary variables
    x2 = am[1][0]
    x3 = am[2][0]

    y1 = am[0][1]
    y2 = am[1][1]
    y3 = am[2][1]

    X1 = x1 - x2
    X2 = x2 - x3
    X3 = x3 - x1

    Y1 = y1 - y2
    Y2 = y2 - y3
    Y3 = y3 - y1

    Z1 = y1 * x2 - y2 * x1
    Z2 = y2 * x3 - y3 * x2
    Z3 = y3 * x1 - y1 * x3

    dx1 = X1 / Z1 - X2 / Z2
    dx2 = X2 / Z2 - X3 / Z3
    dx3 = X3 / Z3 - X1 / Z1

    dy1 = Y1 / Z1 - Y2 / Z2
    dy2 = Y2 / Z2 - Y3 / Z3
    dy3 = Y3 / Z3 - Y1 / Z1

    k3 = -Y1 / Z1 * dx2  # coefficients k_i of the third-degree polynomial, where i is the power of the argument
    k2 = Y1 / Z1 * dx1 + dy1 * dx3
    k1 = -Y3 / Z3 * dx2 + dy2 * dx3
    k0 = Y3 / Z3 * dx1

    cb = np.roots([k3, k2, k1,
                   k0])  #find polynomial zeros. cb - tan of turn angle

    nans = []

    for i in range(len(cb)):  #delete imaginary roots
        if (np.imag(cb[i]) != 0):
            nans += [i]

    cb = np.real(np.delete(cb, nans))

    cc = (cb * dy1 + dy2) / (cb * dx2 - dx1)  #find cc - sin of incline angle

    nans = []  #delete solution > 1 by abs value. sin can't be > 1
    for i in range(
            len(cc)
    ):  #delete solution > 0. angle should be < 0. camera pointing down
        if ((abs(cc[i]) > 1) or (cc[i] > 0)):
            nans += [i]

    cc = np.delete(cc, nans)  #delete cc unsuitable roots and cb both
    cb = np.delete(cb, nans)

    ca = np.zeros(len(cb))  #find ca - norm of optical vector f
    for i in range(len(ca)):
        ca[i] = -(sqrt(1 - (cc[i])**2) * Z2 / (cc[i] * X2 + (cb[i] - 1) /
                                               (cb[i] + 1) * Y2))

    nans = []  #delete solutions < 0. norm can't be < 0
    for i in range(len(ca)):
        if (ca[i] < 0):
            nans += [i]

    ca = np.delete(ca, nans)  #delete unsuitable solutions for all parameters
    cb = np.delete(cb, nans)
    cc = np.delete(cc, nans)

    prams = np.transpose(arr([ca, atan(cb),
                              asin(cc)]))  #collect all parameters into an array
    p = np.real(prams)  # drop the (zero) imaginary part so it does not clutter the output

    f = np.zeros(p.shape)  # find the components of the optical vector for all possible solutions

    for i in range(len(p)):
        f[i] = -p[i][0] * arr([
            -cos(p[i][2]) * sin(p[i][1]),
            cos(p[i][1]) * cos(p[i][2]),
            sin(p[i][2])
        ])

    q = np.zeros([len(f), 3, 3])  # find the components of the light-ray vectors
    for i in range(len(f)):
        q[i] = pixels2rays(am, f[i])  # convert photomatrix point coordinates into ray-vector components

    L = np.zeros(f.shape)  #find coordinates of the lens
    for i in range(len(L)):
        L[i][2] = size / (q[i][0][0] / q[i][0][2] - q[i][2][0] / q[i][2][2])
        L[i][1] = L[i][2] * q[i][0][1] / q[i][0][2]
        L[i][0] = L[i][2] * q[i][0][0] / q[i][0][2]

    return f, L
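A minimal usage sketch, assuming calibration1 and its helper pixels2rays are importable from this module, that am holds the photomatrix coordinates of three reference points, and that size is their known real-world spacing; all numbers below are placeholders.

import numpy as np

size = 0.5                                   # assumed desk size
am = np.array([[-0.0010, 0.0020],            # assumed photomatrix coordinates
               [ 0.0000, 0.0026],            # of three reference points
               [ 0.0011, 0.0019]])

f, L = calibration1(size, am)                # one row per surviving root
for fi, Li in zip(f, L):
    print("lens at", Li, "optical axis", fi,
          "lens-photomatrix distance ~", np.linalg.norm(fi))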
Code example #58
File: roots.py Project: shawnafk/LH_tools
#wid=9.5e-3
#n=wid*8/L
k_para = 2 * np.pi / (720)
OMEGA = np.linspace(0.000001, 0.010, 20000)
realomega = OMEGA * 3e8 / L
N_PARA = k_para * c / OMEGA
#solve the dispersion relation P4*n_perp**4 + P2*n_perp**2 + P0 = 0 as a quadratic in n_perp**2
for n_para, omega in zip(N_PARA, OMEGA):
    #[n_para,omega] = [k_para*c/OMEGA,OMEGA]
    epsi_perp = 1 + (omega_pe / omega_ce)**2 - (omega_pi / omega)**2
    epsi_para = 1 - (omega_pe / omega)**2 - (omega_pi / omega)**2
    epsi_xy = (omega_pe)**2 / (omega_ce * omega)
    P0 = epsi_para * ((n_para**2 - epsi_perp)**2 - epsi_xy**2)
    P2 = (epsi_perp + epsi_para) * (n_para**2 - epsi_perp) + epsi_xy**2
    P4 = epsi_perp
    for n_perp_sq in np.roots([P4, P2, P0]):  # roots are candidate values of n_perp**2
        if np.isreal(n_perp_sq) and np.real(n_perp_sq) > 0:
            w.append(omega)
            kv.append(np.sqrt(np.real(n_perp_sq)) * omega / c)
    #       print(kv)
DP = zip(kv, w)
res = sorted(DP, key=lambda v: v[0])
kv, w = zip(*res)
plt.semilogx(kv, w)
'''
w=[]
kv=[]
vte=0.00/3
for n_para,omega in zip(N_PARA,OMEGA):
#[n_para,omega] = [k_para*c/OMEGA,OMEGA]
	epsi_perp=1+(omega_pe/omega_ce)**2 - (omega_pi/omega)**2
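The loop above treats the dispersion relation as a quadratic in n_perp**2 and keeps only real, positive solutions. A self-contained sketch of that root-filtering step; the coefficients below are placeholders, not values produced by roots.py.

import numpy as np

P4, P2, P0 = 1.0, -5.0, 4.0                  # placeholder coefficients; roots are x = 1 and x = 4
n_perp_sq = [np.real(x) for x in np.roots([P4, P2, P0])
             if np.isreal(x) and np.real(x) > 0]
n_perp = np.sqrt(n_perp_sq)                  # keep only propagating (real, positive) branches
print(np.sort(n_perp))                       # -> [1. 2.]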
Code example #59
def class_B(event_list, particle_keys, sqrt_s12, m1=0, m2=-1):
    """
    Gets the missing momentum for the following topology:
            
     x1 ╲     s_12 ╭── p2  (visible)
         █████┄┄┄┄┄┤          
     x2 ╱          ╰── p1  (missing)    
                   
    m2 can be specified, if -1 it is calculated
    """
    solutions = []
    if not isinstance(event_list, list):
        if isinstance(event_list, Event):
            event_list = [event_list]
        else:
            raise TypeError("'event_list' should be a list of Event objects")

    for event in event_list:
        p2 = event[particle_keys[1]]
        p_branches = FourMomentum(0.0, 0.0, 0.0, 0.0)
        for k in event.particles:
            if event.is_visible(k):
                p_branches += event[k]

        if particle_keys[0] not in event.particles:
            event.add_particle(particle_keys[0],
                               Particle(0, 1, FourMomentum()))

        p1x = -1 * p_branches.x
        p1y = -1 * p_branches.y

        m2 = set_mass(m2, p2)

        s12 = sqrt_s12**2

        R12 = (s12 - m1**2 - m2**2) / 2.0

        # Solve the quadratic A*p1z**2 + B*p1z + C = 0 for p1z
        A = 1 - p2.z**2 / p2.E**2
        B = -2.0 * p2.z / p2.E**2 * (R12 + p1x * p2.x + p1y * p2.y)
        C = m1**2 + p1x**2 + p1y**2 - 1.0/p2.E**2*(R12**2 + p1x**2*p2.x**2 + p1y**2*p2.y**2 + \
            2*(R12*p1x*p2.x + R12*p1y*p2.y + p1x*p2.x*p1y*p2.y))
        p1z_sols = np.roots([A, B, C])
        E1_sols = (m1**2 + p1x**2 + p1y**2 + p1z_sols**2)**0.5

        E1_sols = remove_complex(E1_sols)

        for E1, p1z in zip(E1_sols, p1z_sols):
            if not valid_energy(E1):
                continue
            p1_sol = FourMomentum(E1, p1x, p1y, p1z)
            if abs(p1_sol.m2 - m1**2) > FP_TOLERANCE:
                continue
            x1 = (E1 + p1z + p_branches.E + p_branches.z) / event.sqrt_s
            x2 = (E1 - p1z + p_branches.E - p_branches.z) / event.sqrt_s
            if x1 < 0.0 or x2 < 0.0 or x1 > 1.0 or x2 > 1.0:
                continue
            output_event = deepcopy(event)
            output_event.set_momentum(particle_keys[0], p1_sol)
            output_event.x1 = x1
            output_event.x2 = x2
            solutions.append(output_event)

    return solutions
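The quadratic in p1z comes from combining the mass-shell condition E1**2 = m1**2 + p1x**2 + p1y**2 + p1z**2 with the invariant-mass constraint s12 = m1**2 + m2**2 + 2*p1·p2. A self-contained numerical check of that algebra with made-up momenta; the four-vectors below are placeholders and do not use the Event or FourMomentum classes.

import numpy as np

p1_true = np.array([0.0, -10.0, 5.0, 30.0])  # "true" missing particle (E, px, py, pz)
p1_true[0] = np.linalg.norm(p1_true[1:])     # massless: E = |p|
p2 = np.array([50.0, 10.0, -5.0, 20.0])      # visible particle
m1 = 0.0
m2 = np.sqrt(p2[0]**2 - np.dot(p2[1:], p2[1:]))

# invariant mass squared of the (p1, p2) pair, as supplied via sqrt_s12
s12 = m1**2 + m2**2 + 2 * (p1_true[0] * p2[0] - np.dot(p1_true[1:], p2[1:]))
R12 = (s12 - m1**2 - m2**2) / 2.0

# same quadratic A*p1z**2 + B*p1z + C = 0 as in class_B
p1x, p1y = p1_true[1], p1_true[2]
Rp = R12 + p1x * p2[1] + p1y * p2[2]
A = 1 - p2[3]**2 / p2[0]**2
B = -2.0 * p2[3] / p2[0]**2 * Rp
C = m1**2 + p1x**2 + p1y**2 - Rp**2 / p2[0]**2

print(np.roots([A, B, C]))                   # one root recovers the true p1z = 30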
Code example #60
def class_E(event_list,
            particle_keys,
            sqrt_s13,
            sqrt_s24,
            sqrt_s_hat,
            rapidity,
            m1=0,
            m2=0,
            m3=-1,
            m4=-1):
    """
    Gets the two missing momenta for the following topology:
                     ╭─────── missing (p1)
    x1 ╲       s_13 ╱╰─────── visible (p3)
        ╲          ╱
         ███┄┄┄┄███
        ╱   s_hat  ╲ 
    x2 ╱       s_24 ╲╭─────── missing (p2)
                     ╰─────── visible (p4)
    x1 and x2 can be set manually for debugging
    m1, ..., m4 can be specified, if -1 they are calculated
    """
    solutions = []
    if not isinstance(event_list, list):
        if isinstance(event_list, Event):
            event_list = [event_list]
        else:
            raise TypeError("'event_list' should be a list of Event objects")

    for event in event_list:
        # Set up the kinematic variables
        s_13, s_24 = sqrt_s13**2, sqrt_s24**2

        p3 = event[particle_keys[2]]
        p4 = event[particle_keys[3]]
        p_branches = FourMomentum(0.0, 0.0, 0.0, 0.0)
        for k in event.particles:
            if event.is_visible(k):
                p_branches += event[k]

        for key in particle_keys[:2]:
            if key not in event.particles:
                event.add_particle(key, Particle(0, 1, FourMomentum()))

        x1 = math.exp(rapidity) * sqrt_s_hat / event.sqrt_s
        x2 = math.exp(-rapidity) * sqrt_s_hat / event.sqrt_s

        m3 = set_mass(m3, p3)
        m4 = set_mass(m4, p4)

        p_i1 = FourMomentum(x1 * event.sqrt_s / 2, 0.0, 0.0,
                            x1 * event.sqrt_s / 2)
        p_i2 = FourMomentum(x2 * event.sqrt_s / 2, 0.0, 0.0,
                            -x2 * event.sqrt_s / 2)
        p_vis = p_i1 + p_i2 - p_branches

        Rvis = (m2**2 - m1**2 + p_vis * p_vis) / 2
        R3 = p_vis * p3 - (s_13 - m1**2 - m3**2) / 2
        R4 = (s_24 - m2**2 - m4**2) / 2

        # Spatial momentum of p2 solved in terms of E2 (linear system, derived with SymPy):
        # p2_vec = E2*v_s + v_i
        PF = 1/(p3.x*p4.y*p_vis.z - p3.x*p4.z*p_vis.y - p3.y*p4.x*p_vis.z + \
            p3.y*p4.z*p_vis.x + p3.z*p4.x*p_vis.y - p3.z*p4.y*p_vis.x)
        v_i = PF*np.array([-(R3*p4.y*p_vis.z - R3*p4.z*p_vis.y - R4*p3.y*p_vis.z + \
                        R4*p3.z*p_vis.y + Rvis*p3.y*p4.z - Rvis*p3.z*p4.y),
                        (R3*p4.x*p_vis.z - R3*p4.z*p_vis.x - R4*p3.x*p_vis.z + \
                        R4*p3.z*p_vis.x + Rvis*p3.x*p4.z - Rvis*p3.z*p4.x),
                        -(R3*p4.x*p_vis.y - R3*p4.y*p_vis.x - R4*p3.x*p_vis.y + \
                        R4*p3.y*p_vis.x + Rvis*p3.x*p4.y - Rvis*p3.y*p4.x)],
                    dtype=float)
        v_s = PF*np.array([-(-p3.E*p4.y*p_vis.z + p3.E*p4.z*p_vis.y + p3.y*p4.E*p_vis.z - \
                        p3.y*p4.z*p_vis.E - p3.z*p4.E*p_vis.y + p3.z*p4.y*p_vis.E),
                        (-p3.E*p4.x*p_vis.z + p3.E*p4.z*p_vis.x + p3.x*p4.E*p_vis.z - \
                        p3.x*p4.z*p_vis.E - p3.z*p4.E*p_vis.x + p3.z*p4.x*p_vis.E),
                        -(-p3.E*p4.x*p_vis.y + p3.E*p4.y*p_vis.x + p3.x*p4.E*p_vis.y - \
                        p3.x*p4.y*p_vis.E - p3.y*p4.E*p_vis.x + p3.y*p4.x*p_vis.E)],
                    dtype=float)

        # Now the solutions can be taken from E2^2 = m2^2 + p2·p2
        # Using the symbols above, we have A*E2^2 + B*E2 + C = 0
        A = 1 - np.dot(v_s, v_s)
        B = -2 * (np.dot(v_s, v_i))
        C = -(m2**2 + np.dot(v_i, v_i))

        E2_sols = np.roots(np.nan_to_num([A, B, C]))
        E2_sols = remove_complex(E2_sols)

        p2_sols = [
            FourMomentum(E2_sol, *(E2_sol * v_s + v_i)) for E2_sol in E2_sols
        ]
        p1_sols = [p_vis - p2_sol for p2_sol in p2_sols]

        for p1_sol, p2_sol in zip(p1_sols, p2_sols):
            if valid_energy(p1_sol.E) and valid_energy(p2_sol.E):
                output_event = deepcopy(event)
                output_event.set_momentum(particle_keys[0], p1_sol)
                output_event.set_momentum(particle_keys[1], p2_sol)
                output_event.x1 = x1
                output_event.x2 = x2
                solutions.append(output_event)

    return solutions
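As in class_B, the last step squares a linear relation: with p2 = E2*v_s + v_i, the on-shell condition E2**2 = m2**2 + p2·p2 rearranges to (1 - v_s·v_s)*E2**2 - 2*(v_s·v_i)*E2 - (m2**2 + v_i·v_i) = 0, which is what A, B and C encode. A quick self-contained check with placeholder vectors, not tied to any event or to the FourMomentum class.

import numpy as np

v_s = np.array([0.3, -0.1, 0.2])             # placeholder vectors standing in for the
v_i = np.array([5.0, 2.0, -1.0])             # SymPy-derived v_s and v_i above
m2 = 0.0

A = 1 - np.dot(v_s, v_s)
B = -2 * np.dot(v_s, v_i)
C = -(m2**2 + np.dot(v_i, v_i))

for E2 in np.roots([A, B, C]):
    p2_vec = E2 * v_s + v_i
    # both roots satisfy the on-shell condition by construction
    print(E2, np.isclose(E2**2, m2**2 + np.dot(p2_vec, p2_vec)))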