def get_immigration2(S, starting_age, ending_age, E):
    '''
    Parameters:
        S - Number of age cohorts
        starting_age - initial age of cohorts
        ending_age - final age of cohorts
        E - number of child cohorts (ages before starting_age)

    Returns:
        imm_rate - S x 1 array of immigration rates for each
                   age cohort (averaged over 2010-2013)
        child_imm_rate - E x 1 array of immigration rates for
            children
    '''
    imm_rate_condensed1 = get_immigration1(
        S, starting_age, ending_age, pop_2010, pop_2011, E)
    imm_rate_condensed2 = get_immigration1(
        S, starting_age, ending_age, pop_2011, pop_2012, E)
    imm_rate_condensed3 = get_immigration1(
        S, starting_age, ending_age, pop_2012, pop_2013, E)
    im_array = (
        imm_rate_condensed1 + imm_rate_condensed2 + imm_rate_condensed3) / 3.0
    poly_imm = poly.polyfit(np.linspace(
        1, ending_age, ending_age-1), im_array[:-1], deg=18)
    poly_imm_int = poly.polyint(poly_imm)
    child_imm_rate = poly.polyval(np.linspace(
        0, starting_age, E+1), poly_imm_int)
    imm_rate = poly.polyval(np.linspace(
        starting_age, ending_age, S+1), poly_imm_int)
    child_imm_rate = np.diff(child_imm_rate)
    imm_rate = np.diff(imm_rate)
    imm_rate[-1] = 0.0
    return imm_rate, child_imm_rate
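A minimal, self-contained sketch of the fit -> integrate -> difference pattern used above, with made-up data (the real function averages rates implied by the module-level pop_2010..pop_2013 arrays): fit a polynomial to per-age rates, take its antiderivative, evaluate it at the cohort bin edges, and difference to get one value per cohort.

import numpy as np
import numpy.polynomial.polynomial as poly

ages = np.arange(21, 101)                        # hypothetical data ages
rates = 0.02 * np.exp(-0.03 * (ages - 21))       # made-up age profile
S = 80                                           # number of cohorts

coeffs = poly.polyfit(ages, rates, deg=10)       # smooth polynomial fit
antideriv = poly.polyint(coeffs)                 # indefinite integral
edges = np.linspace(21, 101, S + 1)              # cohort bin edges
per_cohort = np.diff(poly.polyval(edges, antideriv))   # integral over each bin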
def mobileye_callback(mobileye_pb):
    global nx, ny, local_seg_x, local_seg_y
    global left_marker_x, left_marker_y
    global right_marker_x, right_marker_y
    global local_smooth_seg_x, local_smooth_seg_y
    global local_smooth_seg_x2, local_smooth_seg_y2

    mobileye_provider.update(mobileye_pb)
    mobileye_provider.process_obstacles()

    vx = localization_provider.localization_pb.pose.position.x
    vy = localization_provider.localization_pb.pose.position.y
    heading = localization_provider.localization_pb.pose.heading

    local_seg_x, local_seg_y = routing_provider.get_local_segment(vx, vy,
                                                                  heading)

    local_smooth_seg_x, local_smooth_seg_y = \
        routing_provider.get_local_segment_spline(vx, vy, heading)

    left_marker_coef = mobileye_provider.left_lm_coef
    left_marker_x = []
    left_marker_y = []
    for x in range(int(mobileye_provider.left_lane_marker_range)):
        y = polyval(x, left_marker_coef)
        left_marker_x.append(x)
        left_marker_y.append(-y)

    right_marker_coef = mobileye_provider.right_lm_coef
    right_marker_x = []
    right_marker_y = []
    for x in range(int(mobileye_provider.right_lane_marker_range)):
        y = polyval(x, right_marker_coef)
        right_marker_x.append(x)
        right_marker_y.append(-y)
 def plot(self,U,V,ctr):
     X1=arange(min(U)-2,max(U)+2,0.1)
     x=[1]
     for i in range(len(U)):
         x=P.polymul(x,[-1*U[i],1])
     plt.axis([-10,10,min(V)-1,max(V)+1])
     b=[0]
     for i in range(len(U)):
         a=P.polydiv(x,[-1*U[i],1])
         b=P.polyadd(P.polymul((P.polydiv(a[0],P.polyval(U[i],a[0])))[0],[V[i]]),b)
         Y=P.polyval(X1,P.polymul((P.polydiv(a[0],P.polyval(U[i],a[0])))[0],[V[i]]))
         plt.plot(X1,Y,'y')
     plt.plot(U,V,'ro')
     X1=arange(-5,5,0.1)
     Y=P.polyval(X1,b)
     plt.plot(X1,Y,'b',label='Required Polynomial')
     plt.plot((-8,8),(0,0),'k')
     
     plt.grid(True, which='both', color='0.65', linestyle='-')
     Y=list(Y)
     plt.plot((0,0),(-max(Y)-5,max(Y)+5),'k')
     plt.xlabel('x-axis')
     plt.ylabel('y-axis')
     plt.legend(loc=1)
     filename = "this_plot"+str(ctr)+".png"
     path = "C:\\Users\HP\Anaconda3\static"
     fullpath = os.path.join(path, filename)
     plt.savefig(fullpath)
Example #4
def smoothing_poly_lnprior(poly, degree, xmin, xmax, gamma=1):
    """
    A smoothing prior that suppresses higher order derivatives of a polynomial,
    poly = a + b x + c x*x + ..., described by a vector of its coefficients,
    [a, b, c, ...].

    Functional form is:

    ln p(poly coeffs) =
      -gamma * integrate( (diff(poly(x), x, degree))^2, x, xmin, xmax)

    So it takes the `degree`th derivative of the polynomial, squares it,
    integrates that from xmin to xmax, and scales by -gamma.
    """
    # Take the `degree`th derivative of the polynomial.
    poly_diff = P.polyder(poly, m=degree)
    # Square the polynomial.
    poly_diff_sq = P.polypow(poly_diff, 2)
    # Take the indefinite integral of the polynomial.
    poly_int_indef = P.polyint(poly_diff_sq)
    # Evaluate the integral at xmin and xmax to get the definite integral.
    poly_int_def = (
        P.polyval(xmax, poly_int_indef) - P.polyval(xmin, poly_int_indef)
    )
    # Scale by -gamma to get the log prior
    lnp = -gamma * poly_int_def

    return lnp
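A quick hand-checkable usage of smoothing_poly_lnprior (assuming numpy.polynomial.polynomial is imported as P, as the function requires): for p(x) = x**2 the 2nd derivative is 2, its square is 4, and the integral of 4 over [0, 1] is 4, so the log-prior is -4 * gamma.

# p(x) = x**2 is [0, 0, 1] in lowest-order-first coefficients
lnp = smoothing_poly_lnprior([0.0, 0.0, 1.0], degree=2, xmin=0.0, xmax=1.0, gamma=0.5)
print(lnp)   # -2.0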
Example #5
    def _transform(self, m):
        # Set model parameters
        alpha = self.slope
        sig1,sig2 = m[0],m[1]
        c = m[2:]
        if self.logSigma:
            sig1, sig2 = np.exp(sig1), np.exp(sig2)
        #2D
        if self.mesh.dim == 2:
            X = self.mesh.gridCC[:,0]
            Y = self.mesh.gridCC[:,1]
            if self.normal =='X':
                f = polynomial.polyval(Y, c) - X
            elif self.normal =='Y':
                f = polynomial.polyval(X, c) - Y
            else:
                raise(Exception("Input for normal = X or Y or Z"))
        #3D
        elif self.mesh.dim == 3: 
            X = self.mesh.gridCC[:,0]
            Y = self.mesh.gridCC[:,1]            
            Z = self.mesh.gridCC[:,2]            
            if self.normal =='X':
                f = polynomial.polyval2d(Y, Z, c.reshape((self.order[0]+1,self.order[1]+1))) - X
            elif self.normal =='Y':
                f = polynomial.polyval2d(X, Z, c.reshape((self.order[0]+1,self.order[1]+1))) - Y
            elif self.normal =='Z':
                f = polynomial.polyval2d(X, Y, c.reshape((self.order[0]+1,self.order[1]+1))) - Z
            else:
                raise(Exception("Input for normal = X or Y or Z"))
        else:
            raise(Exception("Only supports 2D"))
                    

        return sig1+(sig2-sig1)*(np.arctan(alpha*f)/np.pi+0.5)
Example #6
def fit_polynomial(data, ln_xray_property, deg, whatIsFit):
    """Fit a DEG-order polynomial in x, y space.

    numpy.polynomial.polynomial.polyfit() returns coefficients,
    from 0th order first to N-th order last (note that this is
    *opposite* from how np.polyfit behaves!).
    """
    radiuspackage = extrapolate_radius(data)

    r = radiuspackage[0]
    ln_r = radiuspackage[1]
    r_fine = radiuspackage[2]
    ln_r_fine = radiuspackage[4]

    print("Now fitting    |" + "  " + make_number_ordinal(deg) +
          " order polynomial to " + whatIsFit)

    coeffs = poly.polyfit(ln_r, ln_xray_property, deg)

    # polyval() is used to assemble the degree-`deg` fit:
    # $p(x) = c_0 + c_1 x + c_2 x^2 + \dots + c_{deg} x^{deg}$
    # where c_n are the coeffs returned by polyfit()
    ln_fit = poly.polyval(ln_r, coeffs)
    fit = np.exp(ln_fit)

    # Now use these coefficients to extrapolate fit
    # across larger radius

    ln_fit_fine = poly.polyval(ln_r_fine, coeffs)
    fit_fine = np.exp(ln_fit_fine)

    fitpackage = (fit, r, fit_fine, r_fine, coeffs)

    return fitpackage
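The ordering difference the docstring warns about, shown standalone (not part of the original module):

import numpy as np
import numpy.polynomial.polynomial as poly

x = np.linspace(0.0, 1.0, 20)
y = 1.0 + 2.0 * x + 3.0 * x**2

print(poly.polyfit(x, y, 2))   # ~[1. 2. 3.]  (c0, c1, c2: lowest order first)
print(np.polyfit(x, y, 2))     # ~[3. 2. 1.]  (highest order first)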
Example #7
def polynomial(ny, nx, order=2, nz=None):    
    coeffs = poly.polyfit(nx,ny,order)
    if nz is not None:
        continuum = poly.polyval(nz, coeffs)
    else:
        continuum = poly.polyval(nx, coeffs)
    return continuum
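A hedged usage sketch of this continuum helper, with invented wavelength/flux arrays (the helper assumes numpy.polynomial.polynomial is imported as poly in its module):

import numpy as np

wave = np.linspace(4000.0, 5000.0, 50)                     # hypothetical wavelengths
flux = 1.0 + 1e-4 * (wave - 4500.0) + np.random.normal(0.0, 0.01, wave.size)
wave_fine = np.linspace(4000.0, 5000.0, 500)

continuum = polynomial(flux, wave, order=2, nz=wave_fine)  # evaluated on the finer grid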
Example #8
def poly_solve(poly, tau, line, terminate):
    # Derivative
    if poly.size > 3 and np.abs(poly[-1]) < np.finfo(float).eps:
        poly[-1] = 0.0
    der = p.polyder(poly)
    # Stationary Points
    stat_x = p.polyroots(der)
    stat_f = p.polyval(stat_x, poly)
    stat_x_feasible = stat_x[terminate.feasible(line, stat_x, stat_f)]
    stat_f_feasible = stat_f[terminate.feasible(line, stat_x, stat_f)]
    # Bounds Extrema
    bound_x = tau
    bound_f = p.polyval(bound_x, poly)
    bound_x_feasible = bound_x[terminate.feasible(line, bound_x, bound_f)]
    bound_f_feasible = bound_f[terminate.feasible(line, bound_x, bound_f)]
    # Termination Extrema
    term_x = terminate.extrema(line, poly)
    if term_x.size > 0:
        term_f = p.polyval(term_x, poly)
    else:
        term_f = np.array([])
    # Combine
    comb_x = np.concatenate((stat_x_feasible, bound_x_feasible, term_x))
    comb_f = np.concatenate((stat_f_feasible, bound_f_feasible, term_f))
    # Bounds Filter
    comb_final_x = comb_x[np.logical_and(bound_x[0] <= comb_x, comb_x <= bound_x[1])]
    comb_final_f = comb_f[np.logical_and(bound_x[0] <= comb_x, comb_x <= bound_x[1])]
    # Return Result
    if comb_final_x.size == 0:
        return None
    else:
        return comb_final_x[np.argmin(comb_final_f)]
Example #9
 def plot(x_values,y_values,polylist,result):                
     fig=plt.figure()
     Color=["b","g","r","y","m"]                                         
     Legend=[]                                                           
     for i in range(len(polylist)):                                 
         x=list(arange(min(x_values)-1,max(x_values)+1,0.01))
         y=list(map(lambda num:P.polyval(num,polylist[i]),x))
         plt.plot(x,y,linewidth=2.0,color=Color[i%5])
         Legend.append(mpatches.Patch(color=Color[i%5],label="Polynomial "+str(i+1)))    
     x=list(arange(min(x_values)-1,max(x_values)+1,0.01))
     y=list(map(lambda num:P.polyval(num,array(result)),x))
     plt.plot(x,y,linewidth=3.0,color="k")                               
     Legend.append(mpatches.Patch(color="k",label="Final polynomial"))   
     x=x_values
     y=list(map(lambda num:P.polyval(num,array(result)),x))         
     plt.plot(x,y,"o",color="c")                                         
     plt.axvline(0,color="k")
     plt.axhline(0,color="k")
     plt.xlabel(" x values ")
     plt.ylabel("f(x) values")
     plt.legend(handles=Legend)
     dir=sys.path[0]
     dir+="\\app\\static\\graph.png" 
     if os.path.exists(dir):
         os.remove(dir)
     plt.savefig(dir, format="png", dpi=fig.dpi)
def integrate(func, points):
    params_guess = [1, 1]
    a, b = opt.fsolve(fit_exp_right, params_guess, args=([40, poly.polyval(40, func)], [49.5, .0007]))
    func_int = poly.polyint(func)
    integral = np.empty(points.shape)
    integral[points <= 40] = poly.polyval(points[points <= 40], func_int)
    integral[points > 40] = poly.polyval(40, func_int) + exp_int(points[points > 40], a, b)
    return np.diff(integral)
 def test_spec_calib(self):
     """
     Check that the calibration of the wavelength make _some_ sense
     It's not expected that the calibration is correct, but it should be at
     least some how logical.
     """
     # the wavelength bandwidth across the CCD should be pretty much constant
     # independent of the resolution (not exactly, as the wavelength is for
     # the center of the pixel, so the bigger are the pixels, the closer are
     # the centers) 
     
     # horizontal maximum res/min binning
     binning = (self.spectrometer.binning.range[0][0], # min
                self.spectrometer.binning.range[1][1]) # max
     self.spectrometer.binning.value = binning
     res = self.spectrometer.resolution.range[1] # max
     self.spectrometer.resolution.value = res
     res = self.spectrometer.resolution.value # actual value
     
     # read calibration
     data = self.spectrometer.data.get()
     pn = data.metadata[model.MD_WL_POLYNOMIAL]
     if len(pn) <= 1:
         logging.warning("Wavelength polynomial is of very low quality: length = %d", len(pn))
     # pixel 0 to pixel N +1 => whole CCD
     wl_bw_max_res =  polynomial.polyval(res[0], pn) - polynomial.polyval(0, pn)
     cwl_max_res = (polynomial.polyval(0, pn) + polynomial.polyval(res[0]-1, pn)) / 2
     logging.info("Wl bw = %f nm, center = %f nm", 
                  wl_bw_max_res * 1e9, cwl_max_res * 1e9)
     
     cwl_max_res_s = (polynomial.polyval(res[0]//2, pn) + 
                      polynomial.polyval(math.ceil(res[0]/2), pn)) / 2
     
     # do they make any sense?
     # should be a monotonic function
     self.assertTrue(cwl_max_res / 1.1 < cwl_max_res_s and cwl_max_res_s < cwl_max_res * 1.1)
     # centre wavelength should about (~30%) the same as the wavelength position
     exp_cwl = self.spectrograph.position.value["wavelength"] 
     self.assertTrue(exp_cwl / 1.3 < cwl_max_res and cwl_max_res < exp_cwl * 1.3)
     # never heard of bandwidth higher than a few 1000 nm
     self.assertGreater(wl_bw_max_res, 0)
     self.assertLess(wl_bw_max_res, 10000e-9)
     
     # 8 times smaller resolution
     binning = (min(binning[0] * 8, self.spectrometer.binning.range[1][0]),
                binning[1])
     self.spectrometer.binning.value = binning
     res = self.spectrometer.resolution.value # new resolution
     
     # read calibration
     data = self.spectrometer.data.get()
     pn = data.metadata[model.MD_WL_POLYNOMIAL]
     # pixel 0 to pixel N +1 => whole CCD
     wl_bw_low_res = polynomial.polyval(res[0], pn) - polynomial.polyval(0, pn)
     cwl_low_res = (polynomial.polyval(0, pn) + polynomial.polyval(res[0]-1, pn)) / 2
     
     self.assertAlmostEqual(wl_bw_low_res, wl_bw_max_res, 2)
     self.assertAlmostEqual(cwl_low_res, cwl_max_res)
def current_latlon():
    point = {}
    point['lat'] = lat
    point['lon'] = lon
    points = [point]

    utm_vehicle_x, utm_vehicle_y = projector(lon, lat)

    right_lane = []
    left_lane = []
    if mobileye_pb is not None:
        rc0 = mobileye_pb.lka_768.position
        rc1 = mobileye_pb.lka_769.heading_angle
        rc2 = mobileye_pb.lka_768.curvature
        rc3 = mobileye_pb.lka_768.curvature_derivative
        right_lane_marker_range = mobileye_pb.lka_769.view_range
        right_lane_marker_coef = [rc0, rc1, rc2, rc3]

        for x in range(int(right_lane_marker_range)):
            y = -1 * polyval(x, right_lane_marker_coef)
            newx = x * math.cos(heading) - y * math.sin(heading)
            newy = y * math.cos(heading) + x * math.sin(heading)
            plon, plat = projector(utm_vehicle_x + newx, utm_vehicle_y + newy,
                                   inverse=True)
            right_lane.append({'lat': plat, 'lng': plon})
        # print right_lane

        lc0 = mobileye_pb.lka_766.position
        lc1 = mobileye_pb.lka_767.heading_angle
        lc2 = mobileye_pb.lka_766.curvature
        lc3 = mobileye_pb.lka_766.curvature_derivative
        left_lane_marker_range = mobileye_pb.lka_767.view_range
        left_lane_marker_coef = [lc0, lc1, lc2, lc3]

        for x in range(int(left_lane_marker_range)):
            y = -1 * polyval(x, left_lane_marker_coef)
            newx = x * math.cos(heading) - y * math.sin(heading)
            newy = y * math.cos(heading) + x * math.sin(heading)
            plon, plat = projector(utm_vehicle_x + newx, utm_vehicle_y + newy,
                                   inverse=True)
            left_lane.append({'lat': plat, 'lng': plon})
    points.append(right_lane)
    points.append(left_lane)

    planned_path = []
    if planning_pb is not None:
        for traj_point in planning_pb.trajectory_point:
            x = traj_point.path_point.x
            y = traj_point.path_point.y
            newx = x * math.cos(heading) - y * math.sin(heading)
            newy = y * math.cos(heading) + x * math.sin(heading)
            plon, plat = projector(utm_vehicle_x + newx, utm_vehicle_y + newy,
                                   inverse=True)
            planned_path.append({'lat': plat, 'lng': plon})
    points.append(planned_path)

    return jsonify(points)
def mobileye_callback(mobileye_pb):
    global nx, ny, local_seg_x, local_seg_y
    global left_marker_x, left_marker_y
    global right_marker_x, right_marker_y
    global local_smooth_seg_x, local_smooth_seg_y
    global history_x, history_y
    mobileye_provider.update(mobileye_pb)
    mobileye_provider.process_obstacles()

    if localization_provider.localization_pb is None:
        return

    vx = localization_provider.localization_pb.pose.position.x
    vy = localization_provider.localization_pb.pose.position.y
    heading = localization_provider.localization_pb.pose.heading
    speed = chassis_provider.get_speed_mps()
    mobileye_provider.process_history(heading, speed)

    hist_x = []
    hist_y = []
    for line in mobileye_provider.history_left_lines:
        if line is None:
            continue
        x = []
        y = []
        for p in line.coords:
            x.append(p[0])
            y.append(-p[1])
        hist_x.append(x)
        hist_y.append(y)
    history_x = hist_x
    history_y = hist_y

    local_seg_x, local_seg_y = routing_provider.get_local_segment(vx, vy,
                                                                  heading)

    local_smooth_seg_x, local_smooth_seg_y = routing_provider.get_local_segment_spline(
        vx, vy, heading)

    left_marker_coef = mobileye_provider.left_lm_coef
    left_marker_x = []
    left_marker_y = []
    for x in range(int(mobileye_provider.left_lane_marker_range)):
        y = polyval(x, left_marker_coef)
        left_marker_x.append(x)
        left_marker_y.append(-y)

    right_marker_coef = mobileye_provider.right_lm_coef
    right_marker_x = []
    right_marker_y = []
    for x in range(int(mobileye_provider.right_lane_marker_range)):
        y = polyval(x, right_marker_coef)
        right_marker_x.append(x)
        right_marker_y.append(-y)
Example #14
    def deriv(self, m, v=None):
        alpha = self.slope
        sig1, sig2, c = m[0], m[1], m[2:]
        if self.logSigma:
            sig1, sig2 = np.exp(sig1), np.exp(sig2)

        # 2D
        if self.mesh.dim == 2:
            X = self.mesh.gridCC[self.actInd, 0]
            Y = self.mesh.gridCC[self.actInd, 1]

            if self.normal == 'X':
                f = polynomial.polyval(Y, c) - X
                V = polynomial.polyvander(Y, len(c)-1)
            elif self.normal == 'Y':
                f = polynomial.polyval(X, c) - Y
                V = polynomial.polyvander(X, len(c)-1)
            else:
                raise(Exception("Input for normal = X or Y or Z"))

        # 3D
        elif self.mesh.dim == 3:
            X = self.mesh.gridCC[self.actInd, 0]
            Y = self.mesh.gridCC[self.actInd, 1]
            Z = self.mesh.gridCC[self.actInd, 2]

            if self.normal == 'X':
                f = (polynomial.polyval2d(Y, Z, c.reshape((self.order[0]+1,
                     self.order[1]+1))) - X)
                V = polynomial.polyvander2d(Y, Z, self.order)
            elif self.normal == 'Y':
                f = (polynomial.polyval2d(X, Z, c.reshape((self.order[0]+1,
                     self.order[1]+1))) - Y)
                V = polynomial.polyvander2d(X, Z, self.order)
            elif self.normal == 'Z':
                f = (polynomial.polyval2d(X, Y, c.reshape((self.order[0]+1,
                     self.order[1]+1))) - Z)
                V = polynomial.polyvander2d(X, Y, self.order)
            else:
                raise(Exception("Input for normal = X or Y or Z"))

        if self.logSigma:
            g1 = -(np.arctan(alpha*f)/np.pi + 0.5)*sig1 + sig1
            g2 = (np.arctan(alpha*f)/np.pi + 0.5)*sig2
        else:
            g1 = -(np.arctan(alpha*f)/np.pi + 0.5) + 1.0
            g2 = (np.arctan(alpha*f)/np.pi + 0.5)

        g3 = Utils.sdiag(alpha*(sig2-sig1)/(1.+(alpha*f)**2)/np.pi)*V

        if v is not None:
            return sp.csr_matrix(np.c_[g1, g2, g3]) * v
        return sp.csr_matrix(np.c_[g1, g2, g3])
Example #15
 def __linfit(self,bottom,test):
     cv = 299792.458
     fit   = pol.polyfit(self.group.lmbd[self.idx[bottom]],self.data[:,bottom].T,2)
     a,b,c = fit[2,:],fit[1,:],fit[0,:]
     lmin  = -b/(2*a)
     bot   = pol.polyval(lmin,fit,tensor=False)
     pred  = pol.polyval(self.group.lmbd[test],fit)
     vel   = cv*(lmin-self.cent)/self.cent
     # Error of fit with extra error term to penalize fits 
     # that gets wildly off center, with extra weight so it *hurts*
     err   = np.sqrt( np.mean( (self.data[:,test]-pred)**2,axis=1) 
                                      + 2*(lmin-self.cent)**2 )
     return vel,bot,err
Example #16
def integrate(func, points, j):
    params_guess = [1, 1]
    # fit_to = j/2.0
    fit_to = poly.polyval(70, func) * .5
    a, b = opt.fsolve(fit_exp_right, params_guess, args=(
        [70, poly.polyval(70, func)], [100, fit_to]))
    func_int = poly.polyint(func)
    integral = np.empty(points.shape)
    integral[points <= 70] = poly.polyval(points[points <= 70], func_int)
    integral[points > 70] = poly.polyval(70, func_int) + exp_int(
        points[points > 70], a, b)
    vals = np.diff(integral)
    # vals[50:] = np.ones(30) * vals[50]
    return vals
Example #17
 def test_polyvander(self) :
     # check for 1d x
     x = np.arange(3)
     v = poly.polyvander(x, 3)
     assert_(v.shape == (3,4))
     for i in range(4) :
         coef = [0]*i + [1]
         assert_almost_equal(v[...,i], poly.polyval(x, coef))
     # check for 2d x
     x = np.array([[1,2],[3,4],[5,6]])
     v = poly.polyvander(x, 3)
     assert_(v.shape == (3,2,4))
     for i in range(4) :
         coef = [0]*i + [1]
         assert_almost_equal(v[...,i], poly.polyval(x, coef))
Example #18
def evalpolyfit(coefs, num_points=100, spacing=None, lengths=None):
    """
    Evaluate the polynomial fit at discrete points.
    There are four options for constructing the spline:
    1) Same number of points, variable lengths, variable spacing
    => must specify only num_points (default)
    2) Variable number of points, variable lengths, same spacing
    => must specify only spacing
    3) Same number of points, same lengths, same spacing
    => must specify length and either spacing or num_points
    
    args:
        coefs (dict): dict of coefficients (output of makepolyfit)
    
    kwargs:
        num_points (int): number of points in final midline
        
        spacing (int): spacing between points
        
        lengths (dict): dict of desired lengths of each midline
    
    returns:
        nosedist (dict): dict of uniformly-sampled distances from nose to tail
        
        nosetail (dict): dict of midlines evaluated using polynomial fit at 
        each point in the output nosedist
    
    """
    nosedist, nosetail = {}, {}
    for k, (cx, cy) in coefs.items():  # .iteritems() is Python 2 only
        try:
            # Determine spacing, number of points and length based on inputs
            start, stop = 0.0, lengths[k]  # use the loop key; `i` was undefined here
            if spacing:
                uu = np.arange(start, stop, spacing)
            else:
                uu = np.linspace(start, stop, num_points)

            # Construct polynomial from its coefficients
            vx = polyval(uu, cx)
            vy = polyval(uu, cy)
            vv = np.column_stack((vx, vy))  # zip() returns an iterator in Python 3, so stack explicitly

            nosedist[k] = uu
            nosetail[k] = vv
        except Exception:
            # Silently skip midlines whose coefficients or lengths are unusable.
            pass
    return nosedist, nosetail
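A hypothetical call, assuming polyval comes from numpy.polynomial.polynomial in this module; the single midline here has x(u) = u and y(u) = 0.1*u**2, with coefficients lowest order first:

coefs = {'worm0': ([0.0, 1.0], [0.0, 0.0, 0.1])}   # made-up key and coefficients
lengths = {'worm0': 10.0}

nosedist, nosetail = evalpolyfit(coefs, num_points=50, lengths=lengths)
# nosedist['worm0'] -> 50 evenly spaced arc-length samples over [0, 10]
# nosetail['worm0'] -> the (x, y) midline evaluated at those samples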
Example #19
    def __call__(self, i, x, p = 0):
        """
        k is the degree (>= 0)
        0 <= i < len(t)
        t[0] <= x <= t[-1]
        p < k
        """
        t = self._t
        jj = np.where(np.logical_and(x >= t[:-1], x < t[1:]))[0]
        if len(jj) == 0:
            if x == t[-1]:
                j = self._n + self._k - 2
            else:
                return 0
        else:
            j = jj[0]

        if self._derivatives:
            k = self._k - p
            a = self._a[i, j, k, :k+1]
        else:
            a = self._a[i, j, -1]
            if p > 0:
                a = polyder(a, p)

        return polyval(x, a)
Example #20
def density_kovacs82(d0, z):
    """Convert depth into firn density from Kovacs [1982]
    measurements over the brine of McMurdo Ice Shelf

    Arguments
    ---------
    d0 : float
        surface density [kg m^-3], used as the constant term of each polynomial
    z : array of floats
        Depth [m]
    """ 

    p = {'B': [d0, 2.79e1, 0, 0, 0, 0, 0],
         'C': [d0, 2.21e1, -3.09e-1, 0, 0, 0, 0],
         'D': [d0, 2.38e1, -5.11e-1, 4.62e-3, 0, 0, 0],
         'E': [d0, 3.36e1, -1.89e00, 7.83e-2, -1.85e-3, 2.24e-5, -1.07e-7],
         'F': [d0, 3.57e1, -2.15e00, 8.77e-2, -1.94e-3, 2.15e-5, -9.54e-8]}
    p = pd.DataFrame(p)

    # Density calculation
    dns = {'B': z*0, 'C': z*0, 'D': z*0, 'E': z*0, 'F': z*0}
    dns = pd.DataFrame(dns)

    for i, val in enumerate(z):
        dns.iloc[i] = polyval(val, p)  # .ix was removed from pandas; p.ix[:] was just p
    dns[dns > 917] = 917.
    return dns
Example #21
    def generateJSON(self, data, wlcalib, original_images):
        from numpy.polynomial.polynomial import polyval

        self.logger.info('start JSON generation')

        result = {}
        counter = 0

        for image in data:
            name = original_images[counter].filename
            result[name] = {}
            for fiber, value in image.items():
                result[name][fiber] = []
                for arco in value:
                    try:
                        res = polyval(arco[0], wlcalib[fiber])
                        result[name][fiber].append(
                            [arco[0], arco[1], arco[2], res])
                    except Exception:
                        self.logger.error('Error in JSON generation. Check later...')
            counter += 1

        self.logger.info('end JSON generation')

        return result
def fit(xcoords_fit, ycoords_fit, dat_fit): #does fit heavy lifting
    x_fit = np.linspace(xcoords_fit[0], xcoords_fit[-1], 50)
    np.concatenate([xcoords_fit, x_fit])  # NOTE: the result is discarded, so this call has no effect
    dat_fit = int(dat_fit)
    coefs = ply.polyfit(xcoords_fit, ycoords_fit, dat_fit, w=np.divide(1, yerrors_no_ul))
    y_fit = ply.polyval(x_fit, coefs)
    return (x_fit, y_fit, coefs)
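A minimal sketch of the weighted fit this helper wraps, with invented data (the helper itself relies on a module-level yerrors_no_ul array of 1-sigma errors):

import numpy as np
import numpy.polynomial.polynomial as ply

xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
ys = np.array([2.1, 3.9, 6.2, 7.8, 10.1])
yerr = np.array([0.1, 0.2, 0.1, 0.3, 0.2])

coefs = ply.polyfit(xs, ys, 1, w=1.0 / yerr)   # weight each point by 1/sigma
fit_x = np.linspace(xs[0], xs[-1], 50)
fit_y = ply.polyval(fit_x, coefs)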
def get_fert(S, starting_age, ending_age, E):
    '''
    Parameters:
        S - Number of age cohorts
        starting age - initial age of cohorts

    Returns:
        fert_rate - Sx1 array of fertility rates for each
            age cohort
        children_fertrate  - starting_age x 1 array of zeros, to be
            used in get_omega()
    '''
    # Fit a polynomial to the fertility rates
    poly_fert = poly.polyfit(age_midpoint, fert_data, deg=4)
    fert_rate = integrate(poly_fert, np.linspace(
        starting_age, ending_age, S+1))
    fert_rate /= 2.0
    children_fertrate_int = poly.polyint(poly_fert)
    children_fertrate_int = poly.polyval(np.linspace(
        0, starting_age, E + 1), children_fertrate_int)
    children_fertrate = np.diff(children_fertrate_int)
    children_fertrate /= 2.0
    children_fertrate[children_fertrate < 0] = 0
    children_fertrate[:int(10*S/float(ending_age-starting_age))] = 0
    return fert_rate, children_fertrate
Example #24
    def generate_focus_wl(self, all_measures, wlcalib):


        self.logger.info('start result generation')

        result = {}

        wlfib = {}
        for s in wlcalib.contents:
            wlfib[s.fibid] = s.solution

        for focus, image in all_measures.items():
            cresult = {}
            result[focus] = cresult
            for fiber, value in image.items():
                cresult[fiber] = []
                for arco in value:
                    try:
                        # FIXME: hardcoded sizes
                        x = 2048 * 2 - arco[0]
                        res = polynomial.polyval(x, wlfib[fiber].coeff)
                        cresult[fiber].append([arco[0], arco[1], arco[2], res])
                    except KeyError:
                        self.logger.warning("Fiber %d hasn't WL calibration, skipping", fiber)

        self.logger.info('end result generation')

        return result
def vary_gauss(a, sig=1, verbose=False):
    n = len(a)
    b = np.empty_like(a)

    if np.isscalar(sig):
        sig *= np.arange(n)
    elif isinstance(sig, tuple):
        sig = poly.polyval(np.arange(n), sig)
    elif callable(sig):
        sig = sig(np.arange(n))
    elif hasattr(sig, '__getitem__'):
        assert len(a) == len(sig)
    else: raise TypeError('`sig` is neither callable nor arraylike')

    for i, s in enumerate(sig):
        # build the kernel:
        w = round(2*s) # kernel half-width, must be integer
        if s == 0: s = 1
        k = np.arange(-w, w+1, dtype=float)
        k = np.exp(-.5 * k**2 / s**2)

        # slice the array (min/max prevent going past ends)
        al = max(i - w,     0)
        ar = min(i + w + 1, n)
        ao = a[al:ar]

        # and the kernel
        kl = max(w - i,     0)
        kr = min(w - i + n, 2*w+1)
        ko = k[kl:kr]
        b[i] = np.dot(ao, ko)/ko.sum()

    return b
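Hypothetical usage showing the three accepted forms of `sig` (scalar, polynomial-coefficient tuple evaluated at the index via poly.polyval, or callable):

import numpy as np

a = np.sin(np.linspace(0, 6 * np.pi, 200)) + np.random.normal(0, 0.2, 200)

b1 = vary_gauss(a, sig=0.05)                      # width = 0.05 * index
b2 = vary_gauss(a, sig=(1.0, 0.02))               # width = 1.0 + 0.02 * index
b3 = vary_gauss(a, sig=lambda i: 1 + np.sqrt(i))  # any callable of the index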
Example #26
    def __init__(self, poly_degree=7, numPoints = 300, numTrain = None, h5=None):
        if h5 is not None:
            self.h5read(h5)
            return
        assert poly_degree > 0, "must have at least 1 root"
        if numTrain is None:
            numTrain = max(1,int(.1 * numPoints))
        assert 3*numTrain < numPoints, "numTrain must be < 3*numPoints to produce test/train/cv sets"
        # make a polynomial with the given number of roots, but no root at 0
        poly_roots = list(np.arange(poly_degree + 1) - int(poly_degree//2))
        poly_roots.remove(0)
        poly_roots = np.array(poly_roots)
        poly_coeffs = polynomial.polyfromroots(poly_roots)

        self.x_all = vec2columnMat(np.linspace(start = poly_roots[0],
                                                   stop =  poly_roots[-1],
                                                   num = numPoints))
        self.y_all = polynomial.polyval(self.x_all[:], poly_coeffs)
        inds = list(range(len(self.x_all)))  # list() so random.shuffle can work in place (Python 3)
        random.shuffle(inds)
        self.x_train = vec2columnMat(self.x_all[:][inds[0:numTrain]])
        self.y_train = vec2columnMat(self.y_all[:][inds[0:numTrain]])
        self.x_test = vec2columnMat(self.x_all[:][inds[numTrain:(2*numTrain)]])
        self.y_test = vec2columnMat(self.y_all[:][inds[numTrain:(2*numTrain)]])
        self.x_cv = vec2columnMat(self.x_all[:][inds[(2*numTrain):(3*numTrain)]])
        self.y_cv = vec2columnMat(self.y_all[:][inds[(2*numTrain):(3*numTrain)]])
    def work(self, fig=None, ax=None):
        """Draw the polynomial fit on matplotlib figure or axis

        Parameters:
        -----------
        fig: matplotlib figure
        ax: matplotlib axis

        Returns:
        --------
        a tuple with figure and axis objects
        """
        if ax is None:
            if fig is None:
                return fig, ax
            else:
                ax = fig.gca()
        from numpy.polynomial.polynomial import polyfit
        from numpy.polynomial.polynomial import polyval
        x = self.data[self.aes['x']]
        y = self.data[self.aes['y']]
        min_x = min(x)
        max_x = max(x)
        c = polyfit(x, y, self.degree)
        x_ = np.linspace(min_x, max_x, len(x))
        y_ = polyval(x_, c)
        ax.plot(x_, y_, lw=self.lw, c=self.colour)
        return fig, ax
Example #28
 def calc_freq(self, time):
     """
     time: MJD object
     """
     dt = float(MJD(time)-self.tmid)*1440.
     multy = np.arange(1, self.ncoeff, 1)
     return self.profile.f0 + (1./60.)*polyval(dt, multy*self.polycos[1:])
def fluxesFromIm(image, xstart=86., xend=540., ycoord=234., radius=1.5):
    ycents = np.concatenate((np.repeat(ycoord+1,50),np.repeat(ycoord,70)))
    xcents = np.linspace(xstart, xend, nWgsOP)
    xpx=range(len(image[0,:]))    
    ypx=range(len(image[:,0]))
    xinds,yinds=np.meshgrid(xpx,ypx)
    #pdb.set_trace()

    image = np.squeeze(polyval(image, nonLinCoeffs)) #Non-linear correction
    
    if False:  # showPlots: <- this is too slow, so plotting is disabled
        plt.figure(2)
        #plt.clf()
        plt.imshow(image, interpolation='nearest')
        plt.plot(xcents,ycents,'+r',linewidth=1)
        circles(xcents, ycents, radius, facecolor='none',edgecolor='r')
        plt.pause(0.01)
        
    fluxes = np.zeros(nWgsOP)
    for wg in range(nWgsOP):  
        circleInds = np.sqrt( (xinds-xcents[wg])**2 + (yinds-ycents[wg])**2 ) < radius
        fluxes[wg]=np.average(image[circleInds])

    return fluxes
Example #30
    def Lagrange(L,M):                                                               
        polylist=[]
        n=len(L)                                                           
        w=(-1*L[0],1)                                                      
        for i in range(1,n):
            w=P.polymul(w,(-1*L[i],1))                                    
        result=array([0.0 for i in range(len(w)-1)])                    
        derivative=P.polyder(w)                                             
        for i in range(n):
            polylist.append((P.polydiv(w,(-1*L[i],1))[0]*M[i])/P.polyval(L[i],derivative))
            result+=polylist[-1]   

        polynomial=""                                                  
        for i in range(len(result)-1,0,-1):                                 
            if(result[i]!=0):
                if(result[i]>0 and i!=(len(result)-1)):
                    polynomial+=" + "+str(result[i])+"x^"+str(i)+" "
                elif(result[i]>0 and i==(len(result)-1)):
                    polynomial+=str(result[i])+"x^"+str(i)+" "
                else:
                    polynomial+=" - "+str(-1*result[i])+"x^"+str(i)+" "
        if(result[0]!=0):
            polynomial+=" + "+str(result[0]) if result[0]>0 else " - "+str(-1*result[0])
        plot(L,M,polylist,result)
        return (polynomial)
Example #31
def eval_poly(x, coef):
    return polynomial.polyval(x, coef)
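Since polyval takes coefficients lowest order first, a quick check of this wrapper:

print(eval_poly(2.0, [1.0, 2.0, 3.0]))   # 1 + 2*2 + 3*2**2 = 17.0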
Example #32
p_volt = p_data[:, 0]
p_count = p_data[:, 1]
h_volt = h_data[:, 0]
h_height = h_data[:, 1]

# get plateau region data
p_region_v = []
p_region_r = []
for v, r in zip(p_volt, p_count):
    if 250 < r < 350:
        p_region_v += [v]
        p_region_r += [r]

p_coeff = poly.polyfit(p_region_v, p_region_r, 1)
p_v_fit = np.linspace(p_region_v[0], p_region_v[-1], 10)
p_r_fit = poly.polyval(p_v_fit, p_coeff)
v_f, v_i = p_v_fit[-1], p_v_fit[0]
r_f, r_i = p_r_fit[-1], p_r_fit[0]
operating_v = (v_f + v_i) / 2
p_slope = 100 * 100 * (r_f - r_i) / (r_i * (v_f - v_i))
# print((r_f-r_i)/r_i,r_i,r_f,operating_v, p_slope)

plt.plot(p_volt, p_count, "o")
plt.plot(p_v_fit, p_r_fit)
plt.title("Plateau Characteristics of a G.M Counter")
plt.xlabel("Baseline Voltage(Volt)$\longrightarrow$")
plt.ylabel("Count /20s$\longrightarrow$")
plt.text(v_i - 50, r_i, "($V_1,R_1$)")
plt.annotate("($V_2,R_2$)", (v_f, r_f))
plt.text(
    550,
    def test_polyvalfromroots(self):
        # check exception for broadcasting x values over root array with
        # too few dimensions
        assert_raises(ValueError,
                      poly.polyvalfromroots, [1], [1],
                      tensor=False)

        # check empty input
        assert_equal(poly.polyvalfromroots([], [1]).size, 0)
        assert_(poly.polyvalfromroots([], [1]).shape == (0, ))

        # check empty input + multidimensional roots
        assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0)
        assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0))

        # check scalar input
        assert_equal(poly.polyvalfromroots(1, 1), 0)
        assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3, ))

        # check normal input
        x = np.linspace(-1, 1)
        y = [x**i for i in range(5)]
        for i in range(1, 5):
            tgt = y[i]
            res = poly.polyvalfromroots(x, [0] * i)
            assert_almost_equal(res, tgt)
        tgt = x * (x - 1) * (x + 1)
        res = poly.polyvalfromroots(x, [-1, 0, 1])
        assert_almost_equal(res, tgt)

        # check that shape is preserved
        for i in range(3):
            dims = [2] * i
            x = np.zeros(dims)
            assert_equal(poly.polyvalfromroots(x, [1]).shape, dims)
            assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims)
            assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims)

        # check compatibility with factorization
        ptest = [15, 2, -16, -2, 1]
        r = poly.polyroots(ptest)
        x = np.linspace(-1, 1)
        assert_almost_equal(poly.polyval(x, ptest),
                            poly.polyvalfromroots(x, r))

        # check multidimensional arrays of roots and values
        # check tensor=False
        rshape = (3, 5)
        x = np.arange(-3, 2)
        r = np.random.randint(-5, 5, size=rshape)
        res = poly.polyvalfromroots(x, r, tensor=False)
        tgt = np.empty(r.shape[1:])
        for ii in range(tgt.size):
            tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii])
        assert_equal(res, tgt)

        # check tensor=True
        x = np.vstack([x, 2 * x])
        res = poly.polyvalfromroots(x, r, tensor=True)
        tgt = np.empty(r.shape[1:] + x.shape)
        for ii in range(r.shape[1]):
            for jj in range(x.shape[0]):
                tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii])
        assert_equal(res, tgt)
Example #34
 def model(self, pars, mdata):
     mdata += npoly.polyval(self.x, pars)
Example #35
print(volumes, energies)

coefs = poly.polyfit(OPTIONS, energies, 3)

C = coefs[2] / volumes[0] / kJ * 1.0e24

print(C)

save(result, C)

x_new = np.linspace(OPTIONS[0] - 0.01,
                    OPTIONS[-1] + 0.01,
                    num=len(OPTIONS) * 10)

ffit = poly.polyval(x_new, coefs)

plt.scatter(OPTIONS, energies)
plt.plot(x_new, ffit)
plt.savefig('{0}.png'.format(name))

os.system('mv {0}.png {1}/graph'.format(name, temp_dir))

save(result, OPTIONS)
save(result, volumes)
save(result, energies)

save(result, '------------------------')

save(result_sum, '{0}, {1}, {2}, {3}'.format(name, C, volumes, energies))
Example #36
def towards_zero(z0, dz):
    return abs2(npp.polyval(z0 + dz, f_as_poly)) < abs2(
        npp.polyval(z0, f_as_poly))
Example #37
 def update_animation(self, frame_i):
     for i in range(len(self.nodes)):
         if i in self.locked:
             continue
         angle = poly.polyval(frame_i/self.n_frames, self.node_poly[i])
         self.nodes[i].r = Rotation.from_euler('zxy', angle)
Example #38
def create_rotations(rotation_table):
    """
    Convert an ISIS rotation table into rotation objects.

    Parameters
    ----------
    rotation_table : dict
                     The rotation ISIS table as a dictionary

    Returns
    -------
    : list
      A list of time dependent or constant rotation objects from the table. This
      list will always have either 1 or 2 elements. The first rotation will be
      time dependent and the second rotation will be constant. The rotations will
      be ordered such that the reference frame the first rotation rotates to is
      the reference frame the second rotation rotates from.
    """
    rotations = []
    root_frame = rotation_table['TimeDependentFrames'][-1]
    last_time_dep_frame = rotation_table['TimeDependentFrames'][0]
    # Case 1: It's a table of quaternions and times
    if 'J2000Q0' in rotation_table:
        # SPICE quaternions are (W, X, Y, Z) and ALE uses (X, Y, Z, W).
        quats = np.array([rotation_table['J2000Q1'],
                          rotation_table['J2000Q2'],
                          rotation_table['J2000Q3'],
                          rotation_table['J2000Q0']]).T
        if 'AV1' in rotation_table:
            av = np.array([rotation_table['AV1'],
                           rotation_table['AV2'],
                           rotation_table['AV3']]).T
        else:
            av = None
        time_dep_rot = TimeDependentRotation(quats,
                                             rotation_table['ET'],
                                             root_frame,
                                             last_time_dep_frame,
                                             av=av)
        rotations.append(time_dep_rot)
    # Case 2: It's a table of Euler angle coefficients
    elif 'J2000Ang1' in rotation_table:
        ephemeris_times = np.linspace(rotation_table['CkTableStartTime'],
                                      rotation_table['CkTableEndTime'],
                                      rotation_table['CkTableOriginalSize'])
        base_time = rotation_table['J2000Ang1'][-1]
        time_scale = rotation_table['J2000Ang2'][-1]
        scaled_times = (ephemeris_times - base_time) / time_scale
        coeffs = np.array([rotation_table['J2000Ang1'][:-1],
                           rotation_table['J2000Ang2'][:-1],
                           rotation_table['J2000Ang3'][:-1]]).T
        angles = polyval(scaled_times, coeffs).T
        # ISIS is hard coded to ZXZ (313) Euler angle axis order.
        # SPICE also interprets Euler angle rotations as negative rotations,
        # so negate them before passing to scipy.
        time_dep_rot = TimeDependentRotation.from_euler('zxz',
                                                        -angles,
                                                        ephemeris_times,
                                                        root_frame,
                                                        last_time_dep_frame)
        rotations.append(time_dep_rot)

    if 'ConstantRotation' in rotation_table:
        last_constant_frame = rotation_table['ConstantFrames'][0]
        rot_mat =  np.reshape(np.array(rotation_table['ConstantRotation']), (3, 3))
        constant_rot = ConstantRotation.from_matrix(rot_mat,
                                                    last_time_dep_frame,
                                                    last_constant_frame)
        rotations.append(constant_rot)
    return rotations
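A small shape check of the polyval call used for the Euler-angle case above (standalone, with dummy coefficients): with a coefficient matrix of shape (degree+1, 3), polyval broadcasts over the three angle columns and returns shape (3, n_times), hence the transpose.

import numpy as np
from numpy.polynomial.polynomial import polyval

scaled_times = np.linspace(-1.0, 1.0, 5)     # 5 sample times
coeffs = np.ones((3, 3))                     # (degree+1, n_angles)

angles = polyval(scaled_times, coeffs).T     # polyval -> (3, 5); .T -> (5, 3)
print(angles.shape)                          # (5, 3)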
Example #39

#---------------------Runge function------------------------------------------
def f(x):
    return 1.0 / (1.0 + 25.0 * x**2)


# Evenly spaced interpolation nodes:
x_grid = [-1, 1]
x1 = ny.linspace(x_grid[0], x_grid[1], 11)
y = f(x1)
xs = ny.linspace(x_grid[0], x_grid[1], 101)

#Order 3:
runge3_coef = poly.polyfit(x1, y, 3)
runge3_f = poly.polyval(xs, runge3_coef)

#Order 5:
runge5_coef = poly.polyfit(x1, y, 5)
runge5_f = poly.polyval(xs, runge5_coef)

#Order 10:
runge10_coef = poly.polyfit(x1, y, 10)
runge10_f = poly.polyval(xs, runge10_coef)


#------------------------Exponential Function----------------------------------
def g(x):
    with ny.errstate(divide='ignore', invalid='ignore'):
        return ny.exp(1 / x)
Example #40
def h(s, b, a):
    z = polyval(s, b[::-1]) / polyval(s, a[::-1])
    return z
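The [::-1] suggests b and a are given highest order first (the scipy.signal convention), while polyval expects lowest order first. A quick check under that assumption, with polyval imported as the snippet requires:

from numpy.polynomial.polynomial import polyval

b = [1.0, 1.0]        # s + 1
a = [1.0, 2.0, 3.0]   # s**2 + 2*s + 3
print(h(1j, b, a))    # (1+1j) / (2+2j) = (0.5+0j)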
Example #41
def calc_moon(t):
    """
    Lunar position model ELP2000-82 of (Chapront-Touze' and Chapront, 1983, 124, 50)

    This is the simplified version of Jean Meeus, Astronomical Algorithms,
    second edition, 1998, Willmann-Bell. Meeus claims approximate accuracy of 10"
    in longitude and 4" in latitude, with no specified time range.

    Tests against JPL ephemerides show accuracy of 10 arcseconds and 50 km over the
    date range CE 1950-2050.

    Parameters
    -----------
    t : `~astropy.time.Time`
        Time of observation.

    Returns
    --------
    skycoord : `~astropy.coordinates.SkyCoord`
        ICRS Coordinate for the body
    """
    # number of centuries since J2000.0.
    # This should strictly speaking be in Ephemeris Time, but TDB or TT
    # will introduce error smaller than intrinsic accuracy of algorithm.
    T = (t.tdb.jyear-2000.0)/100.

    # constants that are needed for all calculations
    Lc = u.Quantity(polyval(T, _coLc), u.deg)
    D = u.Quantity(polyval(T, _coD), u.deg)
    M = u.Quantity(polyval(T, _coM), u.deg)
    Mc = u.Quantity(polyval(T, _coMc), u.deg)
    F = u.Quantity(polyval(T, _coF), u.deg)

    A1 = u.Quantity(polyval(T, _coA1), u.deg)
    A2 = u.Quantity(polyval(T, _coA2), u.deg)
    A3 = u.Quantity(polyval(T, _coA3), u.deg)
    E = polyval(T, _coE)

    suml = sumr = 0.0
    for DNum, MNum, McNum, FNum, LFac, RFac in _MOON_L_R:
        corr = E ** abs(MNum)
        suml += LFac*corr*np.sin(D*DNum+M*MNum+Mc*McNum+F*FNum)
        sumr += RFac*corr*np.cos(D*DNum+M*MNum+Mc*McNum+F*FNum)

    sumb = 0.0
    for DNum, MNum, McNum, FNum, BFac in _MOON_B:
        corr = E ** abs(MNum)
        sumb += BFac*corr*np.sin(D*DNum+M*MNum+Mc*McNum+F*FNum)

    suml += (3958*np.sin(A1) + 1962*np.sin(Lc-F) + 318*np.sin(A2))
    sumb += (-2235*np.sin(Lc) + 382*np.sin(A3) + 175*np.sin(A1-F) +
             175*np.sin(A1+F) + 127*np.sin(Lc-Mc) - 115*np.sin(Lc+Mc))

    # ensure units
    suml = suml*u.microdegree
    sumb = sumb*u.microdegree

    # nutation of longitude
    jd1, jd2 = get_jd12(t, 'tt')
    nut, _ = erfa.nut06a(jd1, jd2)
    nut = nut*u.rad

    # calculate ecliptic coordinates
    lon = Lc + suml + nut
    lat = sumb
    dist = (385000.56+sumr/1000)*u.km

    # Meeus algorithm gives GeocentricTrueEcliptic coordinates
    ecliptic_coo = GeocentricTrueEcliptic(lon, lat, distance=dist,
                                          equinox=t)

    return SkyCoord(ecliptic_coo.transform_to(ICRS))
Example #42
    def compute_G(self, lbda, U):
        G = np.zeros((self.dim, self.dim))

        G[0, 0] = U[0, 1] * P.polyval(lbda, self.p[0]) * P.polyval(lbda, P.polyder(self.p[1])) \
                + U[0, 1] * P.polyval(lbda, self.p[1]) * P.polyval(lbda, P.polyder(self.p[0]))

        G[self.dim-1, self.dim-1] = - U[self.dim - 2, self.dim - 1] * P.polyval(lbda, self.p[self.dim - 2]) * P.polyval(lbda,P.polyder(self.p[self.dim - 1])) \
                                    + 1*P.polyval(lbda, self.p[self.dim - 1]) * P.polyval(lbda, P.polyder(self.p_n)) \
                                    - U[self.dim - 2, self.dim - 1] * P.polyval(lbda, self.p[self.dim - 1]) * P.polyval(lbda, P.polyder(self.p[self.dim - 2])) \
                                    + 1*P.polyval(lbda, self.p_n) * P.polyval(lbda, P.polyder(self.p[self.dim - 1]))

        for k in range(1, self.dim-1):
            G[k, k] = - U[k, k-1] * P.polyval(lbda, self.p[k - 1]) * P.polyval(lbda, P.polyder(self.p[k])) \
                      + U[k, k+1] * P.polyval(lbda, self.p[k]) * P.polyval(lbda, P.polyder(self.p[k+1])) \
                      - U[k, k-1] * P.polyval(lbda, self.p[k]) * P.polyval(lbda, P.polyder(self.p[k-1])) \
                      + U[k, k+1] * P.polyval(lbda, self.p[k+1]) * P.polyval(lbda, P.polyder(self.p[k]))

        for l in range(0, self.dim-1):
            G[l+1, l] = -U[l, l+1] * (P.polyval(lbda, self.p[l-1]) * P.polyval(lbda, P.polyder(self.p[l-1]))
                                    - P.polyval(lbda, self.p[l]) * P.polyval(lbda, P.polyder(self.p[l])))

        self.G = -(G + np.diag(np.diag(G, -1), +1))
        return self.G
Example #43
def SmoothPoly(x, y):
    coefs = poly.polyfit(x, y, 4)  # no change with >4
    return poly.polyval(x, coefs)
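Hypothetical usage (assumes poly is numpy.polynomial.polynomial, as in the snippet): smooth a noisy 1-D trend with the quartic fit above.

import numpy as np

x = np.linspace(0.0, 10.0, 100)
y = 0.5 * x**2 + np.random.normal(0.0, 1.0, x.size)
y_smooth = SmoothPoly(x, y)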
Example #44
 def _return(self):
     return polyval(self.x, self.coeff)
Example #45
    def compute_G_kl(self, k, l, U, eigenvalues):
        G_kl = np.zeros((self.dim, self.dim))
        if k == l:
            G_kl[k,k] = 1

        elif k == l+1:
            G_kl[k, l] = 1
            G_kl[l, k] = 1

        else:
            sum = 0
            for i, eigenvalue in enumerate(eigenvalues):
                acc = - U[0,1]*P.polyval(eigenvalue, self.p[0])*self.compute_p_kl(k=1, l=l, lbda=eigenvalue, eigenvalues=eigenvalues)\
                      - U[0,1]*P.polyval(eigenvalue, self.p[1])*self.compute_p_kl(k=0, l=l, lbda=eigenvalue, eigenvalues=eigenvalues)
                sum += self.frozen_spectral_w[i]**2 * P.polyval(eigenvalue, self.p[k]) * acc
            G_kl[0,0] = 0.5 * sum


            for r in range(1, self.dim-1):
                sum = 0
                for i, eigenvalue in enumerate(eigenvalues):
                    acc = U[r, r-1] * P.polyval(eigenvalue, self.p[r-1]) * self.compute_p_kl(k=r, l=l, lbda=eigenvalue, eigenvalues=eigenvalues)\
                        - U[r, r+1] * P.polyval(eigenvalue, self.p[r]) * self.compute_p_kl(k=r+1, l=l, lbda=eigenvalue, eigenvalues=eigenvalues)\
                        + U[r, r-1] * P.polyval(eigenvalue, self.p[r]) * self.compute_p_kl(k=r-1, l=l, lbda=eigenvalue, eigenvalues=eigenvalues)\
                        - U[r, r+1] * P.polyval(eigenvalue, self.p[r+1]) * self.compute_p_kl(k=r, l=l, lbda=eigenvalue, eigenvalues=eigenvalues)

                    sum += self.frozen_spectral_w[i]**2 * P.polyval(eigenvalue, self.p[k]) * acc
                G_kl[r, r] = 0.5 * sum

            for u in range(0, self.dim-2):
                sum=0
                for i, eigenvalue in enumerate(eigenvalues):
                    acc = U[r, r+1] * ( P.polyval(eigenvalue, self.p[r]) * self.compute_p_kl(k=r, l=l, lbda=eigenvalue, eigenvalues=eigenvalues)
                                      - P.polyval(eigenvalue, self.p[r+1]) * self.compute_p_kl(k=r+1, l=l, lbda=eigenvalue, eigenvalues=eigenvalues))

                    sum += self.frozen_spectral_w[i]**2 * P.polyval(eigenvalue, self.p[k]) * acc
                G_kl[u, u-1] = 0.5 * sum

        G_kl_tridiag = (G_kl+np.diag(np.diag(G_kl, -1), +1))
        return G_kl_tridiag
class TestEvaluation:
    # coefficients of 1 + 2*x + 3*x**2
    c1d = np.array([4., 2., 3.])
    c2d = np.einsum('i,j->ij', c1d, c1d)
    c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)

    # some random values in [-1, 1)
    x = np.random.random((3, 5)) * 2 - 1
    y = polyval(x, [1., 2., 3.])

    def test_hermeval(self):
        #check empty input
        assert_equal(herme.hermeval([], [1]).size, 0)

        #check normal input
        x = np.linspace(-1, 1)
        y = [polyval(x, c) for c in Helist]
        for i in range(10):
            msg = f"At i={i}"
            tgt = y[i]
            res = herme.hermeval(x, [0] * i + [1])
            assert_almost_equal(res, tgt, err_msg=msg)

        #check that shape is preserved
        for i in range(3):
            dims = [2] * i
            x = np.zeros(dims)
            assert_equal(herme.hermeval(x, [1]).shape, dims)
            assert_equal(herme.hermeval(x, [1, 0]).shape, dims)
            assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims)

    def test_hermeval2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y

        #test exceptions
        assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d)

        #test values
        tgt = y1 * y2
        res = herme.hermeval2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)

        #test shape
        z = np.ones((2, 3))
        res = herme.hermeval2d(z, z, self.c2d)
        assert_(res.shape == (2, 3))

    def test_hermeval3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y

        #test exceptions
        assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d)

        #test values
        tgt = y1 * y2 * y3
        res = herme.hermeval3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)

        #test shape
        z = np.ones((2, 3))
        res = herme.hermeval3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3))

    def test_hermegrid2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y

        #test values
        tgt = np.einsum('i,j->ij', y1, y2)
        res = herme.hermegrid2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)

        #test shape
        z = np.ones((2, 3))
        res = herme.hermegrid2d(z, z, self.c2d)
        assert_(res.shape == (2, 3) * 2)

    def test_hermegrid3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y

        #test values
        tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
        res = herme.hermegrid3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)

        #test shape
        z = np.ones((2, 3))
        res = herme.hermegrid3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3) * 3)
Example #47
X_wmd = []
Y_test = []
for i in range(len(testA)):
    s1 = vect.transform([testA[i]]).toarray().ravel()
    s2 = vect.transform([testB[i]]).toarray().ravel()
    # "cosine" dissimilarity between bag-of-words vectors
    # (note: this takes np.cos of the raw dot product, not the usual
    # normalized cosine similarity)
    d_cos = np.cos(np.dot(s1, s2))
    X_cos.append(d_cos)
    # WMD: normalize the bag-of-words counts into distributions
    s1 = s1.astype(float) / np.sum(s1)
    s2 = s2.astype(float) / np.sum(s2)
    d_wmd = ot.emd2(s1, s2, D)
    X_wmd.append(d_wmd)
    Y_test.append(scores_test[i])

Y_cos = poly.polyval(X_cos, k_cos)
Y_wmd = poly.polyval(X_wmd, k_wmd)

# We will use MSE, Spearman's rho and Pearson coefficients to measure the quality of our regression model

# In[15]:

from sklearn.metrics import mean_squared_error as mse
from scipy.stats import pearsonr
from scipy.stats import spearmanr

# Estimate the quality of your regression model for both Cosine and WMD dissimilarities

# In[16]:

print('-------- Cosine')
Example #48
 def _accum_mfunc(res, x):
     res += npoly.polyval(x, pars)
Example #49
 def spectral_weight(self, lbda):
     q_i = 1 / np.linalg.norm([P.polyval(lbda, self.p[j]) for j in range(self.dim)])
     return q_i
Example #50
    def __call__(self, t):
        #assert t <= self.time_intervals[-1][1], "t must be lower than the final time tf={}".format(self.time_intervals[-1][1])

        index = len(self.t0_l) - 1
        if t > self.time_intervals[-1][1]:
            t = self.time_intervals[-1][1]
        elif t < self.time_intervals[0][0]:
            t = self.time_intervals[0][0]
            index = 0
        else:
            for k in range(len(self.t0_l)):
                if self.t0_l[k] > t:
                    index = k - 1
                    break

        xyz_polycoeff = self.polycoeff_l[index]
        dxyz_polycoeff = self.dpolycoeff_l[index]
        ddxyz_polycoeff = self.ddpolycoeff_l[index]

        t0 = self.time_intervals[index][0]
        t1 = self.time_intervals[index][1]

        if t0 == t1:
            tau = 0.
            dtau_dt = 0.
        else:
            tau = (t - t0) / (t1 - t0)
            dtau_dt = 1.

        # Evaluate X
        x = polyval(tau, xyz_polycoeff[0])
        if len(dxyz_polycoeff[0]):
            x_dot = polyval(tau, dxyz_polycoeff[0]) * dtau_dt
        else:
            x_dot = 0.

        if len(ddxyz_polycoeff[0]):
            x_dotdot = polyval(tau, ddxyz_polycoeff[0]) * dtau_dt**2
        else:
            x_dotdot = 0.

        # Evaluate Y
        y = polyval(tau, xyz_polycoeff[1])
        if len(dxyz_polycoeff[1]):
            y_dot = polyval(tau, dxyz_polycoeff[1]) * dtau_dt
        else:
            y_dot = 0.

        if len(ddxyz_polycoeff[1]):
            y_dotdot = polyval(tau, ddxyz_polycoeff[1]) * dtau_dt**2
        else:
            y_dotdot = 0.

        # Evaluate Z
        x0 = polyval(0., xyz_polycoeff[0])
        x1 = polyval(1., xyz_polycoeff[0])
        if x0 == x1:
            tau_x = 0.
            dtau_x_dt = 0.
        else:
            tau_x = (x - x0) / (x1 - x0)
            dtau_x_dt = x_dot

        z = polyval(tau_x, xyz_polycoeff[2])
        if len(dxyz_polycoeff[2]):
            z_dot = polyval(tau_x, dxyz_polycoeff[2]) * x_dot
        else:
            z_dot = 0.

        if len(ddxyz_polycoeff[2]):
            z_dotdot = polyval(tau_x, ddxyz_polycoeff[2]) * x_dot**2 + polyval(
                tau_x, dxyz_polycoeff[2]) * x_dotdot
        else:
            z_dotdot = 0.

        M = SE3.Identity()
        v = Motion.Zero()
        a = Motion.Zero()

        M.translation = np.matrix([x, y, z]).T
        M.rotation = self._R
        v.linear = np.matrix([x_dot, y_dot, z_dot]).T
        a.linear = np.matrix([x_dotdot, y_dotdot, z_dotdot]).T

        return M, v, a
Example #51
 def predict_dim(dim):
     n = ['x', 'y', 'z'].index(dim) + 1
     return P.polyval(scaled_time, poly_dict[dim]) * poly_dict[
         'scale'][n] + poly_dict['mid'][n]
    def test_polyfit(self):
        def f(x):
            return x * (x - 1) * (x - 2)

        def f2(x):
            return x**4 + x**2 + 1

        # Test exceptions
        assert_raises(ValueError, poly.polyfit, [1], [1], -1)
        assert_raises(TypeError, poly.polyfit, [[1]], [1], 0)
        assert_raises(TypeError, poly.polyfit, [], [1], 0)
        assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0)
        assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0)
        assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0)
        assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]])
        assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1])
        assert_raises(ValueError, poly.polyfit, [1], [1], [
            -1,
        ])
        assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6])
        assert_raises(TypeError, poly.polyfit, [1], [1], [])

        # Test fit
        x = np.linspace(0, 2)
        y = f(x)
        #
        coef3 = poly.polyfit(x, y, 3)
        assert_equal(len(coef3), 4)
        assert_almost_equal(poly.polyval(x, coef3), y)
        coef3 = poly.polyfit(x, y, [0, 1, 2, 3])
        assert_equal(len(coef3), 4)
        assert_almost_equal(poly.polyval(x, coef3), y)
        #
        coef4 = poly.polyfit(x, y, 4)
        assert_equal(len(coef4), 5)
        assert_almost_equal(poly.polyval(x, coef4), y)
        coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4])
        assert_equal(len(coef4), 5)
        assert_almost_equal(poly.polyval(x, coef4), y)
        #
        coef2d = poly.polyfit(x, np.array([y, y]).T, 3)
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
        coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3])
        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
        # test weighting
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        yw[0::2] = 0
        wcoef3 = poly.polyfit(x, yw, 3, w=w)
        assert_almost_equal(wcoef3, coef3)
        wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w)
        assert_almost_equal(wcoef3, coef3)
        #
        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex x values whose squares sum to zero
        x = [1, 1j, -1, -1j]
        assert_almost_equal(poly.polyfit(x, x, 1), [0, 1])
        assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1])
        # test fitting only even polynomials
        x = np.linspace(-1, 1)
        y = f2(x)
        coef1 = poly.polyfit(x, y, 4)
        assert_almost_equal(poly.polyval(x, coef1), y)
        coef2 = poly.polyfit(x, y, [0, 2, 4])
        assert_almost_equal(poly.polyval(x, coef2), y)
        assert_almost_equal(coef1, coef2)
def velocity_at(self, t):
    vel_coef_x = poly.polyder(self.coef_x)
    vel_coef_y = poly.polyder(self.coef_y)
    vel_x = poly.polyval(t, vel_coef_x)
    vel_y = poly.polyval(t, vel_coef_y)
    return (vel_x, vel_y)
def mf_to_wp(MF):
    return P.polyval(MF, MF_to_WP)
Example #55
import numpy as np
from numpy.polynomial.polynomial import polyfit, polyval


def MFDFA(timeseries: np.ndarray,
          lag: np.ndarray = None,
          order: int = 1,
          q: np.ndarray = 2,
          modified: bool = False) -> np.ndarray:
    """
    Multi-Fractal Detrended Fluctuation Analysis of a timeseries. MFDFA
    generates a fluctuation function F²(q,s), with s the segment size and q
    the fractal powers. Take a timeseries Xₜ, find the integral
    Yₜ = cumsum(Xₜ), and segment the timeseries into Nₛ segments of size s.

                      Fᵥ²(s) = (1/s) ∑ᵢ₌₁ˢ [Yᵥᵢ - yᵥᵢ]²

    with yᵥᵢ the polynomial fitting of order m. Having obtained the variances
    of each (detrended) segment, average over the segments v and increase s to
    obtain the fluctuation function F²(q,s) as a function of the segment
    length s:

                  F²(q,s) = {(1/Nₛ) ∑ᵥ [Fᵥ²(s)]^(q/2)}^(1/q).
    The fluctuation function F²(q,s) can now be plotted on a log-log scale;
    its slope versus the segment size s is the self-similarity scaling
    exponent h(q)

                                  F²(q,s) ~ sʰ.

    If H ≈ 0 in a monofractal series, use a second integration step by setting
    'modified' = True.

    Parameters
    ----------
    timeseries: np.ndarray
        A 1-dimensional timeseries of length N (shape (N,) or (N, 1)).

    lag: np.ndarray of ints
        An array with the window sizes to calculate (ints). Notice that
        min(lag) must be > order + 1, because fitting a polynomial of order m
        requires at least m + 1 points. The results are meaningless for lag
        values close to the polynomial order m and for lag > len(timeseries) / 4,
        since fewer than 4 windows give poor statistics.

    order: int
        The order of the polynomials used for detrending. 'order = 1' is DFA1,
        a least-squares fit of each segment with a first-order polynomial (a
        line); 'order = 2' uses a second-order polynomial, etc.

    q: np.ndarray
        Fractal exponents to calculate, as an array in [-10, 10]. Values of
        q = 0 are removed, since the code does not converge there. q = 2 is
        the standard Detrended Fluctuation Analysis and is the default.

    modified: bool
        For data with a Hurst exponent ≈ 0, i.e., strongly anticorrelated
        series, a standard MFDFA gives inaccurate results; a further
        integration of the timeseries then yields a modified scaling
        coefficient.

    Returns
    -------
    lag: np.ndarray of ints
        Array of lags, realigned, preserving only different lags and with
        entries > order + 1

    f: np.ndarray
        An array of shape (len(lag), len(q)) with the fluctuation values over
        the indicated lag windows and the indicated q-fractal powers.
    """

    # Force lag to be ints, ensure lag > order + 1
    lag = lag[lag > order + 1]
    lag = np.round(lag).astype(int)

    # Assert that the timeseries is 1-dimensional
    if timeseries.ndim > 1:
        assert timeseries.shape[1] == 1, "Timeseries needs to be 1 dimensional"

    timeseries = timeseries.reshape(-1, 1)
    # Size of array
    N = timeseries.shape[0]

    # Fractal powers as floats
    q = np.asarray_chkfinite(q, dtype=float)

    # Ensure q≈0 is removed, since it does not converge. Limit set at |q| < 0.1
    q = q[(q < -.1) + (q > .1)]

    # Reshape q to perform np.float_power
    q = q.reshape(-1, 1)

    # x-axis
    X = np.linspace(1, lag.max(), lag.max())

    # "Profile" of the series
    Y = np.cumsum(timeseries - np.mean(timeseries))

    # Cumulative "profile" for strongly anticorrelated data:
    if modified == True:
        Y = np.cumsum(Y - np.mean(Y))

    # Return f of (fractal)-variances
    f = np.empty((0, q.size))

    # Loop over elements in lag
    # Notice that the timeseries has to be split into different segments of
    # length lag, so some elements at the end of the array might be left out.
    # The same procedure is then run in reverse, where elements at the
    # beginning of the series are discarded instead.
    for i in lag:
        # Reshape into (N/lag, lag)
        Y_ = Y[:N - N % i].reshape((N - N % i) // i, i)
        Y_r = Y[N % i:].reshape((N - N % i) // i, i)

        # Perform a polynomial fit to each segment
        p = polyfit(X[:i], Y_.T, order)
        p_r = polyfit(X[:i], Y_r.T, order)

        # Subtract the fitted trend from the data and calculate the variance
        F = np.var(Y_ - polyval(X[:i], p), axis=1)
        F_r = np.var(Y_r - polyval(X[:i], p_r), axis=1)

        # Calculate the Multi-Fractal Detrended Fluctuation Analysis
        f = np.append(
            f,
            np.float_power(
                np.mean(np.float_power(F, q / 2), axis=1) / 2, 1 / q.T) +
            np.float_power(
                np.mean(np.float_power(F_r, q / 2), axis=1) / 2, 1 / q.T),
            axis=0)

    return lag, f
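A minimal usage sketch for the MFDFA function above (illustrative only: it assumes the function is in scope, and the white-noise input, seed, lag, and q choices are assumptions, not part of the original module). For uncorrelated noise the log-log slope, i.e. the scaling exponent h(q), should come out near 0.5 for every q:

import numpy as np
import numpy.polynomial.polynomial as poly

rng = np.random.default_rng(0)
noise = rng.standard_normal(10000)                      # uncorrelated test series

lag = np.unique(np.logspace(0.7, 3, 30).astype(int))    # window sizes ~5 .. 1000
q = np.array([-2.0, -1.0, 1.0, 2.0])                    # fractal powers (q = 0 excluded)

lag, f = MFDFA(noise, lag=lag, order=1, q=q)

# Slope of log f vs log s estimates the self-similarity exponent h(q)
for j, qj in enumerate(q):
    h_q = poly.polyfit(np.log(lag), np.log(f[:, j]), 1)[1]
    print(f"h(q={qj:+.0f}) ~ {h_q:.2f}")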
    def test_polyint(self):
        # check exceptions
        assert_raises(TypeError, poly.polyint, [0], .5)
        assert_raises(ValueError, poly.polyint, [0], -1)
        assert_raises(ValueError, poly.polyint, [0], 1, [0, 0])
        assert_raises(ValueError, poly.polyint, [0], lbnd=[0])
        assert_raises(ValueError, poly.polyint, [0], scl=[0])
        assert_raises(TypeError, poly.polyint, [0], axis=.5)
        with assert_warns(DeprecationWarning):
            poly.polyint([1, 1], 1.)

        # test integration of zero polynomial
        for i in range(2, 5):
            k = [0] * (i - 2) + [1]
            res = poly.polyint([0], m=i, k=k)
            assert_almost_equal(res, [0, 1])

        # check single integration with integration constant
        for i in range(5):
            scl = i + 1
            pol = [0] * i + [1]
            tgt = [i] + [0] * i + [1 / scl]
            res = poly.polyint(pol, m=1, k=[i])
            assert_almost_equal(trim(res), trim(tgt))

        # check single integration with integration constant and lbnd
        for i in range(5):
            scl = i + 1
            pol = [0] * i + [1]
            res = poly.polyint(pol, m=1, k=[i], lbnd=-1)
            assert_almost_equal(poly.polyval(-1, res), i)

        # check single integration with integration constant and scaling
        for i in range(5):
            scl = i + 1
            pol = [0] * i + [1]
            tgt = [i] + [0] * i + [2 / scl]
            res = poly.polyint(pol, m=1, k=[i], scl=2)
            assert_almost_equal(trim(res), trim(tgt))

        # check multiple integrations with default k
        for i in range(5):
            for j in range(2, 5):
                pol = [0] * i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = poly.polyint(tgt, m=1)
                res = poly.polyint(pol, m=j)
                assert_almost_equal(trim(res), trim(tgt))

        # check multiple integrations with defined k
        for i in range(5):
            for j in range(2, 5):
                pol = [0] * i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = poly.polyint(tgt, m=1, k=[k])
                res = poly.polyint(pol, m=j, k=list(range(j)))
                assert_almost_equal(trim(res), trim(tgt))

        # check multiple integrations with lbnd
        for i in range(5):
            for j in range(2, 5):
                pol = [0] * i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1)
                res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1)
                assert_almost_equal(trim(res), trim(tgt))

        # check multiple integrations with scaling
        for i in range(5):
            for j in range(2, 5):
                pol = [0] * i + [1]
                tgt = pol[:]
                for k in range(j):
                    tgt = poly.polyint(tgt, m=1, k=[k], scl=2)
                res = poly.polyint(pol, m=j, k=list(range(j)), scl=2)
                assert_almost_equal(trim(res), trim(tgt))
def coord_at(self, t):
    x = poly.polyval(t, self.coef_x)
    y = poly.polyval(t, self.coef_y)
    return (x, y)
Example #58
PRC110r1['pos'] = (((np.sign(PRC110r1['voltage'].diff()) + 1) /
                    2)).astype('bool')

plt.ion()
with plt.style.context('prl'):
    #first plot at 108
    fig, ax = plt.subplots()
    pos108 = PRC108r1[[
        'voltage', 'current'
    ]][1::][PRC108r1['pos'][1::]].sort_values(by=['voltage'])
    t0 = pos108['voltage'].iloc[0]
    t1 = pos108['voltage'].iloc[-1]
    coeffs = poly.polyfit(
        [t0, t1], [pos108['current'].iloc[0], pos108['current'].iloc[-1]], 1)
    pos108['normC'] = pos108['current'] - poly.polyval(pos108['voltage'],
                                                       coeffs)

    ax.plot(pos108['voltage'], pos108['normC'], '.')
    ax2 = ax.twinx()
    ax2.plot(tilt108['E'][1::][tilt108['pos'][1::]],
             tilt108['Theta mean'][1::][tilt108['pos'][1::]],
             '.',
             c='C1')
    # fig,ax =plt.subplots()
    # ax.plot(PRC108r1['voltage'][1::][~PRC108r1['pos'][1::]],PRC108r1['current'][1::][~PRC108r1['pos'][1::]],'.')
    # ax2 = ax.twinx()
    # ax2.plot(tilt108['E'][1::][~tilt108['pos'][1::]], tilt108['Theta mean'][1::][~tilt108['pos'][1::]], '.',c='C1')
    fig, ax = plt.subplots()
    ax.plot(pos108['voltage'], np.cumsum(pos108['normC']), '.')
    ax2 = ax.twinx()
    ax2.plot(tilt108['E'][1::][tilt108['pos'][1::]],
             tilt108['Theta mean'][1::][tilt108['pos'][1::]],
             '.',
             c='C1')
#!/usr/bin/python
#####################################################################
# Author:		Alberto Albz Marocchino
# Date:			26-02-2017
# Purpose:     comparison between python and R :: polynomial fit regression
# Source:       python
#####################################################################

import numpy as np
import pylab as pyl
from scipy import stats
import numpy.polynomial.polynomial as poly

#--- generate sample---#
x = np.linspace(-5, 5, 500)
y_theory = -1 * x**3 + 5 * x**2 + x
y = y_theory + np.random.normal(0, 25, 500)

#---polynomial fit (degree 3)---#
coefficients = poly.polyfit(x, y, 3)
y_reconstructed = poly.polyval(x, coefficients)

#%matplotlib inline
pyl.plot(x, y, '.', label='scattered data')
pyl.plot(x, y_theory, 'x', label='theoretical curve')
pyl.plot(x, y_reconstructed, '-', label='fit', lw=3)
pyl.legend()
class TestEvaluation:
    # coefficients of 1 + 2*x + 3*x**2
    c1d = np.array([1., 2., 3.])
    c2d = np.einsum('i,j->ij', c1d, c1d)
    c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)

    # some random values in [-1, 1)
    x = np.random.random((3, 5)) * 2 - 1
    y = poly.polyval(x, [1., 2., 3.])

    def test_polyval(self):
        #check empty input
        assert_equal(poly.polyval([], [1]).size, 0)

        #check normal input
        x = np.linspace(-1, 1)
        y = [x**i for i in range(5)]
        for i in range(5):
            tgt = y[i]
            res = poly.polyval(x, [0] * i + [1])
            assert_almost_equal(res, tgt)
        tgt = x * (x**2 - 1)
        res = poly.polyval(x, [0, -1, 0, 1])
        assert_almost_equal(res, tgt)

        #check that shape is preserved
        for i in range(3):
            dims = [2] * i
            x = np.zeros(dims)
            assert_equal(poly.polyval(x, [1]).shape, dims)
            assert_equal(poly.polyval(x, [1, 0]).shape, dims)
            assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims)

        #check masked arrays are processed correctly
        mask = [False, True, False]
        mx = np.ma.array([1, 2, 3], mask=mask)
        res = np.polyval([7, 5, 3], mx)
        assert_array_equal(res.mask, mask)

        #check subtypes of ndarray are preserved
        class C(np.ndarray):
            pass

        cx = np.array([1, 2, 3]).view(C)
        assert_equal(type(np.polyval([2, 3, 4], cx)), C)

    def test_polyvalfromroots(self):
        # check exception for broadcasting x values over root array with
        # too few dimensions
        assert_raises(ValueError,
                      poly.polyvalfromroots, [1], [1],
                      tensor=False)

        # check empty input
        assert_equal(poly.polyvalfromroots([], [1]).size, 0)
        assert_(poly.polyvalfromroots([], [1]).shape == (0, ))

        # check empty input + multidimensional roots
        assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0)
        assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0))

        # check scalar input
        assert_equal(poly.polyvalfromroots(1, 1), 0)
        assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3, ))

        # check normal input
        x = np.linspace(-1, 1)
        y = [x**i for i in range(5)]
        for i in range(1, 5):
            tgt = y[i]
            res = poly.polyvalfromroots(x, [0] * i)
            assert_almost_equal(res, tgt)
        tgt = x * (x - 1) * (x + 1)
        res = poly.polyvalfromroots(x, [-1, 0, 1])
        assert_almost_equal(res, tgt)

        # check that shape is preserved
        for i in range(3):
            dims = [2] * i
            x = np.zeros(dims)
            assert_equal(poly.polyvalfromroots(x, [1]).shape, dims)
            assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims)
            assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims)

        # check compatibility with factorization
        ptest = [15, 2, -16, -2, 1]
        r = poly.polyroots(ptest)
        x = np.linspace(-1, 1)
        assert_almost_equal(poly.polyval(x, ptest),
                            poly.polyvalfromroots(x, r))

        # check multidimensional arrays of roots and values
        # check tensor=False
        rshape = (3, 5)
        x = np.arange(-3, 2)
        r = np.random.randint(-5, 5, size=rshape)
        res = poly.polyvalfromroots(x, r, tensor=False)
        tgt = np.empty(r.shape[1:])
        for ii in range(tgt.size):
            tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii])
        assert_equal(res, tgt)

        # check tensor=True
        x = np.vstack([x, 2 * x])
        res = poly.polyvalfromroots(x, r, tensor=True)
        tgt = np.empty(r.shape[1:] + x.shape)
        for ii in range(r.shape[1]):
            for jj in range(x.shape[0]):
                tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii])
        assert_equal(res, tgt)

    def test_polyval2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y

        #test exceptions
        assert_raises_regex(ValueError, 'incompatible', poly.polyval2d, x1,
                            x2[:2], self.c2d)

        #test values
        tgt = y1 * y2
        res = poly.polyval2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)

        #test shape
        z = np.ones((2, 3))
        res = poly.polyval2d(z, z, self.c2d)
        assert_(res.shape == (2, 3))

    def test_polyval3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y

        #test exceptions
        assert_raises_regex(ValueError, 'incompatible', poly.polyval3d, x1, x2,
                            x3[:2], self.c3d)

        #test values
        tgt = y1 * y2 * y3
        res = poly.polyval3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)

        #test shape
        z = np.ones((2, 3))
        res = poly.polyval3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3))

    def test_polygrid2d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y

        #test values
        tgt = np.einsum('i,j->ij', y1, y2)
        res = poly.polygrid2d(x1, x2, self.c2d)
        assert_almost_equal(res, tgt)

        #test shape
        z = np.ones((2, 3))
        res = poly.polygrid2d(z, z, self.c2d)
        assert_(res.shape == (2, 3) * 2)

    def test_polygrid3d(self):
        x1, x2, x3 = self.x
        y1, y2, y3 = self.y

        #test values
        tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
        res = poly.polygrid3d(x1, x2, x3, self.c3d)
        assert_almost_equal(res, tgt)

        #test shape
        z = np.ones((2, 3))
        res = poly.polygrid3d(z, z, z, self.c3d)
        assert_(res.shape == (2, 3) * 3)