Example #1
def get_ntotal(matName,lambdas):
	fname = join(matDir,'%s%s.csv' % (matPrefix,matName))
	fdata = openFile(fname)[matHeader:]
	# get data from the file
	lambList	= []
	nList		= []
	kList		= []
	for l in fdata:
		wl , n , k = l.split(',')
		wl , n , k = float(wl) , float(n) , float(k)
		lambList.append(wl)
		nList.append(n)
		kList.append(k)
	# make interpolation functions
	int_n	= interp1d(lambList,nList)
	int_k	= interp1d(lambList,kList)
	# interpolate data
	kintList	= int_k(lambdas)
	nintList	= int_n(lambdas)
	# make ntotal
	ntotal = []
	for i,n in enumerate(nintList):
		nt = complex(n,kintList[i])
		ntotal.append(nt)
	return ntotal
def interpolatedData(start_ts = None, end_ts = None, step = 60, files = ['avg-confirmation-time.txt', 'estimated-transaction-volume.txt', 'my-wallet-transaction-volume.txt', 'total-bitcoins.txt', 
                     'bitcoin-days-destroyed-cumulative.txt','hash-rate.txt', 'n-orphaned-blocks.txt','trade-volume.txt', 'bitcoin-days-destroyed.txt','market-cap.txt', 
                     'n-transactions-excluding-popular.txt','transaction-fees.txt', 'blocks-size.txt','n-transactions-per-block.txt', 'tx-trade-ratio.txt', 
                     'cost-per-transaction.txt','miners-revenue.txt', 'n-transactions.txt', 'difficulty.txt','my-wallet-n-tx.txt', 'n-unique-addresses.txt', 
                     'estimated-transaction-volume-usd.txt', 'my-wallet-n-users.txt', 'output-volume.txt']): 
    out = {}
    for f in files: 
        out[f] = {}
        data = read_data('data/' + f, True)
        x = []
        y = []
        # sort the timestamps so interp1d receives a monotonically increasing x
        for key in sorted(data.keys()):
            x.append(key)
            y.append(data[key])
        interpolator = interp1d(x, y, kind='cubic')
        if start_ts is None:
            cur_start_ts = int(min(x))
        else:
            cur_start_ts = start_ts
        if end_ts is None:
            cur_end_ts = int(max(x))
        else:
            cur_end_ts = end_ts
        new_y = interpolator(range(cur_start_ts, cur_end_ts, step))
        out[f] = dict(zip(range(cur_start_ts, cur_end_ts, step), new_y))
        #plt.plot(x, y, 'bo', range(cur_start_ts, cur_end_ts, step), new_y, 'r--')
        #plt.show()
        pickle.dump(out[f], open(f.split('.')[0] + str(step) + '.pickle', 'wb'))
    return out
Example #3
   def interpolateData(self):
      import scipy.interpolate as interpolate 
      
      NyK = self.Ny - 2*self.K.value # The Capon image will be 2*K smaller in range

      if VERBOSE:
         print('Start interpolating...')

      # IQ-interpolation (in image domain, imag and real, hence coherent) 
      iq_interp_factor = 2
      x_idx = np.arange(self.Nx)
      y_idx = np.arange(NyK)
      x_up_idx = np.linspace(0.25, self.Nx-1-0.25, (self.Nx-1)*iq_interp_factor)  
      y_up_idx = np.arange(NyK)
      self.angles_intrp = interpolate.interp1d( x_idx, self.angles ) (x_up_idx)
      self.ranges_intrp = self.ranges
      if self.K.value > 0:
         self.ranges_intrp = self.ranges_intrp[self.K.value:-self.K.value]
          
      img_das_real = interpolate.RectBivariateSpline( x_idx, y_idx, self.img_das.real ) (x_up_idx, y_up_idx)
      img_das_imag = interpolate.RectBivariateSpline( x_idx, y_idx, self.img_das.imag ) (x_up_idx, y_up_idx)
   
      img_capon_real = interpolate.RectBivariateSpline( x_idx, y_idx, self.img_capon.real ) (x_up_idx, y_up_idx)
      img_capon_imag = interpolate.RectBivariateSpline( x_idx, y_idx, self.img_capon.imag ) (x_up_idx, y_up_idx)
      
      self.img_das_iq_intrp   = img_das_real   + 1j * img_das_imag
      self.img_capon_iq_intrp = img_capon_real + 1j * img_capon_imag   
      
      # In-coherent interpolation
      if self.Ky.value > 1 or self.Kx.value > 1:
         
         NxK = (self.Nx-1) * iq_interp_factor
      
         y_idx = np.arange(NyK)
         x_idx = np.arange(NxK)
      
         y_up_idx = np.linspace(0,NyK-1,NyK*self.Ky.value)
         x_up_idx = np.linspace(0,NxK-1,NxK*self.Kx.value)
         
         self.angles_intrp = interpolate.interp1d( x_idx, self.angles_intrp ) (x_up_idx)
         if self.K.value > 0:
            self.ranges_intrp = interpolate.interp1d( y_idx, self.ranges[self.K.value:-self.K.value].squeeze() ) (y_up_idx)
         else:
            self.ranges_intrp = interpolate.interp1d( y_idx, self.ranges ) (y_up_idx)
      
         self.img_das_intrp   = interpolate.RectBivariateSpline( x_idx, y_idx, abs(self.img_das_iq_intrp) ) (x_up_idx, y_up_idx)
         self.img_capon_intrp = interpolate.RectBivariateSpline( x_idx, y_idx, abs(self.img_capon_iq_intrp) ) (x_up_idx, y_up_idx)
            
      else: # do nothing
         self.img_das_intrp = self.img_das_iq_intrp
         self.img_capon_intrp = self.img_capon_iq_intrp
         
      self.img_das_intrp   = np.transpose(self.img_das_intrp)
      self.img_capon_intrp = np.transpose(self.img_capon_intrp)
      
      self.img_das_detected = self.logCompress(self.img_das_intrp,   self.minDynRange.value, self.maxDynRange.value)
      self.img_cap_detected = self.logCompress(self.img_capon_intrp, self.minDynRangeCapon.value, self.maxDynRangeCapon.value)
         
      if VERBOSE:
         print('done')
Example #4
def sph2cart_scal(scals, radius, nx=96, ny=96, nz=96, minc=1):
    """
    This function interpolates a series of scalar fields from the spherical
    coordinates to the cartesian coordinates.

    :param scals: an array that contains the different scalar quantities
    :type scals: numpy.ndarray[nscals,nphi,ntheta,nr]
    :param radius: the input radius
    :type radius: numpy.ndarray
    :param nx: number of grid points in the x direction
    :type nx: int
    :param ny: number of grid points in the y direction
    :type ny: int
    :param nz: number of grid points in the z direction
    :type nz: int
    :param minc: azimuthal symmetry
    :type minc: int
    :returns: a tuple that contains the scalars, the max of the grid
              and the grid spacing
    :rtype: (numpy.ndarray[nscals,nz,ny,nx],float,float)
    """
    nscals, nphi, ntheta, nr = scals.shape
    phi = N.linspace(-N.pi/minc, N.pi/minc, nphi)
    theta = N.linspace(0., N.pi, ntheta)
    # Cube: take care of the sqrt(3.) !!!
    gridMax = radius.max()
    spacing = 2.*gridMax/(nx-1)
    Z,Y,X = N.mgrid[-1:1:nz*1j,-1:1:ny*1j,-1:1:nx*1j]*gridMax
    new_r = N.sqrt(X**2+Y**2+Z**2)#.ravel()
    new_phi = N.arctan2(Y, X)#.ravel()
    new_theta = N.arctan2(N.sqrt(X**2+Y**2), Z)#.ravel()
    del X,Y,Z

    ir = interp1d(radius, N.arange(len(radius)), bounds_error=False)
    it = interp1d(theta, N.arange(len(theta)), bounds_error=False)
    ip = interp1d(phi, N.arange(len(phi)), bounds_error=False)

    new_ir = ir(new_r)
    new_it = it(new_theta)
    new_ip = ip(new_phi)

    new_ir[new_r < radius.min()] = 0.
    new_it[new_r < radius.min()] = 0.
    new_ip[new_r < radius.min()] = 0.

    coords = N.array([new_ip, new_it, new_ir])

    scals_cart = N.zeros((nscals, nz, ny, nx), 'f')
    for iscal in range(nscals):
        if iscal == 0: # radius has been already calculated
            scals_cart[iscal, ...] = new_r
        else:
            scals_cart[iscal, ...] = map_coordinates(scals[iscal, ...], coords)
            scals_cart[iscal, new_r < radius.min()] = 0.
            scals_cart[iscal, new_r > radius.max()] = 0.
    del coords

    del new_theta, new_phi

    return scals_cart,gridMax,spacing
Example #5
def position_interpolator(background):
    global positions
    if not isfile(POSITIONS_DUMP_FILENAME):
        def callback(event, x, y, flags, parameters):
            if event == cv2.EVENT_LBUTTONDOWN:  # == 1
                positions.append(Coordinate(x, y))
    
        cv2.namedWindow("Interpolator")
        cv2.setMouseCallback("Interpolator", callback)

        while True: 
            cv2.imshow("Interpolator", background.array)
            if cv2.waitKey() & 0xFF == 27:
                break
        cv2.destroyWindow("Interpolator")
        with open(POSITIONS_DUMP_FILENAME, "w") as positions_dump_file:
            pickle.dump(positions, positions_dump_file) 
    else:
        with open(POSITIONS_DUMP_FILENAME, "r") as positions_dump_file:
            positions = pickle.load(positions_dump_file)
        
    
    t = map(lambda i: i * STEP, range(len(positions)))
    x = map(lambda p: p.x, positions)
    y = map(lambda p: p.y, positions)



    f_x = interpolate.interp1d(t, x, kind = "quadratic")
    f_y = interpolate.interp1d(t, y, kind = "quadratic")
    
    return PositionInterpolator(f_x, f_y)
Example #6
def _compare_positions(a, b, max_dist=0.003, max_angle=5.):
    """Compare estimated cHPI positions"""
    from scipy.interpolate import interp1d
    trans, rot, t = a
    trans_est, rot_est, t_est = b
    quats_est = rot_to_quat(rot_est)

    # maxfilter produces some times that are implausibly large (weird)
    use_mask = (t >= t_est[0]) & (t <= t_est[-1])
    t = t[use_mask]
    trans = trans[use_mask]
    quats = rot_to_quat(rot)
    quats = quats[use_mask]

    # double-check our angle function
    for q in (quats, quats_est):
        angles = _angle_between_quats(q, q)
        assert_allclose(angles, 0., atol=1e-5)

    # < 3 mm translation difference between MF and our estimation
    trans_est_interp = interp1d(t_est, trans_est, axis=0)(t)
    worst = np.sqrt(np.sum((trans - trans_est_interp) ** 2, axis=1)).max()
    assert_true(worst <= max_dist, '%0.1f > %0.1f mm'
                % (1000 * worst, 1000 * max_dist))

    # < 5 degrees rotation difference between MF and our estimation
    # (note that the interpolation will make this slightly worse)
    quats_est_interp = interp1d(t_est, quats_est, axis=0)(t)
    worst = 180 * _angle_between_quats(quats_est_interp, quats).max() / np.pi
    assert_true(worst <= max_angle, '%0.1f > %0.1f deg' % (worst, max_angle,))
Example #7
def NonlinearGrid():
	def asol(x,epsilon):
		A = []
		for item in x:
			A.append(alpha+item + (beta-alpha-1.)*(np.exp(item/epsilon) -1.)/(np.exp(1./epsilon) -1.)	)
		return x, np.array(A)
	
	epsilon = .01
	N=15
	X = np.linspace(0,1,N)
	x1, y1 = ode_fe(func=lambda x:-1./epsilon,c=-1./epsilon,x=X)
	x2, y2 = ode_fe(func=lambda x:-1./epsilon,c=-1./epsilon,x=X**(1./8.))
	alpha, beta = 2.,4.
	# Analytic solution
	Z = asol(np.linspace(0,1,500),epsilon)
	
	plt.plot(Z[0],Z[1],'-k',label='Solution',linewidth=3)
	plt.plot(x1,y1,'--bo',label='Evenly spaced grid points',linewidth=1.5)
	# plt.plot(Z[0],Z[1],'-k',mfc="None",label='Solution',linewidth=2)
	plt.plot(x2,y2,'--ro',label='Clustered grid points',linewidth=1.5)
	plt.axis([0.,1.1,1.8,4.2])
	plt.legend(loc='best')
	# plt.savefig('FEM_compare_methods.pdf')
	plt.show()
	plt.clf()
	
	X = np.linspace(0,1,500)
	plt.plot(X,abs(Z[1]-interp1d(x2,y2)(X)),'-r')
	plt.plot( X,abs( Z[1]-interp1d(x1,y1)(X) ), '-b' )
	
	# print "Max Error = ", np.max(np.abs(Z[1]-interp1d(x2,y2)(X) ))
	plt.show()
    def __init__(self,t0,dt=None,d=None):
        if d is not None and dt is not None:
            self.t0 = t0
            self.dT = dt
            d = np.array(d)
            try:
                (m,n) = d.shape
            except ValueError:
                m = 1
                n = d.shape[0]
            assert m==1, 'ctsd only supports 1d continuous data.'
            d = d.reshape(n)

            n = len(d)
            t = np.array([t0+iN*dt for iN in range(n)])
            self.f = interp1d(t,d)
            self.D = (self.f(t)).reshape((1,t.size))
        elif isinstance(t0,(ts,tsd,ctsd)):
            self.t0 = t0.starttime()
            self.dT = t0.dt()
            self.f = interp1d(t0.range(),t0.data())
            n = np.ceil((t0.endtime()-t0.starttime())/t0.dt()).astype(int)
            t = np.array([t0.starttime()+iN*t0.dt() for iN in range(n)])
            self.D = (self.f(t)).reshape((1,t.size))

        (d,n) = dims(self.D)
        self.dims = d
        self.nD = n
        assert self.isOK()
Example #9
def interpolate(points, lam, flux, method):
    """
     NAME:
       interpolate

     PURPOSE:
       General purpose function that can call and use various scipy.interpolate
       methods. Defined for convenience.

     INPUTS:
       points      Set of new points to get interpolated values for.
       lam         The wavelengths of the data points
       flux        The fluxes of the data points
       method      The method of interpolation to use. Valid values include
                   'interp1d:linear', 'interp1d:quadratic', and 'splrep'.

     OUTPUTS:
       Interpolated set of values for each corresponding input point.

     EXAMPLE:
       interpFlux = interpolate(interpLam, lam, flux, 'interp1d:linear')
    """
    if method == 'interp1d:linear':
        f = interp1d(lam, flux, assume_sorted = True)
        return f(points)
    if method == 'interp1d:quadratic':
        f = interp1d(lam, flux, kind = 'quadratic', assume_sorted = True)
        return f(points)
    if method == 'splrep':
        return splev(points, splrep(lam, flux))
    raise Exception("You didn't choose a proper interpolating method")
Example #10
def getF1F2Params(element = None):
    """Returns f1 and f2 scattering factors"""

    alldata = np.array([])
    global F1F2

    with open(os.path.join(datadir, 'f1f2_Henke.dat'),'r') as infile:
        for line in infile:
            if line.split()[0] == '#S':
                if len(alldata):
                    f1 = interpolate.interp1d(alldata[:,0], alldata[:,1] - thisZ)
                    f2 = interpolate.interp1d(alldata[:,0], alldata[:,2] * -1.)
                    F1F2[thisElement] = (f1, f2)
                    if thisElement == element:
                        infile.close()
                        return F1F2[element]

                s = line.split()
                thisElement = s[2]
                thisZ = int(s[1])

                alldata = np.array([])

            elif line[0] == '#':
                continue
            else:
                data = np.array(line.split()).astype('float32')
                if not len(alldata):
                    alldata = data
                else:
                    alldata = np.vstack((alldata, data))
    return alldata
def read_BCs(log_g=LOG_G):
   """Read BCs table (Girardi 2004), compute interp1d functions at log_g."""

   table_gir = at.read(model_dir+"bctab_p00.txt")

   # Save relevant arrays as variables
   colTeff = table_gir["Teff"]
   collogg = table_gir["logg"]
   colBCg =  table_gir["g"]
   colBCr =  table_gir["r"]
   colBCi =  table_gir["i"]

   # Only keep log_g for dwarfs
   iM37g = np.where(collogg==log_g)[0]

   # Compute interpolation functions
   bcfuncg = interp1d(colTeff[iM37g], colBCg[iM37g], kind='linear')
   bcfuncr = interp1d(colTeff[iM37g], colBCr[iM37g], kind='linear')
   bcfunci = interp1d(colTeff[iM37g], colBCi[iM37g], kind='linear')

   # Save the slopes separately, for computing uncertainties later
   slopesBC = np.zeros((len(colTeff[iM37g]) - 1,3))
   slopesBC[:,0]= np.abs(np.diff(colBCg[iM37g]) / np.diff(colTeff[iM37g]))
   slopesBC[:,1]= np.abs(np.diff(colBCr[iM37g]) / np.diff(colTeff[iM37g]))
   slopesBC[:,2]= np.abs(np.diff(colBCi[iM37g]) / np.diff(colTeff[iM37g]))

   # Teff ranges where the interpolation functions are valid
   # (for g,r,i)
   teffrange = [min(colTeff[iM37g])*1.00001,max(colTeff[iM37g])*0.99999]
   bcs = {"g":colBCg,"r":colBCr,"i":colBCi}
   funcs = {"g":bcfuncg,"r":bcfuncr,"i":bcfunci}
   slopes_dict = {"g":slopesBC[:, 0],"r":slopesBC[:, 1],"i":slopesBC[:, 2]}
   teff_bins = colTeff[iM37g]

   return funcs, teffrange, slopes_dict, teff_bins
def read_SEDs():
    """Read SEDs table (Adam's table)."""
    kh = at.read(model_dir+'kraushillenbrand5.dat')

    # Save relevant arrays as variables
    coltemp = kh["Teff"]
    gmag = kh["Mg"]
    rmag = kh["Mr"]
    imag = kh["Mi"]
    numrows = len(rmag)

    # Interpolation functions for Teff as a function of Absolute Magnitude
    gfunc = interp1d(gmag, coltemp, kind='linear')
    rfunc = interp1d(rmag, coltemp, kind='linear')
    ifunc = interp1d(imag, coltemp, kind='linear')

    # Save the slopes separately, for computing uncertainties later
    slopes = np.zeros((numrows - 1, 3))
    slopes[:, 0] = np.abs(np.diff(coltemp) / np.diff(gmag))
    slopes[:, 1] = np.abs(np.diff(coltemp) / np.diff(rmag))
    slopes[:, 2] = np.abs(np.diff(coltemp) / np.diff(imag))

    # Magnitude ranges where the interpolation functions are valid
    # (for g,r,i)
    magranges = {"g":[-0.39,20.98], "r":[-0.04,18.48], "i":[0.34,15.85]}
    mags = {"g":gmag,"r":rmag,"i":imag}
    funcs = {"g":gfunc,"r":rfunc,"i":ifunc}
    slopes_dict = {"g":slopes[:, 0],"r":slopes[:, 1],"i":slopes[:, 2]}

    return mags, funcs, magranges, slopes_dict
def rainin_singlechannel_pipetting_model(volume):
    """ Data obtained from
        https://www.shoprainin.com/Pipettes/Single-Channel-Manual-Pipettes/RAININ-Classic/Rainin-Classic-Pipette-PR-10/p/17008649
        
        Parameters
        ----------
        volume - volume pipetted in microliters
        
        Notes
        -----
        This is the pipette used for pipetting cyclohexane into octanol
           
        Returns
        -------
        Expected Inaccuracy, Imprecision
        
    """
    imprecision_function = interp1d(
        [1.0, 5.0, 10.0], # volume range (uL)
        [0.012, 0.006, 0.004]) # relative imprecision for these volumes from rainin website
    
    inaccuracy_function = interp1d(
        [1.0, 5.0, 10.0], # volume range (uL)
        [0.025, 0.015, 0.01]) # relative inaccuracy for these volumes from rainin website
    
    return [inaccuracy_function(volume), imprecision_function(volume)]    
Example #14
def refinex2(x,y,tol=1e-3,maxiter=10):

  from scipy.interpolate import interp1d

  # assume y is appropriately normalized

  print "refinex: tol=%g, maxiter=%i\n" %(tol,maxiter)
  for iter in range(maxiter):

    y1 = interp1d(x,y,kind="linear",axis=0)
    y3 = interp1d(x,y,kind="cubic" ,axis=0)

    nx,ny = y.shape

    xi = 0.5*(x[:-1]+x[1:])
    yi = y3(xi)
    ei = abs(yi-y1(xi)).max(1)
    ii = np.nonzero(ei>tol)
    ix = np.arange(1,nx)
    ni = len(ii[0])

    print "  iter %i... added %i points" %(iter,ni)
    if ni>0:
      x = np.insert(x,ix[ii],xi[ii])
      y = np.insert(y,ix[ii],yi[ii],axis=0)
    else:
      break

  return x,y
def rainin_multichannel_pipetting_model(volume):
    """ Data obtained from 
        https://www.shoprainin.com/Pipettes/Multichannel-Manual-Pipettes/Pipet-Lite-XLS%2B/Pipet-Lite-Multi-Pipette-L8-200XLS%2B/p/17013805        
        
        Parameters
        ----------
        volume - volume pipetted in microliters
        
        Notes
        -----
        This is the pipette used for pipetting octanol for the cyclohexane dilution into octanol.
        
        Returns
        -------
        Expected Inaccuracy, Imprecision
    """
    imprecision_function = interp1d(
        [20.0, 100.0, 200.0], # volume range (uL)
        [0.01, 0.0025,0.0015]) # relative imprecision for these volumes from rainin website
    
    inaccuracy_function = interp1d(
        [20.0, 100.0, 200.0], # volume range (uL)
        [0.025, 0.008, 0.008]) # relative inaccuracy for these volumes from rainin website
    
    return [inaccuracy_function(volume), imprecision_function(volume)]    
Example #16
def kcorr(l_o, fl_o, band, z, axis=0):
    '''

    '''
    # read in filter table
    band_tab = t.Table.read('filters/{}_SDSS.res'.format(band),
                            names=['lam', 'f'], format='ascii')

    # set up interpolator
    band_interp = interp1d(x=band_tab['lam'].quantity.value,
                           y=band_tab['f'], fill_value=0.,
                           bounds_error=False)
    l_o = l_o.to('AA')
    l_e = l_o / (1. + z)

    R_o = band_interp(l_o)
    R_e = band_interp(l_e)

    fl_e_ = interp1d(x=l_e, y=fl_o,
                     bounds_error=False, fill_value='extrapolate')
    fl_o_ = interp1d(x=l_o, y=fl_o,
                     bounds_error=False, fill_value='extrapolate')

    n = np.trapz(x=l_o,
                 y=(R_o * l_o * fl_o_(l_o / (1. + z))),
                 axis=axis)
    d = np.trapz(x=l_e,
                 y=(R_e * l_e * fl_e_(l_e)),
                 axis=axis)

    F = n / d

    K_QR = -2.5 * np.log10(F.to('').value / (1. + z))

    return K_QR
Example #17
	def learn(self):
		from scipy.optimize import fmin_powell as minimizer
		switches = np.abs(np.diff(self.obs)).nonzero()[0]
		try:
			if len(switches)>5:
				first_switch = self.tps[switches[0]]
				last_switch = self.tps[switches[-1]]
			else:
				first_switch = self.tps[0]
				last_switch = self.tps[-1]
			if first_switch>self.final_pivot_tps[0] and first_switch < self.final_pivot_tps[-1]:
				first_pivot = max(0, np.where(first_switch<=self.final_pivot_tps)[0][0] - self.extra_pivots)
			else:
				first_pivot=0
			if last_switch<self.final_pivot_tps[-1] and last_switch>self.final_pivot_tps[0]:
				last_pivot = min(len(self.final_pivot_tps), np.where(last_switch>self.final_pivot_tps)[0][-1]+self.extra_pivots)
			else:
				last_pivot = len(self.final_pivot_tps)
			tmp_pivots = self.final_pivot_tps[first_pivot:last_pivot]
			if min(np.diff(tmp_pivots))<0.000001:
				print(tmp_pivots)
			self.tps = self.full_tps[(self.full_tps>=tmp_pivots[0])*(self.full_tps<tmp_pivots[-1])]
			self.obs = self.full_obs[(self.full_tps>=tmp_pivots[0])*(self.full_tps<tmp_pivots[-1])]
		except:
			import ipdb; ipdb.set_trace()

		self.pivot_freq = self.initial_guess(tmp_pivots, ws=2*(min(50,len(self.obs))//2))
		self.pivot_freq[0]=self.pivot_freq[1]
		self.pivot_freq[-1]=self.pivot_freq[-2]
		self.frequency_estimate = interp1d(tmp_pivots, self.pivot_freq, kind=self.interpolation_type, bounds_error=False)
		if self.verbose:
			print("Initial pivots:", tmp_pivots, self.pivot_freq)
		steps= [4,2,1]
		for si in steps:
			if len(self.final_pivot_tps)>2*si or si==1:
				# subset the pivots, if the last point is not included, attach it
				self.pivot_tps = tmp_pivots[::si]
				if self.pivot_tps[-1]!=tmp_pivots[-1]:
					self.pivot_tps = np.concatenate((self.pivot_tps, tmp_pivots[-1:]))

				self.pivot_freq = self.frequency_estimate(self.pivot_tps)
				if np.max(np.abs(self.pivot_freq))>20:
					import ipdb; ipdb.set_trace()
				# determine the optimal pivot frequencies
				self.pivot_freq = minimizer(self.logLH, self.pivot_freq, ftol = self.tol, xtol = self.tol, disp = self.verbose>0)
				if self.logit:
					self.pivot_freq = logit_transform(fix_freq(logit_inv(self.pivot_freq), 0.0001))
				else:
					self.pivot_freq = fix_freq(self.pivot_freq, 0.0001)
				# instantiate an interpolation object based on the optimal frequency pivots
				self.frequency_estimate = interp1d(self.pivot_tps, self.pivot_freq, kind=self.interpolation_type, bounds_error=False)
				if min(np.diff(self.pivot_tps))<0.000001:
					print(self.pivot_tps)
				if self.verbose: print("neg logLH using", len(self.pivot_tps), "pivots:", self.logLH(self.pivot_freq))

		self.final_pivot_freq=np.zeros_like(self.final_pivot_tps)
		self.final_pivot_freq[first_pivot:last_pivot]=self.pivot_freq
		self.final_pivot_freq[:first_pivot] = self.final_pivot_freq[first_pivot]
		self.final_pivot_freq[last_pivot:] = self.final_pivot_freq[last_pivot-1]
		self.frequency_estimate = interp1d(self.final_pivot_tps, self.final_pivot_freq, kind=self.interpolation_type, bounds_error=False)
Example #18
def loadAcorrCoeff( calFile ):
	ACA_Caldata = loadtxt(calFile)
	#bitDist = np.transpose(ACA_Caldata[2:10])
	#analogPower  = ACA_Caldata[0]
	digitalPower = ACA_Caldata[1]
	fitCoeff = ACA_Caldata[10:12]
	return interp1d(digitalPower, fitCoeff[0], kind='cubic'), interp1d(digitalPower, fitCoeff[1], kind='cubic')
Example #19
def get_fn(data, fp):
    """ Given some scores data and a false negatives rate
    find the corresponding false positive rate in the ROC curve.
    If the point does not exist, we will interpolate it.

    """
    if fp in data.fpr:
        pos = np.where(data.fpr == fp)
        fnr, thr = np.mean(data.fnr[pos]), np.mean(data.thrs[pos])
    else:
        # Set data for interpolation
        x = np.sort(data.fpr)
        # Set new arange whichs includes the wanted value
        xnew = np.arange(fp, x[-1])
        # Interpolate the FN
        y = np.sort(data.tpr)
        f = interpolate.interp1d(x, y)
        tpr = f(xnew)[0]
        fnr = 1 - tpr
        # Interpolate the threshold
        y = np.sort(data.thrs)
        f = interpolate.interp1d(x, y)
        thr = f(xnew)[0]
    print("Dado el valor de fp: {0}, el valor de fnr es: {1} y el umbral: {2} "
          .format(fp, fnr, thr))
Example #20
def makegood(prereqs,func,r,size,grid,smallrexp,largerexp,plotting):
    """
    prereqs - array containing model class instance as first element
    func - function to be evaluated
    r - independent variable array
    size - size of generated independent variable array with format 
    	   [log10(max),log10(min),stepsize]
    grid - choice of grid generator function
    smallrexp - log slope at small r or large E
    largerexp - log slope at large r or small E
    plotting - if False, do not plot. 
               if not False, must be array with ['<xlabel>','<ylabel>']
    
    Returns an interpolated object version of the function based
    on computed values.
    """
    model = prereqs[0]
    #generate independent array grid
    rarray,rchange,rstart = grid([model],size[0],size[1],size[2])
    #compute value of function for grid points
    tab,problems = func(rarray,prereqs)
    frac = float(len(problems))/float(len(tab))
    #report the fraction of problem points to console and file
    print('fraction reporting a message: {0}'.format(frac))
    model.statfile.write('\nmesg frac = {0}\n'.format(frac))
    #check for problem points not caught in integration process
    gcheck = goodcheck(tab)
    #interpolate in log10 space
    inter = interp1d(log10(rarray),log10(tab))
    #generate array to further extremes using powerlaw behaviour
    m = piecewise(r,inter,tab[0],tab[len(rarray)-1],rstart,rchange,smallrexp,largerexp)
    #interpolate extended array in log10 space
    inter2 = interp1d(log10(r),log10(m))
    #save values used to interpolate to file (NOT in log10 space)
    saver = column_stack((r,m))
    funcname = str(func).split(' ')[1][4:]
    pklwrite('{0}/{1}.pkl'.format(model.directory,funcname),saver)
    #if plotting is possible and the array doesn't consist entirely of problems
    #add plot to pdf and return interpolate functional form
    if plotting != False and gcheck == True:
        xaxis,yaxis = plotting
        plt.figure()
        plt.loglog(r[1:-1],m[1:-1],'c',linewidth = 5)
        plt.loglog(rarray,tab,'.',color = 'DarkOrange')
        plt.ylabel(r'{0}'.format(yaxis))
        plt.xlabel('{0}'.format(xaxis))
        plt.xlim(min(r[1:-1]),max(r[1:-1]))
        plt.ylim(min(m[1:-1]),max(m[1:-1]))
        plt.title(model.name)
        model.pdfdump.savefig()
        plt.close()
        return inter2
    #if plotting isn't possible but array doesn't consist entirely of problems
    #return interpolated functional form
    elif plotting == False and gcheck == True:
        return inter2
    #if computation failed, return 0
    #this signals the rest of the program that computation failed here
    elif gcheck == False:
        return 0
Example #21
def difTemplate():

	''' 
	Look at the differences between the templates

	'''

	path = "/home/gpfs/manip/mnt0607/bao/hdumasde/Results/Txt/chain_annalys_delta/"

	data1 = numpy.loadtxt(path + 'template_0_0.txt')
	templateData1 = interpolate.interp1d(data1[:,0],data1[:,1],bounds_error=False,fill_value=0)
	plt.errorbar(data1[:,0], data1[:,1]/templateData1(1150.), fmt='o', label=r'$Simu$',color='red')

	data = numpy.loadtxt(path + 'template.txt')
	templateData = interpolate.interp1d(data[:,0],data[:,1],bounds_error=False,fill_value=0)
	plt.errorbar(data[:,0], data[:,1]/templateData(1150.), fmt='o', label=r'$Data$',color='blue')

	data3 = numpy.loadtxt(path + 'template_0_0_MocksColab.txt')
	templateData3 = interpolate.interp1d(data3[:,0],data3[:,1],bounds_error=False,fill_value=0)
	plt.errorbar(data3[:,0], data3[:,1]/templateData3(1150.), fmt='o', label=r'$Mock \, colab$',color='green')


	plt.title(r'$Template$', fontsize=40)
	plt.xlabel(r'$\lambda_{R.F.} \, [\AA]$', fontsize=40)
	plt.ylabel(r'$f(\lambda_{R.F.}) / f(1150.)$', fontsize=40)
	myTools.deal_with_plot(False,False,True)
	plt.show()
	

	plt.errorbar(data[:,0], (data1[:,1]/templateData1(1150.)-data[:,1]/templateData(1150.))/(data1[:,1]/templateData1(1150.)) , fmt='o')
	plt.xlabel(r'$\lambda_{R.F.} \, [\AA]$', fontsize=40)
	plt.ylabel(r'$( (f(\lambda_{R.F.}) / f(1150.))_{Data} - (f(\lambda_{R.F.}) / f(1150.))_{Simu} ) / (f(\lambda_{R.F.}) / f(1150.))_{Data})$', fontsize=40)
	myTools.deal_with_plot(False,False,True)
	plt.show()
    def display(self,item):
        self.currentFile=self.listShots.currentItem().text()
        signal1=self.signalChoice1.currentText()
        signal2=self.signalChoice2.currentText()
        time1,data1,sampling1=readHdf5.getData(self.currentFile,signal1,self.env)
        time2,data2,sampling2=readHdf5.getData(self.currentFile,signal2,self.env)
        # put both signals on a common time base via interpolation
        if sampling2>=sampling1:
            self.data2=interp1d(time2,data2)(time1)
            self.timei=time1
            self.data1=data1
        else:
            self.data1=interp1d(time1,data1)(time2)
            self.timei=time2
            self.data2=data2
        self.p1.clear()
        self.p1.plot(self.timei,self.data1)
        self.p2.clear()
        self.p2.plot(self.timei,self.data2)
        #self.p2.linkXAxis(self.p1)
        self.lr1=pg.LinearRegionItem([self.timei[0],self.timei[-1]])
        self.lr1.setZValue(-10)
        self.lr2=pg.LinearRegionItem([self.timei[0],self.timei[-1]])
        self.lr2.setZValue(-10)
        self.p1.addItem(self.lr1)
        self.p2.addItem(self.lr2)
        self.lr1.sigRegionChanged.connect(self.updatePlot1)
        self.lr2.sigRegionChanged.connect(self.updatePlot2)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
    # Test that lasso_path with lars_path style output gives the
    # same result

    # Some toy data
    X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    y = np.array([1, 2, 3.1])
    alphas = [5., 1., .5]
    # Compute the lasso_path
    f = ignore_warnings
    coef_path = [e.coef_ for e in f(lasso_path)(X, y, alphas=alphas,
                                                return_models=True,
                                                fit_intercept=False)]

    # Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
    alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
    coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
                                               coef_path_lars[:, ::-1])
    alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
                                                    fit_intercept=False,
                                                    return_models=False)
    coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
                                                coef_path_lasso2[:, ::-1])

    np.testing.assert_array_almost_equal(coef_path_cont_lasso(alphas),
                                         np.asarray(coef_path).T, decimal=1)
    np.testing.assert_array_almost_equal(coef_path_cont_lasso(alphas),
                                         coef_path_cont_lars(alphas),
                                         decimal=1)
Example #24
def DRIVplot(folder,keys):
  T = 281
  APiterator = [5,10]
  AP = Analysis.AnalyseFile()
  P = Analysis.AnalyseFile()
  if folder[0]['IVtemp'] == T:
    scale = 1e6
    plt.hold(True)
    plt.title('NLIV in P and AP at ' + str(T) + 'K')
    plt.xlabel('Current ($\mu$A)')
    plt.ylabel('V$_{NL}$ ($\mu$V)')
    for f in folder:
      if f['iterator'] in APiterator:
        AP.add_column(f.Voltage,str(f['iterator']))
      else:
        P.add_column(f.Voltage,str(f['iterator']))        
    AP.apply(func,0,replace=False,header='Mean NLVoltage')
    P.apply(func,0,replace=False,header='Mean NLVoltage')    
    
    I = numpy.arange(-295e-6,295e-6,1e-6)
    
    ap = interpolate.interp1d(f.column('Current'),AP.column('Mean NLV'))    
    p = interpolate.interp1d(f.column('Current'),P.column('Mean NLV')) 
    
    print(P)
    plt.title(' ',verticalalignment='bottom')
    plt.xlabel('Current ($\mu$A)')
    #plt.ylabel('V$_{NL}$/|I| (V/A)')
    plt.ylabel('$\Delta$V$_{NL}$/|I| (mV/A)') 
    plt.plot(f.column('Current')*scale,1e3*(P.column('Mean NLV')-AP.column('Mean NLV'))/abs(f.column('Current')),label =''+str(T)+ ' K')
    #plt.plot(f.column('Current')*scale,1e3*(P.column('Mean NLV'))/abs(f.column('Current')),label ='P at '+str(T)+ ' K')
    #plt.plot(f.column('Current')*scale,1e3*(AP.column('Mean NLV'))/abs(f.column('Current')),label ='AP at '+str(T)+ ' K')        
    plt.legend(loc='upper left')
  else:
    return 1  
Example #25
 def make_GRASS_etopo2_colormap(self):
   """
   GRASS GIS allows for color maps to be assigned to absolute values.
   Matplotlib doesn't seem to.
   So this will import and interpolate the etopo2 color map.
   
   """
   etopo2 = np.genfromtxt('GRASScolors/etopo2', skip_footer=1)
   z = etopo2[:,0].astype(int)
   r = etopo2[:,1].astype(float)
   g = etopo2[:,2].astype(float)
   b = etopo2[:,3].astype(float)
   from scipy.interpolate import interp1d
   ri = interp1d(z, r)
   gi = interp1d(z, g)
   bi = interp1d(z, b)
   low_elev = np.min(z)
   high_elev = np.max(z)
   znew = np.linspace(low_elev, high_elev, 512)
   znew = np.concatenate(( znew[znew<-1], [-1, 0], znew[znew>0])) # make sure key SL transition is intact!
   rnew = ri(znew)
   gnew = gi(znew)
   bnew = bi(znew)
   clscaled = np.linspace(0, 1, len(znew))
   cdr = []
   cdg = []
   cdb = []
   for i in range(len(znew)):
     cdr.append([clscaled[i], rnew[i]/255., rnew[i]/255.])
     cdg.append([clscaled[i], gnew[i]/255., gnew[i]/255.])
     cdb.append([clscaled[i], bnew[i]/255., bnew[i]/255.])
   cdict = {'red': cdr, 'green': cdg, 'blue': cdb}
   cm_etopo2 = LinearSegmentedColormap('etopo2',cdict,4096)
   return low_elev, high_elev, cm_etopo2
    def rebin_data(self, grid, use_psf=True):
        """Calculates the center of mass of the grid and then
        rebins so that the center pixel really is the center of the array
        For this we do a 2-d interpolation on the grid
        """
        a = psf_fitter.psffit(abs(grid), circle=False, rotate=1)
        xcen = a[2]   # x- and y-centers of the fitted PSF
        ycen = a[3]
        xlen, ylen = grid.shape
        xval = arange(xlen)
        yval = arange(ylen)

        xint = interp1d(xval, self.xpos_abs)
        yint = interp1d(yval, self.ypos_abs)

        xintcen = self.xmax_pos-xint(xcen)
        yintcen = self.ymax_pos-yint(ycen)

        print(self.xmax_pos, xintcen, self.ymax_pos, yintcen)
        f_real = interp2d(self.xpos_rel, self.ypos_rel, real(grid))
        f_imag = interp2d(self.xpos_rel, self.ypos_rel, imag(grid))

        xnew = self.xpos_rel - xintcen
        ynew = self.ypos_rel - yintcen

        recen_grid = f_real(xnew, ynew) + 1j*f_imag(xnew, ynew)

        print(nd.center_of_mass(abs(recen_grid)))

        return recen_grid
    def test_closeness_nest_lsodar(self):
        # Compare models to the LSODAR implementation.

        simtime = 100.

        # get lsodar reference
        lsodar = np.loadtxt(os.path.join(path, 'test_aeif_data_lsodar.dat')).T
        V_interp = interp1d(lsodar[0, :], lsodar[1, :])
        w_interp = interp1d(lsodar[0, :], lsodar[2, :])

        # create the neurons and devices
        neurons = {model: nest.Create(model, params=aeif_param)
                   for model in models}
        multimeters = {model: nest.Create("multimeter") for model in models}
        # connect them and simulate
        for model, mm in iter(multimeters.items()):
            nest.SetStatus(mm, {"interval": self.resol,
                                "record_from": ["V_m", "w"]})
            nest.Connect(mm, neurons[model])
        nest.Simulate(simtime)

        # relative differences: interpolate LSODAR to match NEST times
        mm0 = next(iter(multimeters.values()))
        nest_times = nest.GetStatus(mm0, "events")[0]["times"]
        reference = {'V_m': V_interp(nest_times), 'w': w_interp(nest_times)}

        rel_diff = self.compute_difference(multimeters, aeif_param, reference,
                                           ['V_m', 'w'])
        self.assert_pass_tolerance(rel_diff, di_tolerances_lsodar)
    def measure_target_width_on_segment(self, pt1, pt2):
        """
        Given the line segment L defined by 2d points pt1 and pt2 from a camera 
        frame, find the points pt3 and pt4 the nearest points to pt1 and pt2 
        on L that are masked according to self.mask8. Then calculate the 
        distance D between 3d points pt5 and pt6 in self.xyz which 
        correspond to pt3 and pt4.
        return pt3, pt4, D, fx, fy,
            where 
                pt3 = (x, y)
                pt4 = (x, y)
                fx is the function f(distance from pt3 on L) = x
                fy is the function f(distance from pt3 on L) = y
        If anything goes wrong, return None
        """
        from scipy.interpolate import interp1d

        dist2d = distance(pt1, pt2)
        interpx = interp1d([0, dist2d], [pt1[0], pt2[0]])
        interpy = interp1d([0, dist2d], [pt1[1], pt2[1]])
        t = numpy.linspace(0, int(dist2d), int(dist2d)+1)
        xs = numpy.int0(interpx(t))
        ys = numpy.int0(interpy(t))
        ixs, = self.mask8[ys, xs].nonzero()
        if len(ixs) >= 2:
            x1 = xs[ixs[0]]
            y1 = ys[ixs[0]]
            x2 = xs[ixs[-1]]
            y2 = ys[ixs[-1]]
            xyz1 = self.xyz[:, y1, x1]
            xyz2 = self.xyz[:, y2, x2]
            dist3d = distance(xyz1, xyz2)
            interpx2 = lambda d: (x2-x1)*d/dist2d + x1
            interpy2 = lambda d: (y2-y1)*d/dist2d + y1
            return (x1, y1), (x2, y2), dist3d, interpx2, interpy2
Example #29
    def analyze_geometry(self):
        """
        analyzing airfoil geometry by upper and lower curve points.
        Search maximum thickness and maximum camber using cubic spline 
        interpolation and gradient based optimization. To avoid interpolation 
        errors that can occur at leading edge of several airfoil types 
        (mostly NACA cambered airfoils) it is assumed that maximum camber and 
        thickness are located between 10 and 90% of airfoil length.
        
        Result is stored in self.thicknessLoc, self.thickness, self.camber, 
        self.camberLoc
        """
        lb = 0.1
        ub = 0.9
        up = geom.get_pts_in_range(self.upPts, lb, ub)
        lo = geom.get_pts_in_range(self.loPts, lb, ub)
        upCurve = interp1d(up[:, 0], up[:, 1], "cubic")
        loCurve = interp1d(lo[:, 0], lo[:, 1], "cubic")
        lb = up[0, 0]
        ub = up[-1, 0]

        def tc(x):
            return loCurve(x) - upCurve(x)

        def camber(x):
            return -(upCurve(x) + loCurve(x)) / 2.0

        self.thicknessLoc = float(fminbound(tc, lb, ub, xtol=0.001))
        self.camberLoc = float(fminbound(camber, lb, ub, xtol=0.001))
        self.thickness = -float(tc(self.thicknessLoc))
        self.camber = -float(camber(self.camberLoc))
def interpolate_baraffe(mass):
    if mass <= 0.02:
        data=read_baraffe(0.02,model=model)
        return data['Teff'][0], data['r'][0]
    else:
        modeled_masses=np.sort(np.unique(dat['m']))
        just_below=modeled_masses[modeled_masses<mass][-1]
        print('JUST BELOW:', just_below)
        just_above=modeled_masses[modeled_masses>mass][0]
        print('JUST ABOVE:', just_above)
        below_inds=dat["m"]==just_below
        above_inds=dat["m"]==just_above

        belowvals=map(lambda k:dat[k][below_inds],keys)
        belowdict=dict(zip(keys,belowvals))
        belowdict['r']=R_from_T_and_L(belowdict["Teff"],10**belowdict["log L"])

        abovevals=map(lambda k:dat[k][above_inds],keys)
        abovedict=dict(zip(keys,abovevals))
        abovedict['r']=R_from_T_and_L(abovedict["Teff"],10**abovedict["log L"])

        below_Teff=belowdict["Teff"][0]
        print('BELOW TEFF: ', below_Teff)
        above_Teff=abovedict["Teff"][0]
        print('ABOVE TEFF: ', above_Teff)

        below_L=10**belowdict["log L"][0]
        above_L=10**abovedict["log L"][0]

        out_Teff=interp1d([just_below,just_above],[below_Teff,above_Teff])(mass)
        out_L=interp1d([just_below,just_above],[below_L,above_L])(mass)
        out_R=R_from_T_and_L(out_Teff,out_L)
        return float(out_Teff), float(out_R)
    return v_swirl


v_swirl_cruise = v_swirl_propeller(V_cruise, v_a_cruise, omega_cruise, R_p)
v_swirl_stall = v_swirl_propeller(V_stall, v_a_stall, omega_VTOL, R_p)
v_swirl_VTOL = v_swirl_propeller(V_VTOL, v_a_cruise, omega_cruise, R_p)

# Creating a matrix of induced propeller velocities according to the propeller placements along the span.
""" INCOMPLETE """
inducedVelocity = np.zeros((N, 3))

# =============================================================================
# Prandtl's Adapted lifting line model with induced propeller velocities. Computing the circulation and induced lift distribution

# Linear twist distribution
twist_distribution_function = interp1d([-b / 2, 0, b / 2], [twist, 0, twist])
twist_distribution = []
for i in range(N):
    twist_distribution.append(twist_distribution_function(y[i]))
twist_distribution = np.array(twist_distribution)

# Calculating geometric angle of attack [rad]
alpha_geo = (alpha_fly + twist_distribution) * np.pi / 180

# Linear chord distribution
chord_distribution_function = interp1d([-b / 2, 0, b / 2],
                                       [c_tip, c_root, c_tip])
chord_distribution = []
for i in range(N):
    chord_distribution.append(chord_distribution_function(y[i]))
chord_distribution = np.array(chord_distribution)
Example #32
    def __getitem__(self, index):
        """Reads an image from a file, preprocesses it, and returns it."""
        img_size = 224

        image_path = self.image_paths[index]
        filename = image_path.split('_')[-1][:-len(".jpg")]
        #GT_path = self.GT_paths + 'ISIC_' + filename + '_segmentation.png'

        image = Image.open(image_path)
        #GT = Image.open(GT_path)
        annot_fn = self.GT_paths + filename.split('/')[-1] + '.xml'
        tree = ET.parse(annot_fn)
        objs = tree.findall('object')

        #img = Image.open(fn)
        wid = image.width
        hei = image.height

        for ix, obj in enumerate(objs):
            if obj.find('name').text.lower().strip() == 'graph':
                bbox = obj.find('bndbox')
                x11 = int(float(bbox.find('xmin').text))
                y11 = int(float(bbox.find('ymin').text))
                x12 = int(float(bbox.find('xmax').text))
                y12 = int(float(bbox.find('ymax').text))

            if obj.find('name').text.lower().strip() == 'xypercent':
                xper = obj.find('xper')
                #print(xper.text)
                xper = xper.text.split(' ')
                xper = [int(float(i) * 224) for i in xper]

                yper = obj.find('yper')
                #print(yper.text)
                yper = yper.text.split(' ')
                yper = [int(float(i) * 224) for i in yper]

        image = image.crop((x11, y11, x12, y12)).resize((img_size, img_size))
        matrix = torch.zeros(img_size, img_size)
        vector = torch.ones(img_size) * (-1)

        f = interpolate.interp1d(xper, yper)
        xnew = list(range(xper[0], xper[-1] + 1))
        ynew = f(xnew)
        ynew = [int(i) for i in ynew]

        for n, xn in enumerate(xnew):
            matrix[xn, ynew[n]] = 1
            vector[xn] = ynew[n]

        Transform = []

        Transform.append(T.ToTensor())
        Transform = T.Compose(Transform)

        image_t = Transform(image)

        Norm_ = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        image_t = Norm_(image_t)

        return image_t, vector, matrix, image_path
    print('Analyzing')
    #get the class-1 (outlier/anomaly) rows from the feature matrix, and drop the prediction so we can investigate them

    ##From Here
    Left = 0.001
    Right = 0.01

    k = len(featureMatrix['prediction']) // 2

    Label = [0] * k + [1] * k

    print(len(Label))
    print(len(featureMatrix['prediction']))
    fpr, tpr, thresholds = roc_curve(Label, featureMatrix['prediction'])

    F = interpolate.interp1d(fpr, tpr)
    x = np.logspace(np.log10(Left), np.log10(Right))
    y = F(x)
    roc_auc = auc(x, y)
    print('%.6f' % roc_auc)

    plt.figure()
    plt.xscale('log')
    plt.plot(fpr, tpr, ls='None', color='red')
    plt.plot(x, y, color='blue')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')

    plt.plot(plt.xlim(), plt.ylim(), ls="--", c=".3")
    plt.plot(fpr, tpr)
def interpolacion_cub(x1,x_tabla,y_tabla):
    funcion_cubica=interpolate.interp1d(x_tabla,y_tabla,kind= "cubic")
    return (funcion_cubica(x1))
Example #35
def rvt_from_peakfinder(r):
    if r["demo"]:
        quiet = 0
        print("quiet = %s" % quiet)
    # Calculate RVT
    if len(r["p_trace"]) != len(r["n_trace"]):
        dd = abs(len(r["p_trace"]) - len(r["n_trace"]))
        if dd > 1:  # have not seen this yet, trap for it.
            print(
                "Error RVT_from_PeakFinder:\n"
                "  Peak trace lengths differ by %d\n"
                "  This is unusual, please upload data\n"
                "  sample to afni.nimh.nih.gov" % dd
            )
            # keyboard
            return
        else:  # just a difference of 1, happens sometimes, seems ok to discard one sample
            print(
                "Notice RVT_from_PeakFinder:\n"
                "   Peak trace lengths differ by %d\n"
                "   Clipping longer trace." % dd
            )
            dm = min(len(r["p_trace"]), len(r["n_trace"]))
            if len(r["p_trace"]) != dm:
                r["p_trace"] = r["p_trace"][0:dm]
                r["tp_trace"] = r["tp_trace"][0:dm]
            else:
                r["n_trace"] = r["n_trace"][0:dm]
                r["tn_trace"] = r["tn_trace"][0:dm]

    r["rv"] = subtract(r["p_trace"], r["n_trace"])
    # NEED TO consider which starts first and
    # whether to initialize first two values by means
    # and also, what to do when we are left with one
    # incomplete pair at the end

    nptrc = len(r["tp_trace"])
    r["rvt"] = r["rv"][0 : nptrc - 1] / r["prd"]
    if r["p_trace_r"].any:
        r["rvr"] = subtract(r["p_trace_r"], r["n_trace_r"])
        # Debugging lines below
        # with open('rvr.csv', 'w') as f:
        #     for i in r['rvr']:
        #         f.write("%s\n" % i)
        # with open('prdR.csv', 'w') as f:
        #     for i in r['prdR']:
        #         f.write("%s\n" % i)
        r["rvtr"] = numpy.ndarray(numpy.shape(r["rvr"]))
        divide(r["rvr"], r["prdR"], r["rvtr"])
        # Smooth RVT so that we can resample it at volume_tr later
        fnyq = r["phys_fs"] / 2  # nyquist of physio signal
        fcut = 2 / r["volume_tr"]  # cut below nyquist for volume_tr
        w = float(r["frequency_cutoff"]) / float(fnyq)  # cut off frequency normalized
        b = firwin(numtaps=(r["fir_order"] + 1), cutoff=w, window="hamming")
        v = r["rvtr"]
        around(v, 6, v)
        # Debugging lines below
        # with open('a.csv', 'w') as f:
        #     for i in v:
        #         f.write("%s\n" % i)
        mv = mean(v)
        # remove the mean
        v = v - mv
        # filter both ways to cancel phase shift
        v = lfilter(b, 1, v)
        if r["legacy_transform"] == 0:
            v = numpy.flipud(
                v
            )  # Turns out these don't do anything in the MATLAB version(Might be a major problem)
        v = lfilter(b, 1, v)
        if r["legacy_transform"] == 0:
            v = numpy.flipud(
                v
            )  # Turns out these don't do anything in the MATLAB version(Might be a major problem)
        r["rvtrs"] = v + mv

    # create RVT regressors
    r["rvtrs_slc"] = zeros((len(r["rvt_shifts"]), len(r["time_series_time"])))
    for i in range(0, len(r["rvt_shifts"])):
        shf = r["rvt_shifts"][i]
        nsamp = int(round(shf * r["phys_fs"]))
        sind = add(list(range(0, len(r["t"]))), nsamp)
        print(sind)
        sind[nonzero(sind < 0)] = 0
        sind[nonzero(sind > (len(r["t"]) - 1))] = len(r["t"]) - 1
        rvt_shf = interp1d(
            r["t"], r["rvtrs"][sind], r["interpolation_style"], bounds_error=True
        )
        rvt_shf_y = rvt_shf(r["time_series_time"])
        if r["quiet"] == 0 and r["show_graphs"] == 1:
           # pacify matplotlib by passing a label (to get new instance)
           subplot(111, label='plot #%d'%i)
           plot(r["time_series_time"], rvt_shf_y)
        r["rvtrs_slc"][:][i] = rvt_shf_y

    if r["quiet"] == 0 and r["show_graphs"] == 1:
        print("--> Calculated RVT \n--> Created RVT regressors")
        subplot(211)
        plot(
            r["t_mid_prd"], z_scale(r["rvt"], min(r["p_trace"]), max(r["p_trace"])), "k"
        )
        if any(r["p_trace_r"]):
            plot(
                r["tR"], z_scale(r["rvtrs"], min(r["p_trace"]), max(r["p_trace"])), "m"
            )
        show()
        if r["demo"]:
            # uiwait(msgbox('Press button to resume', 'Pausing', 'modal'))
            pass

    return r
Example #36
	def get_rates(self, lambdas, dye_concentration, n):

		"""
			Rates for Rhodamine 6G

			Parameters:

				lambdas (list, or other iterable):  	Wavelength points where the rates are to be calculated. Wavelength is in meters
				dye_concentration (float):				In mM (millimolar); 1 mM = 1 mol / m^3
				n (float): 								index of refraction

		"""

		# absorption data
		min_wavelength = 480
		max_wavelength = 650
		absorption_spectrum_datafile = Path("data") / 'absorption_cross_sections_R6G_in_EthyleneGlycol_corrected.csv'
		absorption_spectrum_datafile = Path(os.path.dirname(os.path.abspath(__file__))) / absorption_spectrum_datafile
		raw_data2 = pd.read_csv(absorption_spectrum_datafile)
		initial_index = raw_data2.iloc[(raw_data2['wavelength (nm)']-min_wavelength).abs().argsort()].index[0]
		raw_data2 = raw_data2.iloc[initial_index:].reset_index(drop=True)
		final_index = raw_data2.iloc[(raw_data2['wavelength (nm)']-max_wavelength).abs().argsort()].index[0]
		raw_data2 = raw_data2.iloc[:final_index].reset_index(drop=True)
		absorption_data = raw_data2
		absorption_data_normalized = absorption_data['absorption cross-section (m^2)'].values / np.max(absorption_data['absorption cross-section (m^2)'].values)
		absorption_spectrum = np.squeeze(np.array([[absorption_data['wavelength (nm)'].values], [absorption_data_normalized]], dtype=float))
		interpolated_absorption_spectrum = interp1d(absorption_spectrum[0,:], absorption_spectrum[1,:], kind='cubic')
		
		# emission data
		fluorescence_spectrum_datafile =  Path("data") / 'fluorescence_spectrum_R6G_in_EthyleneGlycol_corrected.csv'
		fluorescence_spectrum_datafile = Path(os.path.dirname(os.path.abspath(__file__))) / fluorescence_spectrum_datafile
		raw_data = pd.read_csv(fluorescence_spectrum_datafile)
		initial_index = raw_data.iloc[(raw_data['wavelength (nm)']-min_wavelength).abs().argsort()].index[0]
		raw_data = raw_data.iloc[initial_index:].reset_index(drop=True)
		final_index = raw_data.iloc[(raw_data['wavelength (nm)']-max_wavelength).abs().argsort()].index[0]
		raw_data = raw_data.iloc[:final_index].reset_index(drop=True)
		fluorescence_data = raw_data
		fluorescence_data_normalized = fluorescence_data['fluorescence (arb. units)'].values / np.max(fluorescence_data['fluorescence (arb. units)'].values)
		emission_spectrum = np.squeeze(np.array([[fluorescence_data['wavelength (nm)'].values], [fluorescence_data_normalized]], dtype=float))
		interpolated_emission_spectrum = interp1d(emission_spectrum[0,:], emission_spectrum[1,:], kind='cubic')

		# Uses both datasets
		if np.min(1e9*np.array(lambdas)) < 480 or np.max(1e9*np.array(lambdas)) > 650:
			raise Exception('*** Restrict wavelength to the range between 480 and 650 nm ***')

		temperature = 300
		lamZPL = 545e-9
		n_mol_per_vol= dye_concentration*sc.Avogadro
		peak_Xsectn = 2.45e-20*n_mol_per_vol*sc.c/n
		wpzl = 2*np.pi*sc.c/lamZPL/1e12

		def freq(wl):
			return 2*np.pi*sc.c/wl/1e12
		def single_exp_func(det):
			f_p = 2*np.pi*sc.c/(wpzl+det)*1e-3
			f_m = 2*np.pi*sc.c/(wpzl-det)*1e-3
			return (0.5*interpolated_absorption_spectrum(f_p)) + (0.5*interpolated_emission_spectrum(f_m))
		def Err(det):
			return Erf(det*1e12)
		def single_adjust_func(det):
			return ((1+Err(det))/2.0*single_exp_func(det)) + ((1-Err(det))/2.0*single_exp_func(-1.0*det)*np.exp(sc.h/(2*np.pi*sc.k*temperature)*det*1e12)) 
			
		emission_rates = np.array([single_adjust_func(-1.0*freq(a_l)+wpzl) for a_l in lambdas])*peak_Xsectn
		absorption_rates = np.array([single_adjust_func(freq(a_l)-wpzl) for a_l in lambdas])*peak_Xsectn

		return absorption_rates, emission_rates
Example #37
def hatched_line(x,
                 y,
                 axis,
                 spc=0.03,
                 theta=45,
                 len_tick=0.015,
                 flip=False,
                 linestyle=None):
    try:
        from scipy.interpolate import interp1d
    except ImportError:
        raise Exception('scipy required to plot hatched lines')
    #end

    x = numpy.array(x)
    y = numpy.array(y)

    # Calculate the aspect ratio of the plot
    aspect_ratio = axis.axis()
    aspect_ratio = (aspect_ratio[1] - aspect_ratio[0]) / (aspect_ratio[3] -
                                                          aspect_ratio[2])

    if flip:
        flip = -1
    else:
        flip = 1
    #end

    # Calculate the distance along the curve
    ds = numpy.sqrt((x[1:] - x[:-1])**2 + ((y[1:] - y[:-1]) * aspect_ratio)**2)
    s_tot = sum(ds)
    ds = numpy.concatenate(([0.0], numpy.cumsum(ds)))

    # Determine the x and y coordinates of the tick root
    s_tick = numpy.linspace(0, s_tot, ceil(1 / spc))
    x_tick = interp1d(ds, x, bounds_error=False)(s_tick)
    y_tick = interp1d(ds, y, bounds_error=False)(s_tick)

    # Calculate the normal to the curve at the tick root
    delta_s = spc * s_tot
    v_tick = (x_tick -
              interp1d(ds, x, bounds_error=False)(s_tick + delta_s)) / delta_s
    u_tick = (y_tick - interp1d(ds, y, bounds_error=False)
              (s_tick + delta_s)) / (delta_s * aspect_ratio)
    n = numpy.sqrt(u_tick**2 + v_tick**2)

    # Calculate the offset in x and y for the tick
    theta = radians(theta)
    trans_matrix = numpy.array([[cos(theta), -sin(theta)],
                                [sin(theta), cos(theta)]])
    dxy = numpy.dot(numpy.array([u_tick / n, v_tick / n]).T,
                    trans_matrix) * len_tick * s_tot

    # Draw the base line
    base_line = plt.Line2D(x_tick, y_tick)
    axis.add_line(base_line)

    # Draw each tick
    for i in range(len(x_tick)):
        axis.add_line(
            plt.Line2D(
                [x_tick[i], x_tick[i] - flip * dxy[i, 0]],
                [y_tick[i], (y_tick[i] - flip * dxy[i, 1] / aspect_ratio)]))
    #end

    return axis
Example #38
arw.arrow(1794, 1, -629, 0, width=.1, head_length=40, color='k')
arw.arrow(500, 1, 160, 0, width=.1, head_length=40, color='k')
arw.arrow(500, 1, -460, 0, width=.1, head_length=40, color='k')
arw.text(350, 1.2, '100\,kyr', ha='center')
arw.text(1919, 1.2, '41.1\,kyr', ha='center')
arw.axis('off')
benth2 = ax.twinx()
T = lambda b: 16.1 - 4.76 * (b - 0.2)
benth2.yaxis.set_major_locator(plt.MaxNLocator(5))
benth2.set_ylim(-7.2, 3.2)
benth2.plot(-benth[:, 0], T(benth[:, 1]), linewidth=0)
benth2.set_ylabel('Temperature ($^\circ$C)', rotation=-90, labelpad=20)
fig.savefig('../benthic_3m_years.pdf')

fig, axs = plt.subplots(1, 1, tight_layout=True, figsize=(10, 3))
ben_fun = interp1d(benth[:, 0], benth[:, 1])
quat_f, quat_p = welch(ben_fun(np.linspace(-2500, 0, 2500)), 1, nperseg=790)
pre_f, pre_p = welch(ben_fun(np.linspace(-2500, -1250, 1250)), 1, nperseg=810)
post_f, post_p = welch(ben_fun(np.linspace(-700, 0, 700)), 1, nperseg=700)
quat, = axs.plot(quat_f,
                 quat_p / np.mean(quat_p),
                 linewidth=3,
                 label='Quaternary',
                 zorder=2)
pre, = axs.plot(pre_f,
                pre_p / np.mean(pre_p),
                'C1',
                linewidth=3,
                label='Before MPT',
                zorder=1)
post, = axs.plot(post_f,