Example #1
def loadgs(num, r=False, mayavi=False, **kwargs):
    # Resolve a colormap name ('name' or 'name_r' for reversed) to its index.
    if isinstance(num, str):
        name = num
        if name.endswith('_r'):
            r = True
            num = gsdict[num[:-2]]
        else:
            num = gsdict[num]
    else:
        name = 'gs'

    if not mayavi:
        # Each colour table occupies a 256-row block of gs_colors.txt.
        output = scipy.genfromtxt(cmd_folder+'/gs_colors.txt',
                                  skip_header=256*num,
                                  skip_footer=(14-num)*256)
        if r:
            output = output[::-1]

        return matplotlib.colors.LinearSegmentedColormap.from_list(name, output, **kwargs)
    else:
        # Mayavi expects an integer RGBA lookup table.
        output = scipy.ones((256, 4), dtype=int)
        output[:, 0:3] = scipy.genfromtxt(cmd_folder+'/gs_colors.txt',
                                          skip_header=256*num,
                                          skip_footer=(14-num)*256, dtype=int)
        if r:
            output = output[::-1]

        return output
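A minimal usage sketch for loadgs, assuming the module globals gsdict (name-to-index mapping) and cmd_folder (directory holding gs_colors.txt) are defined as the snippet implies; the index and data below are illustrative only.

import matplotlib.pyplot as plt
import numpy as np

cmap = loadgs(3, r=True)                      # hypothetical: table 3, reversed
plt.imshow(np.random.rand(16, 16), cmap=cmap)
plt.colorbar()
plt.show()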
def dreal(file1,file2):
	# Match each host halo (ID in column 11) to subhaloes whose column 14
	# references that ID, accumulating relative distances and velocities.
	halo=sc.genfromtxt(file1,dtype=float)
	sub=sc.genfromtxt(file2,dtype=float)
	d_real, Vcirc, Vreal=np.array([]),np.array([]), np.array([])
	Xoff_halo, Xoff_sub=np.array([]), np.array([])

	for i in np.arange(len(halo[:,0])):

		mask=sub[:,14]==halo[i,11]
		N=np.size(sub[mask,14])

		if N >= 1:

			dx=float(sub[mask,0][0])-float(halo[i,0])
			dy=float(sub[mask,1][0])-float(halo[i,1])
			dz=float(sub[mask,2][0])-float(halo[i,2])
			d_real=np.append(d_real,np.sqrt(dx**2+dy**2+dz**2)*1000.)
			Vcirc=np.append(Vcirc,float(sub[mask,10][0])/float(halo[i,10]))
			dvx=float(sub[mask,3][0])-float(halo[i,3])
			dvy=float(sub[mask,4][0])-float(halo[i,4])
			dvz=float(sub[mask,5][0])-float(halo[i,5])
			Vreal=np.append(Vreal,np.sqrt(dvx**2+dvy**2+dvz**2)/float(halo[i,10]))
			Xoff_halo=np.append(Xoff_halo,float(halo[i,15]))
			Xoff_sub=np.append(Xoff_sub,float(sub[mask,15][0]))

	# Build the complementary cumulative fraction once, after the loop
	# (the original recomputed and re-sorted it on every match).
	N=float(len(d_real))
	Y_=1.-(np.arange(N)/N)
	d_real=np.sort(d_real)

	return d_real, Y_, Vcirc, Vreal, Xoff_halo, Xoff_sub
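A hedged usage sketch for dreal, assuming matplotlib.pyplot is imported as plt; the catalogue file names are hypothetical.

d, Y, Vc, Vr, Xh, Xs = dreal('halos.dat', 'subhalos.dat')
plt.step(d, Y)                 # fraction of pairs with separation >= d
plt.xlabel('d [kpc]')
plt.ylabel('f(>=d)')
plt.show()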
def main():
    # Open the data files
    outward = datadir + "/85Outward.csv"
    inward = datadir + "/85Inward.csv"

    outdata = _sp.genfromtxt(outward, delimiter=",", skip_header=1)
    indata = _sp.genfromtxt(inward, delimiter=",", skip_header=1)

    # Create plots
    pcoeffs, ncoeffs = mkplot(outdata, "Current adjusted increasing", "b.")
    mkplot(indata, "Current adjusted decreasing", "g.")

    # Set display options
    _plt.legend()
    _plt.xlabel("Current (A)")
    _plt.ylabel("Resonance (MHz)")
    _plt.title(r"$^{85}Rb$")

    _plt.show()

    # Calculate nuclear spin
    fit_b, fit_m, freq_err, err_b, err_m = pcoeffs
    measured_spin, err_spin = nuc_spin(fit_m, err_m, 0.29, 0.005, 135)
    spin = get_spin(measured_spin)

    # Calculate the earth's field
    outfield, sigoutfield = earth_field(outdata, spin)
    infield, siginfield = earth_field(indata, spin)

    # Combine the two field estimates with an inverse-variance weighted mean.
    sigfield = 1.0 / sigoutfield ** 2 + 1.0 / siginfield ** 2
    sigfield = 1.0 / _sp.sqrt(sigfield)
    field = outfield / sigoutfield ** 2 + infield / siginfield ** 2
    field *= sigfield ** 2

    return (spin, measured_spin, field, sigfield, freq_err, ncoeffs[2])
def fun_(var1,var2):

	halo_=sc.genfromtxt(var1)
	sub_=sc.genfromtxt(var2)

	#Variables
	Xoff_new_, V_circ_, dist_3d_=np.array([]), np.array([]), np.array([])
	dist_2d_XY_, dist_2d_YZ_, dist_2d_XZ_=np.array([]), np.array([]), np.array([])
	angulo_=np.array([])
	Parameter=0.

	for i in np.arange(len(halo_[:,0])):

		mask=sub_[:,14]==halo_[i,11]
		N_=np.size(sub_[mask,14])

		if N_==1:

	#Parameters

			V_circ=float(sub_[mask,10][0])/float(halo_[i,10])
			X_sh=(float(sub_[mask,0][0])-float(halo_[i,0]))*1000.
			Y_sh=(float(sub_[mask,1][0])-float(halo_[i,1]))*1000.
			Z_sh=(float(sub_[mask,2][0])-float(halo_[i,2]))*1000.
			dist_3d=np.sqrt(X_sh**2+Y_sh**2+Z_sh**2)
			dist_2d_XY=np.sqrt(X_sh**2+Y_sh**2)
			dist_2d_YZ=np.sqrt(Y_sh**2+Z_sh**2)
			dist_2d_XZ=np.sqrt(X_sh**2+Z_sh**2)
			Vx, Vy, Vz=float(sub_[mask,3][0])-float(halo_[i,3]), float(sub_[mask,4][0])-float(halo_[i,4]), float(sub_[mask,5][0])-float(halo_[i,5])
			V_r=(Vx*X_sh)+(Vy*Y_sh)+(Vz*Z_sh) # scalar (dot) product
			d_norm=np.sqrt(X_sh**2+Y_sh**2+Z_sh**2)
			V_norm=np.sqrt(Vx**2+Vy**2+Vz**2)
			angulo=V_r/(d_norm*V_norm) # cosine of the angle between velocity and separation
			Rvirial_host=float(halo_[i,8])
			Xoff_new=(dist_3d/Rvirial_host)

	#Vectors

			angulo_=np.append(angulo_,angulo)
			Xoff_new_=np.append(Xoff_new_,Xoff_new)
			V_circ_=np.append(V_circ_,V_circ)
			dist_3d_=np.append(dist_3d_,dist_3d)
			dist_2d_XY_=np.append(dist_2d_XY_,dist_2d_XY)
			dist_2d_YZ_=np.append(dist_2d_YZ_,dist_2d_YZ)
			dist_2d_XZ_=np.append(dist_2d_XZ_,dist_2d_XZ)

	mask_v=(V_circ_>=Parameter)

	return angulo_[mask_v], Xoff_new_[mask_v], dist_3d_[mask_v], dist_2d_XY_[mask_v], dist_2d_YZ_[mask_v], dist_2d_XZ_[mask_v]
Example #5
def import_data_rows(data_path, data_delimiter, rows_numb=2, cols=None, data_type=None):
    # Read a limited number of rows and/or a subset of columns from a
    # delimited file (rows_numb=None or cols=None lifts that limit).
    with open(data_path) as f_in:
        if cols is None:
            return scipy.genfromtxt(itertools.islice(f_in, rows_numb),
                                    delimiter=data_delimiter, dtype=data_type,
                                    usemask=False, deletechars='"')
        elif rows_numb is None:
            return scipy.genfromtxt(f_in, delimiter=data_delimiter,
                                    dtype=data_type, usecols=cols, deletechars='"')
        else:
            return np.genfromtxt(itertools.islice(f_in, rows_numb),
                                 delimiter=data_delimiter, usecols=cols,
                                 dtype=data_type, deletechars='"')
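A usage sketch, assuming a hypothetical delimited file table.csv:

first_rows = import_data_rows('table.csv', ',', rows_numb=2)                 # first two rows, all columns
two_cols = import_data_rows('table.csv', ',', rows_numb=None, cols=(0, 2))   # all rows, columns 0 and 2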
def fun_(var1,var2):

	halo_=sc.genfromtxt(var1)
	sub_=sc.genfromtxt(var2)

	#Variables
	Xoff_new_, Xoff_old_, V_circ_, dist_3d_=np.array([]), np.array([]), np.array([]), np.array([])
	dist_2d_XY_, dist_2d_YZ_, dist_2d_XZ_, Rvirial_host_=np.array([]), np.array([]), np.array([]), np.array([])

	Parameter=0.5

	for i in np.arange(len(halo_[:,0])):

		mask=sub_[:,14]==halo_[i,11]
		N_=np.size(sub_[mask,14])

		if N_==1:

	#Parameters

			dx=(float(sub_[mask,0][0])-float(halo_[i,0]))*1000.
			dy=(float(sub_[mask,1][0])-float(halo_[i,1]))*1000.
			dz=(float(sub_[mask,2][0])-float(halo_[i,2]))*1000.
			V_circ=float(sub_[mask,10][0])/float(halo_[i,10])
			dist_3d=np.sqrt(dx**2+dy**2+dz**2)
			dist_2d_XY=np.sqrt(dx**2+dy**2)
			dist_2d_YZ=np.sqrt(dy**2+dz**2)
			dist_2d_XZ=np.sqrt(dx**2+dz**2)
			Rvirial_host=float(halo_[i,8])
			Xoff_new=(dist_3d/Rvirial_host)
			Xoff_old=float(halo_[i,15])

	#Vectors

			Xoff_new_=np.append(Xoff_new_,Xoff_new)
			Xoff_old_=np.append(Xoff_old_,Xoff_old)
			V_circ_=np.append(V_circ_,V_circ)
			dist_3d_=np.append(dist_3d_,dist_3d)
			dist_2d_XY_=np.append(dist_2d_XY_,dist_2d_XY)
			dist_2d_YZ_=np.append(dist_2d_YZ_,dist_2d_YZ)
			dist_2d_XZ_=np.append(dist_2d_XZ_,dist_2d_XZ)
			Rvirial_host_=np.append(Rvirial_host_,Rvirial_host)

	# Keep only systems above the circular-velocity ratio threshold and build
	# the complementary cumulative fraction of the sorted distances.
	mask_v=(V_circ_>=Parameter)
	N=float(len(V_circ_[mask_v]))
	Y_=1.-(np.arange(N)/N)
	XYZ=np.sort(dist_3d_[mask_v])
	XY=np.sort(dist_2d_XY_[mask_v])

	return XYZ, XY, Y_, dist_2d_XY_, V_circ_
def load_data_set(file_location, delimiter, column_y, column_x1):
    """
    :param file_location: string for data file location
    :param delimiter: ',' for CSV, etc.
    :param column_y: the column containing the target values
    :param column_x1: input data column -- right now, I only take 1
    TODO: add a parameter that is a set so I can take multiple input columns
    :return: Numpy Array of target values, input values, and bias term (bias term always = 1.0)
    """
    data = sp.genfromtxt(file_location, delimiter=delimiter, dtype=None)

    # Need to get everything after the headers

    X = data[1:, column_x1]
    Y = data[1:, column_y]

    # we make the cases 1 and -1 to fit with the Likelihood Formula
    # P(y | x) -> h(x) for y = +1
    # P(y | x) -> 1 - h(x) for y = -1
    # Note: with dtype=None, genfromtxt may return bytes under Python 3,
    # in which case compare against b'Yes' instead.
    y_numeric = [1.0 if entry == 'Yes' else -1.0 for entry in Y]

    # Will use this for x0
    ones = [1.0 for x in X]

    # list() is required under Python 3, where zip returns an iterator.
    return np.array(list(zip(y_numeric, X, ones)), dtype='float_')
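The docstring's TODO asks for multiple input columns. A minimal sketch of that extension, under the assumption that the file parses into a 2-D string array; this is an illustration, not the author's implementation:

def load_data_set_multi(file_location, delimiter, column_y, columns_x):
    # columns_x: iterable of input-column indices.
    data = np.genfromtxt(file_location, delimiter=delimiter, dtype=str)
    Y = data[1:, column_y]
    X = data[1:, list(columns_x)].astype(float)
    y_numeric = np.where(Y == 'Yes', 1.0, -1.0)  # +1/-1 for the likelihood formula
    ones = np.ones((len(X), 1))                  # bias term x0
    return np.hstack([y_numeric[:, None], X, ones])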
def TSP(stops, Alg, steps, param, seed = None,
                      coordfile = 'xycoords.txt'):
    '''A wrapper function that attempts to optimize the traveling 
    salesperson problem using a specified algorithm. If coordfile
    exists, a preexisting set of coordinates will be used. Otherwise,
    a new set of "stops" coordinates will be generated for the person to 
    traverse, and will be written to the specified file.'''
    
    ## Create the distance matrix, which will be used to calculate
    ## the fitness of a given path
    if os.path.isfile(coordfile):
        coords = scipy.genfromtxt(coordfile)
        distMat = DistanceMatrix(coords)
    else:
        distMat = GenerateMap(stops, fname = coordfile, seed = seed)

    if Alg == 'HC':
        ## param is the number of solutions to try per step
        bestSol, fitHistory = HillClimber(steps, param, distMat, seed)
    elif Alg == 'SA':
        ## param is a placeholder
        bestSol, fitHistory = SimulatedAnnealing(steps, param, distMat, seed)
    elif Alg == 'MC3':
        ## param is the number of chains
        bestSol, fitHistory = MCMCMC(steps, param, distMat, seed)
    elif Alg == 'GA':
        ## param is the population size
        bestSol, fitHistory = GeneticAlgorithm(steps, param, distMat, seed)
    else:
        raise ValueError('Algorithm must be "HC", "SA", "MC3", or "GA".')

    outfname = coordfile + '-' + Alg + '-' + str(steps) + '-' + str(param) + '.txt'
    scipy.savetxt(outfname, scipy.array(bestSol), fmt = '%i')
    return bestSol, fitHistory
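A hedged usage sketch, assuming the helper functions referenced above (GenerateMap, DistanceMatrix, HillClimber, ...) are importable from the same module:

# Hill-climb a 20-stop tour: 1000 steps, 5 candidate solutions per step.
best_path, history = TSP(stops=20, Alg='HC', steps=1000, param=5, seed=42)
print(best_path)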
Example #9
def main():
    data = sp.genfromtxt('./data/web_traffic.tsv', delimiter='\t')
    x = data[:, 0]
    y = data[:, 1]
    x = x[~sp.isnan(y)]
    y = y[~sp.isnan(y)]
    fp1 = sp.polyfit(x, y, 1)
    print('Model parameters for fp1 %s' % fp1)
    f1 = sp.poly1d(fp1)
    print('This is the error rate for fp1 %f' % error(f1, x, y))

    fp2 = sp.polyfit(x, y, 2)
    print('Model parameters for fp2 %s' % fp2)
    f2 = sp.poly1d(fp2)
    print('This is the error rate for fp2 %f' % error(f2, x, y))

    plt.scatter(x, y, color='pink')
    plt.title('My first impression')
    plt.xlabel('Time')
    plt.ylabel('#Hits')
    plt.xticks([w * 7 * 24 for w in range(10)], ['week %i' % w for w in range(10)])
    fx = sp.linspace(0, x[-1], 1000)
    plt.plot(fx, f1(fx), linewidth=3,color='cyan')


    plt.plot(fx, f2(fx), linewidth=3, linestyle='--', color='red')
    plt.legend(['d = %i' %f1.order, 'd = %i' %f2.order], loc='upper left')
    plt.autoscale(tight=True)
    plt.grid()
    plt.show()
def plot_2():
    data = sp.genfromtxt(r'F:\EPAM\coursera\ML_COURSERA_GARVARD\machine-learning-ex1\machine-learning-ex1\ex1\ex1data1.txt', delimiter=',')
    x = data[:, 0]
    y = data[:, 1]
    m = len(y)
    y = y.reshape(m, 1)
    x1 = np.array([])

    # Prepend a column of ones (the bias term) to the inputs.
    for xi in x:
        x1 = np.append(x1, [1, xi])
    x = x1.reshape(m, 2)

    theta = np.zeros((2, 1))
    iterations = 1500
    alpha = 0.01

    cost = computerCost(x, y, theta)
    theta = Ggradient_Descent(x, y, theta, alpha, iterations)

    print(cost)
    print(theta)
    # Predictions at x = 3.5 and x = 7.
    pr1 = np.array([1, 3.5]).dot(theta)
    pr2 = np.array([1, 7]).dot(theta)

    print(pr1)
    print(pr2)
    y_1 = x.dot(theta)
    y_1.shape = (m, 1)
    plt.title('Linear regression')
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.plot(x[:, 1], y, 'b-')
    plt.plot(x[:, 1], y_1, 'r-')
    plt.show(block=True)
Example #11
def loadct(num, **kwargs):
    fname = os.path.join(ifigure.__path__[0], 'utils', 'idl_colors.txt')
    output = scipy.genfromtxt(fname,
                              skip_header=256*num,
                              skip_footer=(39-num)*256)/255.
    return matplotlib.colors.LinearSegmentedColormap.from_list('idl'+str(num),
                                                               output, **kwargs)
Example #12
	def AtmosphereCondition(self,Height):
		# Find the first table row whose altitude exceeds Height and return
		# the previous row's temperature, pressure and density.
		data = sp.genfromtxt('AC.d',delimiter=',')
		PresGrand = 0.1013   #[MPa]
		for i in range(0,520):
			if Height < data[i,0]:
				break
		return data[i-1,1],data[i-1,2]*PresGrand,data[i-1,3]
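Since the altitude column of AC.d appears to be sorted ascending, the linear scan could be replaced by a binary search; a sketch under that assumption (np is numpy, data and Height as in the method above):

i = np.searchsorted(data[:, 0], Height, side='right')
row = data[max(i - 1, 0)]
# row[1], row[2]*PresGrand, row[3] match the values returned above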
Example #13
def read_itcsimlib_exp( file, exp_args={} ):
	from scipy import genfromtxt
	from .itc_experiment import ITCExperiment

	ignore = ("itcsim","Date","Ivol","units")
	data,h = genfromtxt(file,unpack=True),open(file)
	kwargs = {'Cell':{},'Syringe':{}}
	# Parse '#'-prefixed header lines into keyword arguments.
	for a in [l.split()[1:] for l in h.readlines() if l[0]=='#']:
		if a == [] or a[0] in ignore:
			continue
		elif a[0] == 'Cell' or a[0] == 'Syringe':
			kwargs[a[0]][a[1]] = float(a[2])
		elif a[0].lower() == 'skip':
			kwargs['skip'] = list(map(int,a[1:]))  # list() for Python 3
		else:
			kwargs[a[0]] = float(a[1])
	h.close()
	if 'title' not in kwargs:
		kwargs['title'] = os.path.splitext(os.path.basename(file))[0]

	# overwrite any file-obtained info with explicit values
	kwargs.update(exp_args)
	if len(data) == 2:
		return ITCExperiment(injections=data[0],dQ=data[1],**kwargs)
	elif len(data) == 3:
		return ITCExperiment(injections=data[0],dQ=data[1],dQ_err=data[2],**kwargs)
	else:
		return None # TODO : parser errors
Example #14
def read_inputcat_for_mgc3(filename,pardic=None):

  #Open file
  if '.gz' in filename:
     inputfile=gzip.open(filename,'r')
     filename=filename.replace('.gz','')
  else: inputfile=open(filename,'r')

  obsdata = scipy.genfromtxt(inputfile,comments='#')
  
  #Deal with one-line files
  if np.ndim(obsdata)==1: obsdata=np.reshape(obsdata,(1,obsdata.size))

  #Do cuts
  mask = obsdata[:,0]==obsdata[:,0]  #Initialize mask to an all-True vector
  if pardic:
    for NAUX in range(1,pardic['NAUX']+1,1):
      mykey_col='AUX%d_col' % (NAUX)
      mykey_valo='AUX%d_o' % (NAUX)
      mykey_valf='AUX%d_f' % (NAUX)
      #Skip if col=998
      if pardic[mykey_col]!=998:
        print(' Cutting input catalogue with %.1f<%s[%d]<%.1f' % (pardic[mykey_valo],mykey_col,pardic[mykey_col]+1,pardic[mykey_valf]))
        #Create mask
        mask_i = (obsdata[:,pardic[mykey_col]]>pardic[mykey_valo]) & (obsdata[:,pardic[mykey_col]]<pardic[mykey_valf])
        #Combine masks
        mask = mask & mask_i

  #Apply mask
  obsdata=obsdata[mask,:]
    
  #Return data
  return (obsdata,filename)
Example #15
def LDA_batch_normalization(dataset, sample_table, batch_col, output_folder, ncomps): # this is actually the batch normalization method
   
    tmp_output_folder = os.path.join(output_folder, 'tmp')

    if not os.path.isdir(tmp_output_folder):
        os.makedirs(tmp_output_folder)
    
    barcodes, filtered_conditions, filtered_matrix, conditions, matrix = dataset
    
    # Remove any remaining NaNs and Infs from the filtered matrix - they would screw
    # up the LDA. 
    filtered_matrix[scipy.isnan(filtered_matrix)] = 0
    filtered_matrix[scipy.isinf(filtered_matrix)] = 0

    # For full matrix, also eliminate NaNs and Infs, BUT preserve the indices and values
    # so they can be added back into the matrix later (not implemented yet, and may never
    # be - there should no longer be NaNs and Infs in the dataset)
    # The NaNs and Infs will mess up the final step of the MATLAB LDA script, which uses
    # matrix multiplication to remove the specified number of components!
    matrix_nan_inds = scipy.isnan(matrix)
    matrix_nan_vals = matrix[matrix_nan_inds]
    matrix_inf_inds = scipy.isinf(matrix)
    matrix_inf_vals = matrix[matrix_inf_inds]

    matrix[matrix_nan_inds] = 0
    matrix[matrix_inf_inds] = 0

    # Save both the small matrix (for determining the components to remove) and the 
    # full matrix for the matlab script
    filtered_matrix_tmp_filename = os.path.join(tmp_output_folder, 'nonreplicating_matrix.txt')
    full_matrix_tmp_filename = os.path.join(tmp_output_folder, 'full_matrix.txt')
    
    np.savetxt(filtered_matrix_tmp_filename, filtered_matrix)
    np.savetxt(full_matrix_tmp_filename, matrix)

    # Map the batch to integers for matlab, and write out to a file so matlab can read
    # Note that yes, the batch_classes should match up with the filtered matrix, not
    # the full matrix
    batch_classes = get_batch_classes(dataset = [barcodes, filtered_conditions, filtered_matrix], sample_table = sample_table, batch_col = batch_col)
    class_tmp_filename = os.path.join(tmp_output_folder, 'classes.txt')
    writeList(batch_classes, class_tmp_filename)
   
    output_tmp_filename = os.path.join(tmp_output_folder, 'full_matrix_lda_normalized.txt')
    runLDAMatlabFunc(filtered_matrix_filename = filtered_matrix_tmp_filename,
            matrix_filename = full_matrix_tmp_filename,
            class_filename = class_tmp_filename,
            ncomps = ncomps,
            output_filename = output_tmp_filename)
    # The X norm that is returned is the full matrix. In the future, we could add in
    # returning the components to remove so they can be visualized or applied to other
    # one-off datasets
    Xnorm =  scipy.genfromtxt(output_tmp_filename)

    ## Dump the dataset out!
    #output_filename = os.path.join(mtag_effect_folder, 'scaleddeviation_full_mtag_lda_{}.dump.gz'.format(ncomps))
    #of = gzip.open(output_filename, 'wb')
    #cPickle.dump([barcodes, conditions, Xnorm], of)
    #of.close()

    return [barcodes, conditions, Xnorm]
Example #16
def extremalPlot(V,c):
    datZM = genfromtxt('zero-mode-V0='+str(V)+'.dat')
    rh = datZM[:,-3]
    a = datZM[:,-2]
    w = a/(a*a+rh*rh)
    Qe = V * (a*a+rh*rh)/rh
    M = (a*a+rh*rh+Qe*Qe)/(2*rh)
    plot(w,M,c+'-',ms=2.5)
    extremalLine = lambda w: sqrt((1.0 - 4.0*V*V + sqrt(1.0+8.0*V*V))/w)/(2.0*sqrt(2.0*w))
    ws = linspace(0.75,1,100)
    plot(ws,extremalLine(ws),c+'-',ms=2.5)

    datExtremal = genfromtxt('gnuplot-extremal-V0='+str(V)+'.dat')
    wExtremal = datExtremal[:,0]
    MExtremal = datExtremal[:,2]
    plot(wExtremal,MExtremal,c+'--',ms=2.5)
Example #17
def MainFrac(dire,MdotO,Time):
	#{{{
	Frac = sp.genfromtxt(dire+'Fraction.d',delimiter=',')

	#CH4 = Frac[:,0]
	#CO2 = Frac[:,1]
	#CO  = Frac[:,2]
	#H   = Frac[:,3]
	#H2  = Frac[:,4]
	#H2O = Frac[:,5]
	#O   = Frac[:,6]
	#O2  = Frac[:,7]
	#OH  = Frac[:,8]

	plt.plot(Time,Frac[:,1])
	plt.plot(Time,Frac[:,2])
	plt.plot(Time,Frac[:,3])
	plt.plot(Time,Frac[:,4])
	plt.plot(Time,Frac[:,5],'--')
	plt.plot(Time,Frac[:,6],'--')
	plt.plot(Time,Frac[:,7],'--')
	plt.plot(Time,Frac[:,8],'--')
	plt.grid()
	##plt.title('MdotO:%s[kg/s]'%MdotO)
	plt.legend(( 'CO2 ',  'CO ',  'H ',  'H2 ',  'H2O ',  'O ',  'O2 ', 'OH' ))
	plt.xlabel('Time[s]')
	plt.ylabel('Mole Fraction')
	#plt.ylim([0,100])
	plt.savefig(dire+"Frac_Mdot%s.png"%float(MdotO))
	plt.close()
def init_and_cleanup_data(path, delimiter):
    data = sp.genfromtxt(path, delimiter=delimiter)
    hours = data[:, 0] # contains the hours
    webhits = data[:, 1] # contains the number of web hits at a particular hour
    hours = hours[~sp.isnan(webhits)]
    webhits = webhits[~sp.isnan(webhits)]
    return (hours, webhits)
def ReadDatas(dataDirectory):
    datas = []
    for fileName in os.listdir(dataDirectory):
        fullPath = os.path.join(dataDirectory, fileName)
        print("LOADING : " + fullPath)
        if os.path.isfile(fullPath):
            datas.append(sp.genfromtxt(fullPath, delimiter=","))
    return datas[::-1]  # because the original data is from late to early
    def load_single_data(self):
        single_data = sp.genfromtxt(os.path.join(DATA_DIR, "ex1data1.txt"), delimiter=",")
        x = single_data[:, 0]
        y = single_data[:, 1]

        x = np.transpose(np.atleast_2d(x))

        return (x, y)
Example #21
def load_data():
    datas = sp.genfromtxt("web_traffic.tsv", delimiter='\t')
    print(datas[:10])
    x = datas[:, 0]
    y = datas[:, 1]
    x = x[~sp.isnan(y)]
    y = y[~sp.isnan(y)]
    return x, y
Example #22
def PreVari(dire,MdotO):
	
	#Pre{{{
	PreVari = sp.genfromtxt(dire+'Variable.d',delimiter=',')
	Time = PreVari[:,0]
	Pres = PreVari[:,1]
	Temp = PreVari[:,2]
	OF   = PreVari[:,3]
	Mdot = PreVari[:,4]
	
	
	#print Time[:20]
	#print Pres[:20]
	
	plt.plot(Time,Pres)
	plt.grid()
	##plt.title('MdotO:%s[kg/s]'%MdotO)
	plt.legend(('LOx:%s[kg/s]'%float(MdotO),))
	plt.xlabel('Time[s]')
	plt.ylabel('Pres[MPa]')
	#plt.ylim([0,100])
	plt.savefig(dire+"Pres_Mdot%s.png"%float(MdotO))
	plt.close()
	
	plt.plot(Time,Temp)
	plt.grid()
	##plt.title('MdotO:%s[kg/s]'%MdotO)
	plt.legend(('LOx:%s[kg/s]'%float(MdotO),))
	plt.xlabel('Time[s]')
	plt.ylabel('Temp[K]')
	#plt.ylim([0,100])
	plt.savefig(dire+"Temp_Mdot%s.png"%float(MdotO))
	plt.close()
	
	plt.plot(Time,OF)
	plt.grid()
	##plt.title('MdotO:%s[kg/s]'%MdotO)
	plt.legend(('LOx:%s[kg/s]'%float(MdotO),))
	plt.xlabel('Time[s]')
	plt.ylabel('OF[-]')
	#plt.ylim([0,100])
	plt.savefig(dire+"OF_Mdot%s.png"%float(MdotO))
	plt.close()
	
	plt.plot(Time,Mdot)
	plt.grid()
	##plt.title('MdotO:%s[kg/s]'%MdotO)
	plt.legend(('LOx:%s[kg/s]'%float(MdotO),))
	plt.xlabel('Time[s]')
	plt.ylabel('Mdot[kg/s]')
	#plt.ylim([0,100])
	plt.savefig(dire+"Mdot_Mdot%s.png"%float(MdotO))
	plt.close()
	
	#}}}
	
	print('ok')
	return Time
def plot_1():
    data = sp.genfromtxt(r'F:\EPAM\coursera\ML_COURSERA_GARVARD\machine-learning-ex1\machine-learning-ex1\ex1\ex1data1.txt', delimiter=',')
    x = data[:, 0]
    y = data[:, 1]
    m = len(y)
    y = y.reshape(m, 1)

    plot(x, y)
    plt.show(block=True)
Example #24
def create_dataset(tsv):

    data = sp.genfromtxt(tsv, delimiter=",")
    # DataFrame.append was removed in pandas 2.0; build the frame in one call.
    data_set = DataFrame(data, columns=["x", "t"])

    return data_set
Example #25
def get_data():
    data = sp.genfromtxt("input/web_traffic.tsv", delimiter="\t")

    x = data[:, 0]
    y = data[:, 1]

    x = x[~sp.isnan(y)]
    y = y[~sp.isnan(y)]

    return (x, y,)
Example #26
def loadct(num, r=False, mayavi=False, **kwargs):
    if not mayavi:

        output = scipy.genfromtxt(cmd_folder+'/idl_colors.txt',
                                  skip_header=256*num,
                                  skip_footer=(39-num)*256)/255.
        if r:
            output = output[::-1]
            
        return matplotlib.colors.LinearSegmentedColormap.from_list('idl', output, **kwargs)
    else:
        output = scipy.ones((256,4),dtype=int)
        output[:,0:3] = scipy.genfromtxt(cmd_folder+'/idl_colors.txt',
                                         skip_header=256*num,
                                         skip_footer=(39-num)*256,dtype=int)
        if r:
            output = output[::-1]

        return output
Example #27
def read_UA(fname):
    from datetime import datetime as dt

    X = sp.genfromtxt(fname, skip_header=2, delimiter=';', usecols=(1, 2), dtype="S20,f8", names=["f0", "f1"])
    print(X)

    # The 'S20' field is bytes under Python 3, so decode before parsing.
    dates = [dt.strptime(dts.decode(), '%m.%d.%y %I:%M:%S %p') for dts in X["f0"]]

    T = X["f1"]
    return dates, T
def plot_1():
    data = sp.genfromtxt(r'F:\EPAM\coursera\ML_COURSERA_GARVARD\machine-learning-ex1\machine-learning-ex1\ex1\ex1data2.txt', delimiter=',')
    x = data[:, 0:2]
    y = data[:, 2:3]
    m = len(y)

    mu, x, sigma = featureNormalize(x)
    print(mu)
    print(x)
    print(sigma)
Example #29
def load_samples(fname):
	""" Load training sample dataset """

	data = sp.genfromtxt(fname, delimiter='\t')
	x = data[:, 0]
	y = data[:, 1]

	print('Loaded %i entries, of which %i are invalid.' % (sp.shape(data)[0], sp.sum(sp.isnan(y))))
	x = x[~sp.isnan(y)]
	y = y[~sp.isnan(y)]
	return (x, y)
Example #30
def RNFinder(KID):

    file = '%s/GlobalLCInfo.dat' % keplerdefs.ROOTDIR
    X2 = scipy.genfromtxt(file)
    ind = scipy.where(X2.T[1] == KID)
    ind = ind[0]

    print('ind', ind)
    if len(ind) == 0: return scipy.array([-999])

    return X2.T[:,ind]
Example #31
import os
import scipy as sp

from utils import DATA_DIR, CHART_DIR


def error(f, x, y):
    return sp.sum((f(x) - y)**2)


'''
numpy.genfromtxt(fname, dtype=<type 'float'>, comments='#', delimiter=None, skip_header=0, skip_footer=0, converters=None, missing_values=None, filling_values=None, usecols=None, names=None, excludelist=None, deletechars=None, replace_space='_', autostrip=False, case_sensitive=True, defaultfmt='f%i', unpack=None, usemask=False, loose=True, invalid_raise=True, max_rows=None)
Load data from a text file, with missing values handled as specified.
'''
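For reference, a minimal sketch of the missing-value handling the quoted signature refers to, using hypothetical inline data:

import io
import numpy as np

raw = io.StringIO("1,2\n3,\n5,6")                          # second row has a gap
arr = np.genfromtxt(raw, delimiter=",", filling_values=-1)
print(arr)                                                 # [[ 1.  2.] [ 3. -1.] [ 5.  6.]]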

data = sp.genfromtxt("web_traffic.tsv", delimiter="\t")
print(data[:10])
print(data.shape)
# Dimension separation
x = data[:, 0]
print(x)
y = data[:, 1]
print(y)
# Data cleaning
nan_count = sp.sum(sp.isnan(y))
print(nan_count)  # 8 of the 743 entries are missing

x = x[~sp.isnan(y)]
print(x)
y = y[~sp.isnan(y)]
print(y)
Example #32
import scipy as sp
data = sp.genfromtxt("WT001.csv", delimiter="\t")
Example #33
import scipy as sp
import numpy as np
import timeit as ti
import matplotlib.pyplot as pl

data = sp.genfromtxt("entradaIng.csv", delimiter="")
p = data[:, :-2]  # the array without the last two columns
t = data[:, [-2, -1]]  # the last two columns
constante = 0.3
erroresSum = []
w = np.array(
    [[0.5, -0.5, 0.8, -0.5], [0.9, 0.4, -0.2, -0.2], [0.5, 0.2, 0.1, 0.8],
     [0.5, 0.8, -0.1,
      0.7], [0.5, 0.7, -0.2, 0.4]]
)  # weight array: vector 1 for neuron 1, vector 2 for neuron 2, ..., vector n for neuron n
print(w)  # [gain, input1, input2]
#w = np.random.rand(p.shape[1],3)  # random initial weights; the last argument sets how many weights per neuron

# Gain (bias)
b = np.ones((p.shape[0], 1))  # column vector of ones
p = np.concatenate((b, p), axis=1)  # prepend the ones column to p


def neurona(p, w):
    # Multiply the input vector (or points, in the first case) by this
    # neuron's weights, stored in w[n].
    energy = energia(p, w)
    y = transferencia(energy)
    return y  # y = hdx


def energia(p, w):
Example #34
'sref', if you prefer.
"""

if len(sys.argv) == 1:
    print('Usage:')
    print('python  ref_smooth.py   infile.txt  speclist   window outfile.txt')
    print('infile.txt---  input spectrum to smooth (usually the reference for alignment)')
    print('speclist--  1 col ascii file with list of files to compare---determines target resolution')
    print('window---  window file designating wavelengths for the EmissionLine')
    print('outfile.txt---  output spectrum, after smoothing')
    sys.exit()

#reference
sref = TextSpec(sys.argv[1], style='linear')
#list of spectra
speclist = sp.genfromtxt(sys.argv[2], dtype=str)
#line used to calculate resolution
window = sp.genfromtxt(sys.argv[3])

lref = EmissionLine(sref, window[0], [window[1], window[2]])
lcenter = lref.wv_mean()

#model as a Gaussian, so that everything is measured in the same way
lmodelref = LineModel(lref, func='gaussian')

centdist = []
dispdist = []

plt.ion()
print('name         FWHM, FWHM fit')
for spec in speclist:
Example #35
        data2[i][j] = float(data2[i][j])

# Build the array
data2 = numpy.array(data2[1:], dtype='f')

# Test
print("data2 shape: " + str(numpy.shape(data2)))

# 3
# Reads the contents of the data file into an array data3 using genfromtxt.
# Using what we've covered in this class, the first column of the array this
# call to genfromtxt returns will be filled with "nan"s (i.e., "not-a-number"
# values). That's okay.  For this task, you do not have to write a function.
data3 = genfromtxt('sp500_1950-01-03_to_2016-10-07.csv',
                   skip_header=1,
                   skip_footer=0,
                   dtype='f',
                   delimiter=',')

# Test
print("data3 shape: " + str(numpy.shape(data3)) + '\n')

# 4
# Compare all values (except the first column) in data2 and data3 to confirm
# they are "equal" to each other. Since they're all floating point values,
# you're checking that they are "close" to each other. Hint: There's a function
# in NumPy called allclose that will help here.  For this task, you do not have
# to write a function.
print("All values in data2 & data3 are 'equal': " +
      str(numpy.allclose(data2[:, 1:], data3[:, 1:])))
# # Why should you care about overfitting?
#
# #### Learning Objective: In this tutorial, we will learn what it means to overfit a model and how to avoid doing so.
#
# #### Data: Imagine you own a website and can track how many hits it receives over time. Your server can only handle a certain number of requests at a time.
#
# #### Prediction Question: When will you hit 100,000 hourly hits?

# In[1]:

#load the libraries and data
import numpy as np
import scipy as sp
data = sp.genfromtxt(
    "C:/Users/tehskhu/Documents/Personal/Analytics/Projects/Building Machine Learning Systems/Data/ch01/data/web_traffic.tsv",
    delimiter="\t")

# Printing the first 10 rows of the data shows us two columns.
# 1) the hour - this will be the x variable.
# 2) the number of website requests for that hour - this will be the y variable

# In[2]:

#split the data into x, y
x = data[:, 0]
y = data[:, 1]

#remove all NAs from both x,y
x = x[~sp.isnan(y)]
y = y[~sp.isnan(y)]
Example #37
def _quadrant_image(i):
    return (sp.genfromtxt(f'data/900V/900V_5kV{i:04d}.asc',
                          delimiter='\t')[:, 1:-1] - 398) / 400
Example #38
def quadrant_background():
    global _quadrant_background
    if _quadrant_background is None:
        # Compute once and cache in the module-level variable (the original
        # returned the result without ever storing it, so it recomputed on
        # every call).
        _quadrant_background = (sp.genfromtxt('data/bg/bg.asc', delimiter='\t')[:, 1:-1] / 200
                                - 398) / 400
    return _quadrant_background
Example #39
#!/usr/bin/python

# Created by: J. G. Fernandez-Trincado
# Date: 2015, December

import numpy as np
import scipy as sc
import pylab as plt

data = sc.genfromtxt('Input data here', comments='#',
                     names=True)  #'Input data here'

RA = data['RA']
DEC = data['DEC']

# Important: you need to define the grid size in RA and DEC. I have assumed 2 degrees in RA (size_RA = 2 degrees) and 2 degrees in DEC (size_DEC = 2 degrees).

size_RA, size_DEC = 2., 2.
bin_RA = int(360. / size_RA)
bin_DEC = int((np.max(DEC) - np.min(DEC)) / size_DEC)

H, xedges, yedges = np.histogram2d(RA, DEC, bins=(bin_RA, bin_DEC))

RA, DEC, Z = [], [], []
for i in np.arange(len(xedges[:-1])):
    for j in np.arange(len(yedges[:-1])):
        RA, DEC, Z = np.append(RA, xedges[:-1][i]), np.append(
            DEC, yedges[:-1][j]), np.append(Z, H.T[j, i])

# Optional: apply the following mask if you do not want zero values in the third component, see Figure 1.2.
mask = (Z > 0)
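The double loop above can be vectorized; a sketch producing the same RA, DEC, Z ordering from the same xedges, yedges and H:

RAg, DECg = np.meshgrid(xedges[:-1], yedges[:-1], indexing='ij')
RA, DEC, Z = RAg.ravel(), DECg.ravel(), H.ravel()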
Example #40
#!/usr/bin/python
import scipy as sp
import matplotlib.pyplot as plt
data = sp.genfromtxt('nozzle.d')
print('ok')
x = data[:-1, 0]
y = data[:-1, 1]
plt.plot(x, y)
plt.savefig('nozzle.png')
plt.close()

data1 = sp.genfromtxt('data/density_087.d')
x1 = data1[:, 0]
y1 = data1[:, 1] / data1[1, 1]
plt.plot(x1, y1)
plt.savefig('data/density.png')

data3 = sp.genfromtxt('data/pressure_087.d')
x3 = data3[:, 0]
y3 = data3[:, 1] / data3[1, 1]
plt.plot(x3, y3)
plt.savefig('data/pressure.png')

data4 = sp.genfromtxt('data/Mach_087.d')
x4 = data4[:, 0]
y4 = data4[:, 1]
M = data4[1, 1]
plt.plot(x4, y4)
plt.savefig('data/Mach.png')

data2 = sp.genfromtxt('data/U_Velocity_087.d')
Example #41
import scipy as sp
import matplotlib.pyplot as plt

# Read the file
data = sp.genfromtxt('web_traffic.tsv', delimiter='\t')

# Get vectors
x = data[:, 0]
y = data[:, 1]

print("Invalid entries: ", sp.sum(sp.isnan(y)))

# Keep only valid (non-NaN) entries
x = x[~sp.isnan(y)]
y = y[~sp.isnan(y)]

x.shape, y.shape
plt.scatter(x, y, s=10)
plt.title('Web traffic over the last month')
plt.xlabel('Time')
plt.ylabel('Hits/hours')
plt.xticks([w * 7 * 24 for w in range(10)], ['week %i' % w for w in range(10)])

plt.autoscale(tight=True)
plt.grid(True, linestyle='-', color='0.75')
plt.show()

fp1, residuals, rank, sv, rcond = sp.polyfit(x, y, 1, full=True)
print("Parameters of model %s" % fp1)
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split  # sklearn.model_selection in newer scikit-learn
from sklearn.svm import SVR
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
from sklearn.datasets.samples_generator import make_regression

# Read in the data
# data = sp.genfromtxt('workday_13150_improve.txt')
data = sp.genfromtxt('100_roads_1106_data.txt')

print(data.size)

# Assign X and y
X = data[:, :17]
y = data[:, 17]

# print(X)

# X.shape = (1021,1)
# y.shape = (1021,1)

# X = np.vstack((time,X))
# y = np.vstack((time,y))

# plt.scatter(time, X)
# plt.scatter(time, y)
Example #43
# encoding=utf-8
import scipy as sp
import matplotlib.pyplot as plt
### 1. Data handling
# Returns a numpy.array
data=sp.genfromtxt(r'E:\python_code\ml\1400OS_01_Codes\data\web_traffic.tsv',delimiter='\t')
print(data[0:10])
print(data.shape)
# Index the columns
x=data[:,0]
y=data[:,1]
# Total number of invalid values in y
print(sp.sum(sp.isnan(y)))
# 8 of 743 are invalid; drop them directly (note that an array can be used as an index)
x=x[~sp.isnan(y)]
y=y[~sp.isnan(y)]
# Plot the data to get an intuitive feel for it
plt.scatter(x,y)  # scatter plot
plt.title("Web traffic over the last month")
plt.xlabel("Time")
plt.ylabel("Hits/hour")
# xticks: the first argument gives the tick positions, the second the displayed labels
plt.xticks(
    [w*7*24 for w in range(10)], ['week %i' % w for w in range(10)])  # the x axis itself is in hours
plt.autoscale(tight=True)  # auto-fit the axes to the data; very useful - without it a large margin is left
plt.grid(True)  # draw a grid (grid properties can be configured too; see the API)
# Build a degree-1 model with scipy and get its parameters
fp1,res,a,b,c= sp.polyfit(x,y,1,full=True)
print(fp1,res)
# Build a model function from the parameters
f1=sp.poly1d(fp1)
Example #44
def _full_image(i):
    return (sp.genfromtxt(f'data/483_53/ac_483_53_{i:04d}.asc',
                          delimiter='\t')[:, 1:-1] - 398) / 400
Example #45
algorithm using backpropagation
3 neurons in the output layer
n neurons in the input layer
n input values
the number of neurons in the first hidden layer is n/2 of the input values
the number of neurons in the second layer is n/3
correction factor 0.01
accepted error 0.0002
"""

import neurolab as nl
import numpy as np
import scipy as sp

# Read the data matrix
datos = np.matrix(sp.genfromtxt("datos-entrenamiento.csv", delimiter=" "))

# Network outputs
columnas_salida = 3

# Input data for the network
entrada = datos[:, :-3]
# Target output data for the network
objetivo = datos[:, -3:]

# max/min range for each network input
maxmin = np.matrix([[-5, 5] for i in range(len(entrada[1, :].T))])

# Layer sizes for the network
capa_entrada = entrada.shape[0]
capa_oculta1 = int(capa_entrada * 0.5)
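The snippet cuts off here; a hedged sketch of how the described network might be assembled with neurolab (layer sizes follow the description above, but the exact setup is an assumption, not the original code):

n = entrada.shape[1]
net = nl.net.newff(maxmin.tolist(), [int(n * 0.5), int(n / 3), columnas_salida])
# Train toward the accepted error 0.0002 with correction factor (learning rate) 0.01.
err = net.train(np.asarray(entrada), np.asarray(objetivo), goal=0.0002, lr=0.01, show=100)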
Example #46
def full_background():
    global _full_background
    if _full_background is None:
        # Compute once and cache in the module-level variable (the original
        # returned the result without storing it).
        _full_background = (sp.genfromtxt('data/bg-2/bg.asc', delimiter='\t')[:, 1:-1] /
                            200 - 398) / 400
    return _full_background
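Both background loaders hand-roll a module-level cache; functools.lru_cache achieves the same with less machinery (a sketch, not the author's code):

import functools

@functools.lru_cache(maxsize=None)
def full_background_cached():
    # Same normalization as above, computed once on first call.
    return (sp.genfromtxt('data/bg-2/bg.asc', delimiter='\t')[:, 1:-1] / 200 - 398) / 400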
Example #47
#!/usr/bin/python
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt

data = sp.genfromtxt("Ast_Doller.d")
x=data[:,0]
y=data[:,1]
plt.subplot(241)
plt.plot(x,y)
plt.title("Ast")
plt.tick_params(labelbottom=False)  # newer matplotlib wants a bool, not 'off'
plt.grid()

data = sp.genfromtxt("Bri_Pound.d")
x=data[:,0]
y=data[:,1]
plt.subplot(242)
plt.plot(x,y)
plt.title("Bri")
plt.tick_params(labelbottom=False)
plt.grid()


data = sp.genfromtxt("Chi_Yuan.d")
x=data[:,0]
y=data[:,1]
plt.subplot(243)
plt.plot(x,y)
plt.title("Chi")
plt.tick_params(labelbottom=False)
import matplotlib.pyplot as plot
import scipy
# Read and split the CSV data below
Data = scipy.genfromtxt('Iris.csv', delimiter=",")
label = Data[:, 0]
Data = Data[:, 1:]
#print(label)
#print(Data)
i = 0
for m in range(3):
    for n in range(3):
        if m != n:
            plot.subplot(231 + i)
            i = i + 1
            print(m, n, i)
            for t, marker, c in zip(range(3), ">ox", "rgb"):
                plot.scatter(Data[label == t, m],
                             Data[label == t, n],
                             marker=marker,
                             c=c)
plot.show()
Example #49
#!/usr/bin/python

import numpy as np
import scipy as sc
import pylab as plt

data = sc.genfromtxt('Host.dat')

r_host = ((data[0, 8] / 0.7) / 1000.)
x_host = data[0, 0] / 0.7
y_host = data[0, 1] / 0.7

r_sub = ((data[1, 8] / 0.7) / 1000.)
x_sub = data[1, 0] / 0.7
y_sub = data[1, 1] / 0.7

an = np.linspace(0, 2 * np.pi, 100)
plt.plot(r_sub * np.cos(an) + (x_sub),
         r_sub * np.sin(an) + (y_sub),
         color='black',
         label=r'$\nu_{circ,Sub}=$' + str(data[1, 10]) + ' kms$^{-1}$')
plt.plot(r_host * np.cos(an) + (x_host),
         r_host * np.sin(an) + (y_host),
         ls='--',
         color='gray',
         label=r'$\nu_{circ,Halo}=$' + str(data[0, 10]) + ' kms$^{-1}$')
plt.plot((x_host, x_sub), (y_host, y_sub), lw=3, color='black')
#plt.plot((0,x_sub),(0,y_sub),color='black')
#plt.plot((0,x_host),(0,y_host),color='black')
plt.title('Fernandez-Trincado et al. (2014)')
plt.text(333.701, 148.78, 'Bullet Group', fontsize='xx-large')
import numpy as np
import scipy as sp
import pandas as pd

row_name = sp.genfromtxt('1106_timestamp.txt', dtype=str, delimiter='\n')
col_name = sp.genfromtxt('second_ring.txt', dtype=int)
data = sp.genfromtxt('1106_velocity.txt')
# print(data)
# print(row_name.size)
# print(col_name.size)

dataFrame = pd.DataFrame(data=data, index=row_name, columns=col_name)
# print(dataFrame.all())

h5 = pd.HDFStore('1106_velocity.h5', 'w')

h5['data'] = dataFrame

h5.close()
Example #51
        button2.clicked.connect(self.createChart)

        self.setGeometry(30, 30, 790, 720)
        self.setWindowTitle('sosi hui bidlo 1488')
        self.show()

    def choseFile(self):

        arr = QtGui.QFileDialog.getOpenFileName(self)
        file = open(arr, 'r')
        self.full_data = file.read()
        file.close()

    def createChart(self):
        data = self.kernel.comput_first()
        self.widget.plotItem.plot(data, symbol='o')
        self.widget.resize(700, 700)
        self.widget.move(80, 10)
        self.widget.show()


def main(camp_dist):
    app = QtGui.QApplication(sys.argv)
    ex = UI(camp_dist)
    sys.exit(app.exec_())


if __name__ == '__main__':
    camp_dist = sp.genfromtxt('../data/camp_dist.tsv', delimiter='\t')
    main(camp_dist)
Example #52
def run():
    defaults = PaperDefaults()

    #David's globals
    _DEFAULT_KW97_TILTEFFECT_DEGPERPIX = .45  # <OToole77>
    _DEFAULT_TILTEFFECT_SIZE = 101  #101
    _DEFAULT_KW97_TILTEFFECT_CSIZE = iround(3.6 /
                                            _DEFAULT_KW97_TILTEFFECT_DEGPERPIX)
    _DEFAULT_KW97_TILTEFFECT_NSIZE = iround(5.4 /
                                            _DEFAULT_KW97_TILTEFFECT_DEGPERPIX)
    _DEFAULT_KW97_TILTEFFECT_FSIZE = iround(10.7 /
                                            _DEFAULT_KW97_TILTEFFECT_DEGPERPIX)
    _DEFAULT_TILTEFFECT_CVAL = .5
    _DEFAULT_TILTEFFECT_SVALS = np.linspace(0.0, 0.5, 10)
    _DEFAULT_KW97_TILTEFFECT_SCALE = 1.25
    _DEFAULT_TILTEFFECT_NPOINTS = 25  #100
    _DEFAULT_TILTEFFECT_CIRCULAR = True
    _DEFAULT_TILTEFFECT_DECODER_TYPE = 'circular_vote'
    csvfiles = [
        os.path.join(defaults._DATADIR, 'KW97_GH.csv'),
        os.path.join(defaults._DATADIR, 'KW97_JHK.csv'),
        os.path.join(defaults._DATADIR, 'KW97_LL.csv'),
        os.path.join(defaults._DATADIR, 'KW97_SJL.csv'),
    ]

    # experiment parameters
    cpt = (_DEFAULT_TILTEFFECT_SIZE // 2, _DEFAULT_TILTEFFECT_SIZE // 2)
    spt = (_DEFAULT_TILTEFFECT_SIZE // 2,
           _DEFAULT_TILTEFFECT_SIZE // 2 + _DEFAULT_KW97_TILTEFFECT_CSIZE)
    dt_in = _DEFAULT_TILTEFFECT_CVAL - _DEFAULT_TILTEFFECT_SVALS

    # simulate populations
    im = sp.array([[
        stim.get_center_nfsurrounds(size=_DEFAULT_TILTEFFECT_SIZE,
                                    csize=_DEFAULT_KW97_TILTEFFECT_CSIZE,
                                    nsize=_DEFAULT_KW97_TILTEFFECT_NSIZE,
                                    fsize=_DEFAULT_KW97_TILTEFFECT_FSIZE,
                                    cval=_DEFAULT_TILTEFFECT_CVAL,
                                    nval=sp.nan,
                                    fval=sval,
                                    bgval=sp.nan)
    ] for sval in _DEFAULT_TILTEFFECT_SVALS])

    # get shifts for model for both papers, and from digitized data
    sortidx = sp.argsort(dt_in)  # re-order in increasing angular differences

    # O'Toole and Wenderoth (1977)
    n_subjects = len(csvfiles)
    ds_kw97_paper_x = sp.zeros((n_subjects, 9))
    ds_kw97_paper_y = sp.zeros((n_subjects, 9))

    for sidx, csv in enumerate(csvfiles):
        ds_kw97_paper_x[sidx], ds_kw97_paper_y[sidx] = \
            sp.genfromtxt(csv, delimiter=',').T

    ds_kw97_paper_x = (ds_kw97_paper_x + 360.) % 360. - 45.
    ds_kw97_paper_y = 45. - ds_kw97_paper_y

    for sidx in range(n_subjects):
        ds_kw97_paper_x[sidx] = ds_kw97_paper_x[sidx][sp.argsort(
            ds_kw97_paper_x[sidx])]

    extra_vars = {}
    extra_vars['scale'] = _DEFAULT_KW97_TILTEFFECT_SCALE
    extra_vars['decoder'] = _DEFAULT_TILTEFFECT_DECODER_TYPE
    extra_vars['npoints'] = _DEFAULT_TILTEFFECT_NPOINTS
    extra_vars['cval'] = _DEFAULT_TILTEFFECT_CVAL
    extra_vars['sortidx'] = sortidx
    extra_vars['cpt'] = cpt
    extra_vars['spt'] = spt
    # Python 3 does not leak `sval` from the list comprehension above, so set
    # it explicitly to the last surround value (Python 2 relied on the leak).
    extra_vars['sval'] = _DEFAULT_TILTEFFECT_SVALS[-1]
    extra_vars['kind'] = 'circular'
    extra_vars['figure_name'] = 'f3b'
    extra_vars['return_var'] = 'O'
    extra_vars['hp_file'] = os.path.join(defaults._FIGURES, 'best_hps.npz')

    adjusted_gt = signal.resample(np.mean(ds_kw97_paper_y, axis=0), 10)
    optimize_model(im, adjusted_gt, extra_vars, defaults)
Example #53
                chi2 = deepcopy(chi2try)
                accept += 1

    return chi2best, pbest, accept / float(ntrial)


if len(sys.argv) == 1:
    print('Usage:')
    print('python ref_make.py   speclist    window  outfile.txt')
    print('speclist--      1 col ascii file with list of files to combine')
    print('window---       window file designating wavelengths for the EmissionLine')
    print('outfile.txt---  output spectrum, after smoothing')
    sys.exit()

#list of spectra for the reference (dtype='a' yields bytes under Python 3; use dtype=str there)
reflist = sp.genfromtxt(sys.argv[1], dtype='a')
#window for line to align
window = sp.genfromtxt(sys.argv[2])

S, L = [], []
for ref in reflist:
    s = TextSpec(ref)
    s.set_interp(style='linear')
    m = (s.wv > 4500) * (s.wv < 7500)
    s.wv = s.wv[m]
    s.f = s.f[m]
    s.ef = s.ef[m]

    S.append(s)

#    plt.plot(s.wv,s.f,'k')
Example #54
# -*- coding: utf-8 -*-
"""
Created on Sun Oct  6 14:51:19 2019

@author: HP
"""

#### This is a sample script for reading data from a single file, and visualizing it in a simple plot.

import scipy as sp
import numpy as np
import matplotlib.pyplot as plt

#### Use sample data which is available on the github account.
data = sp.genfromtxt('Atmospheric_data.ftr', skip_header=1)
altitude = data[:, 1]  #altitude  (column 2)
pressure = data[:, 2]  #atmospheric pressure (column 3)
u = data[:, 8]  #zonal winds (column 9)

#plt.plot(u,altitude); plt.axvline(x=0)
plt.plot(u, pressure)
plt.axvline(x=0)
plt.ylim(1000, 0)

#plt.axvline is for plotting a vertical line
#plt.axhline is for plotting a horizontal line
Example #55
norm = 1.e-15
pvector = [norm, 0.5e4, 0.1, 0.0, 0.5e8]

BCflux = model.model_flux(pvector)

nwv = 4861
m = abs(spectrum.wavelengths - nwv) == min(abs(spectrum.wavelengths - nwv))
BCflux /= BCflux[m]

plt.plot(spectrum.wavelengths, BCflux, 'k|')
plt.plot([1000, 5000], [norm, norm], 'k--')
plt.plot([3646, 3646], [0, norm * 1.3], 'r--')

x2, y2 = sp.genfromtxt(
    '/home/rrlyrae/fausnaugh/repos/mcmc_deconvol/Data/FakeData/BaC_comp/FakeBac_lines01.dat',
    unpack=1)
x3, y3 = sp.genfromtxt(
    '/home/rrlyrae/fausnaugh/AGNstorm/Denney2009templates/Matthias_temps/balc1001001.dat',
    unpack=1,
    usecols=(0, 1))
x2, y2 = sp.genfromtxt(
    '/home/rrlyrae/fausnaugh/repos/mcmc_deconvol/Data/FakeData/BaC_comp2/FakeBac-full_nrm1.dat',
    unpack=1)

m = abs(x2 - nwv) == min(abs(x2 - nwv))
y2 /= y2[m]

m = abs(x3 - 3646) == min(abs(x3 - 3646))
y3 /= y3[m]
def optim_f3(lesions, out_dir):
    defaults = PaperDefaults()
    if lesions is not None:
        defaults.lesions = lesions
    #David's globals
    _DECODER_TYPE = None  #'circular_vote'
    _DEFAULT_TILTEFFECT_DEGPERPIX = .25  # <OToole77>
    _DEFAULT_TILTEFFECT_SIZE = 51  #101
    _DEFAULT_TILTEFFECT_CSIZE = iround(2. / _DEFAULT_TILTEFFECT_DEGPERPIX)
    _DEFAULT_TILTEFFECT_SSIZE = iround(8. / _DEFAULT_TILTEFFECT_DEGPERPIX)
    _DEFAULT_TILTEFFECT_CVAL = .5
    _DEFAULT_TILTEFFECT_SVALS = np.linspace(0.0, 0.5, 10)
    _DEFAULT_TILTEFFECT_SCALES = {'ow77': 0.40, 'ms79': 0.60}  #0.45
    _DEFAULT_TILTEFFECT_NPOINTS = 25  #100
    _DEFAULT_TILTEFFECT_CIRCULAR = True
    _DEFAULT_TILTEFFECT_DECODER_TYPE = 'circular_vote'
    _DEFAULT_TILTEFFECT_CSV = {
        'ow77': os.path.join(defaults._DATADIR, 'OW_fig4_Black.csv'),
        'ms79': os.path.join(defaults._DATADIR, 'MS1979.csv'),
    }

    # experiment parameters
    m = (lambda x: sp.sin(sp.pi * x)) if _DEFAULT_TILTEFFECT_CIRCULAR else (
        lambda x: x)
    assert len(_DEFAULT_TILTEFFECT_CSV) == 2
    assert len(_DEFAULT_TILTEFFECT_SCALES) == 2
    cpt = (_DEFAULT_TILTEFFECT_SIZE // 2, _DEFAULT_TILTEFFECT_SIZE // 2)
    spt = (_DEFAULT_TILTEFFECT_SIZE // 2,
           _DEFAULT_TILTEFFECT_SIZE // 2 + _DEFAULT_TILTEFFECT_CSIZE)
    dt_in = _DEFAULT_TILTEFFECT_CVAL - _DEFAULT_TILTEFFECT_SVALS
    dtmin = dt_in.min()
    dtmax = dt_in.max()

    # simulate populations
    im = sp.array([[
        stim.get_center_nfsurrounds(size=_DEFAULT_TILTEFFECT_SIZE,
                                    csize=_DEFAULT_TILTEFFECT_CSIZE,
                                    nsize=_DEFAULT_TILTEFFECT_CSIZE,
                                    fsize=_DEFAULT_TILTEFFECT_SSIZE,
                                    cval=_DEFAULT_TILTEFFECT_CVAL,
                                    nval=_DEFAULT_TILTEFFECT_CVAL,
                                    fval=sval,
                                    bgval=sp.nan)
    ] for sval in _DEFAULT_TILTEFFECT_SVALS])

    # get shifts for model for both papers, and from digitized data
    sortidx = sp.argsort(dt_in)  # re-order in increasing angular differences

    # O'Toole and Wenderoth (1977)
    _, ds_ow77_paper_y = sp.genfromtxt(_DEFAULT_TILTEFFECT_CSV['ow77'],
                                       delimiter=',').T

    extra_vars = {}
    extra_vars['scale'] = _DEFAULT_TILTEFFECT_SCALES['ow77']
    extra_vars['decoder'] = _DEFAULT_TILTEFFECT_DECODER_TYPE
    extra_vars['npoints'] = _DEFAULT_TILTEFFECT_NPOINTS
    extra_vars['cval'] = _DEFAULT_TILTEFFECT_CVAL
    extra_vars['sortidx'] = sortidx
    extra_vars['cpt'] = cpt
    extra_vars['spt'] = spt
    # As above, Python 3 does not leak `sval` from the comprehension.
    extra_vars['sval'] = _DEFAULT_TILTEFFECT_SVALS[-1]
    extra_vars['kind'] = 'circular'
    extra_vars['figure_name'] = 'f3'
    extra_vars['return_var'] = 'O'

    scores, params = optimize_model(im, ds_ow77_paper_y, extra_vars, defaults)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    np.savez(os.path.join(
        out_dir, 'hp_optimization_lesion_' + '_'.join(defaults.lesions)),
             scores=scores,
             params=params)
Example #57
def chapter1():
    a = np.array([0, 1, 2, 3, 4, 5, 6, 7])
    # Print the array
    #print(a)
    # Print the number of dimensions
    #print(a.ndim)
    # Print the shape
    #print(a.shape)

    # Reshape a
    b = a.reshape(4, 2)
    b[0][1] = 77
    # Print the array
    #print(b)
    # Print the number of dimensions
    #print(b.ndim)
    # Print the shape
    #print(b.shape)

    c = a.reshape((4, 2)).copy()
    c[0][0] = 99
    #print(c)
    #print(a)

    #print(a[np.array([2,4,6])])
    #print(a > 5)
    #print(a.clip(0, 4))

    # Compare processing speed
    #normal_py_sec = timeit.timeit('sum(x*x for x in xrange(1000))', number = 10000)
    #good_num_sec = timeit.timeit('na.dot(na)', setup="import numpy as np; na=np.arange(1000)", number=10000)
    #print("normal_py %f sec" % normal_py_sec)
    #print("good_num %f sec" % good_num_sec)

    # Load the data file
    data = sp.genfromtxt("web_traffic.tsv", delimiter="\t")
    #print(data[:10])
    #print(data.shape)

    x = data[:, 0]
    y = data[:, 1]

    x = x[~sp.isnan(y)]
    y = y[~sp.isnan(y)]

    # Draw the scatter plot
    plt.scatter(x, y)
    plt.title("web traffic research")
    plt.xlabel("time")
    plt.ylabel("hits/hour")
    plt.xticks([w * 7 * 24 for w in range(10)],
               ['week %i' % w for w in range(10)])
    plt.autoscale(tight=True)
    plt.grid()
    #plt.show()

    # Fit error information
    fp1, residuals, rank, sv, rcond = sp.polyfit(x, y, 1, full=True)
    #print("Model Parameters %s:" % fp1)
    #print(residuals)

    # Draw the fitted line
    #f1 = sp.poly1d(fp1)
    #fx = sp.linspace(0, x[-1], 1000)
    #plt.plot(fx, f1(fx), linewidth=4)
    #plt.legend(["d=%i" % f1.order], loc="upper left")
    #plt.show()

    # Put the point of inflection at week 3.5
    inflection = 3 * 7 * 24
    xbf = x[:inflection]
    ybf = y[:inflection]
    xaf = x[inflection:]
    yaf = y[inflection:]

    fbf = sp.poly1d(sp.polyfit(xbf, ybf, 1))
    faf = sp.poly1d(sp.polyfit(xaf, yaf, 1))

    fbf_error = error(fbf, xbf, ybf)
    faf_error = error(faf, xaf, yaf)

    #print("Error Inflection %f" % (fbf_error + faf_error))

    # Find a suitable approximation
    frac = 0.3  # fraction of the data used for testing
    split_idx = int(frac * len(xaf))

    shuffled = sp.random.permutation(list(range(len(xaf))))  # shuffle the indices of xaf to sample 30% of it
    test = sorted(shuffled[:split_idx])  # test data
    train = sorted(shuffled[split_idx:])  # training data

    # Train each model on the training data
    #fbt1 = sp.poly1d(sp.polyfit(xaf[train], yaf[train], 1))
    fbt2 = sp.poly1d(sp.polyfit(xaf[train], yaf[train], 2))
    #fbt3 = sp.poly1d(sp.polyfit(xaf[train], yaf[train], 3))
    #fbt10 = sp.poly1d(sp.polyfit(xaf[train], yaf[train], 10))
    #fbt100 = sp.poly1d(sp.polyfit(xaf[train], yaf[train], 100))

    # Evaluate each model on the test data
    #for f in [fbt1, fbt2, fbt3, fbt10, fbt100]:
    #    print("Error  d=%i: %f" % (f.order, error(f, xaf[test], yaf[test])))

    # Use fbt2 to compute when requests exceed 100,000/h
    print(fbt2)

    reached_max = fsolve(fbt2 - 100000, 800) / (7 * 24)
    print("result %f" % reached_max[0])
def optim_f4(lesions, out_dir, initialize_model=False):
    defaults = PaperDefaults()
    if lesions is not None:
        defaults.lesions = lesions

    #David's globals
    _DEFAULT_KW2015_SO_PARAMETERS = {
        'filters': {
            'name': 'gabors',
            'aspect_ratio': .6,
            'sizes': sp.array([9]),
            'spatial_frequencies': sp.array([[9.0]]),
            'orientations': sp.arange(2) * sp.pi / 2,
            'phases': sp.array([0]),
            'with_center_surround': False,
            'padding': 'reflect',
            'corr': False,
            'ndp': False
        },
        'model': {
            'channels_so': ('R+G-', 'B+Y-', 'R+C-', 'Wh+Bl-', 'G+R-', 'Y+B-',
                            'C+R-', 'Bl+Wh-'),
            'normalize':
            False
        },
        'dnp_so': None,
        'selected_channels': [0, 1, 3, 4, 5, 7],
        'norm_channels': [0, 1, 3, 4, 5, 7]
    }

    size = 51
    csize = 9
    n_train = 32
    n_t_hues = 16
    n_s_hues = 16
    cxp = None
    csvfiles = [
        defaults._DATADIR + '/KW2015_%i.csv' % (i, )
        for i in range(0, 360, 45)
    ]
    percent_reg_train = 80.

    #Load data from experiments
    kw2015_fig2_x = sp.zeros((len(csvfiles), 16))
    kw2015_fig2_y = sp.zeros((len(csvfiles), 16))
    for idx, csv in enumerate(csvfiles):
        kw2015_fig2_x[idx], kw2015_fig2_y[idx] = \
            sp.genfromtxt(csv, delimiter=',')[1:].T
    spl = UnivariateSpline(x=kw2015_fig2_x.mean(0), y=kw2015_fig2_y.mean(0))
    kw2015_fig2_x_fit = sp.linspace(
        kw2015_fig2_x.mean(0).min(),
        kw2015_fig2_x.mean(0).max(), 360)
    kw2015_fig2_y_fit = spl(kw2015_fig2_x_fit)

    # experiment stimuli
    extra_vars = {}
    extra_vars['_DEFAULT_KW2015_SO_PARAMETERS'] = _DEFAULT_KW2015_SO_PARAMETERS
    extra_vars['_DEFAULT_FLOATX_NP'] = defaults._DEFAULT_FLOATX_NP
    extra_vars['size'] = size
    extra_vars['csize'] = csize
    extra_vars['n_train'] = n_train
    extra_vars['n_t_hues'] = n_t_hues
    extra_vars['n_s_hues'] = n_s_hues
    extra_vars['figure_name'] = 'f4'
    extra_vars['gt_x'] = kw2015_fig2_x_fit
    extra_vars['f4_stimuli_file'] = defaults.f4_stimuli_file
    extra_vars['return_var'] = 'I'
    extra_vars['precalculated_x'] = True

    if initialize_model:
        create_stims(extra_vars)
    stim_files = np.load(extra_vars['f4_stimuli_file'])

    #Run model
    #cx.run(so_all, from_gpu=False)
    #sx_all[:] = cx.Y.get()[:, :, size//2, size//2]
    scores, params = optimize_model(
        stim_files['so_ind'].reshape(
            n_t_hues * n_s_hues,
            len(_DEFAULT_KW2015_SO_PARAMETERS['norm_channels']), size,
            size).transpose(0, 2, 3, 1), np.mean(kw2015_fig2_y, axis=0),
        extra_vars, defaults)

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    np.savez(os.path.join(
        out_dir, 'hp_optimization_lesion_' + '_'.join(defaults.lesions)),
             scores=scores,
             params=params)
Example #59
    # Closed-form simple linear regression via the normal equations:
    # a = (n*Sxy - Sx*Sy) / (n*Sxx - Sx**2),  b = (Sy - a*Sx) / n
    n = len(x)
    a_numerator = n * math.fsum(map(lambda x, y: x * y, x,
                                    y)) - math.fsum(x) * math.fsum(y)
    a_denominator = math.fsum(map(lambda x: x * x, x)) * n - math.fsum(x)**2
    a = a_numerator / a_denominator

    b = (math.fsum(y) - a * math.fsum(x)) / n
    print(a, b)
    return a, b


def make_function(a, b):
    return lambda x: a * x + b


data = sp.genfromtxt("data.tsv", delimiter="\t")

x = data[:, 0]
y = data[:, 1]

f = make_function(1, 1)(2)
arg = getKoef(x, y)
a = make_function(arg[0], arg[1])
range_x = range(int(min(x)) - 10, int(max(x)) + 10)
function = [a(x) for x in range_x]

plt.plot(range_x, function)
plt.scatter(x, y)
plt.grid()
plt.show()
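For a sanity check, the closed-form coefficients from getKoef should agree with numpy's least-squares fit (a hedged sketch over the same x, y arrays):

import numpy as np
a_np, b_np = np.polyfit(x, y, 1)   # degree-1 fit returns [slope, intercept]
print(a_np, b_np)                  # should match getKoef(x, y)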
Example #60
# Script - 1
import scipy as sp
import numpy as np

data = sp.genfromtxt("ML_curve_fitting_example.dat", delimiter="\t")
print(data[-10:])  ## print last 10 items in our dataset
print(data.shape)

## separate data in two different lists
x = data[:, 0]
y = data[:, 1]

## a quick statistic: min and max of word's score
print("y min = " + repr(min(y)) + "\t y max = " + repr(max(y)))

## the score range is very small, so scale it up
y = sp.multiply(y, 100000)

## if there are missing values, those have to be taken care of
#print(sp.sum(sp.isnan(y)))
x = x[~sp.isnan(y)]  # not a typo: NaN appears in the score column, not in the years
y = y[~sp.isnan(y)]

## lets plot something
import matplotlib.pyplot as plt
plt.scatter(x, y)
plt.title("Use of word \"artificial\" over 1500 to 1999")
plt.xlabel("Years")
plt.ylabel("Score of use")