Code Example #1
File: pupil.py Project: benjaminpope/pysco
def getSquareSamplingPoints(in_mask,dens=100.0,scale=100.0,filename='',prec=6,shift_x=0.0,shift_y=0.0) :
	'''generates discrete sampling points array in mask'''
	step=1.0/dens
	#step_pix = int(scale/dens)
	#xs=int(round(shift_x*scale))
	#ys=int(round(shift_y*scale))
	size_y=in_mask.shape[0]/scale
	size_x=in_mask.shape[1]/scale
	res=[]
	x00=step+shift_x
	y00=step+shift_y
	yC=in_mask.shape[0]/scale/2
	xC=in_mask.shape[1]/scale/2
	x=x00
	y=y00
	i=0
	while (x<size_x and y<size_y) :
		if in_mask[int(y*scale),int(x*scale)]>0 :
			res.append([round(x-xC,prec),round(y-yC,prec)])
		x+=step
		if x>=size_x :
			x=x00
			y+=step
			i+=1
	if len(filename)>0 :
		np.savetxt(filename,res)
	return np.asarray(res)
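A minimal usage sketch for the function above. The circular mask and the `dens`/`scale` values are illustrative only (not taken from pysco), and numpy is assumed to be imported as np alongside the function:

import numpy as np

# Illustrative 200x200 binary mask with a centered circular aperture.
yy, xx = np.mgrid[0:200, 0:200]
mask = ((xx - 100)**2 + (yy - 100)**2 <= 80**2).astype(float)

# scale=100 maps one mask coordinate unit to 100 pixels; dens=10 gives a 0.1-unit step.
pts = getSquareSamplingPoints(mask, dens=10.0, scale=100.0, prec=3)
print(pts.shape)  # (n_points, 2), coordinates relative to the mask centre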
Code Example #2
File: leafsnake.py Project: fjansson/leafsnake
def saveTreeText(G, edgeName, nodeName):
    edge_keys = ['Strahler', 'level', 'branchlength', 'branchlength_e', 'alpha', 'alpha_e', 'W_mean', 'W_max', 'W_min', 'W_std']
    node_keys = ['Strahler', 'level', 'dia']

    px_mm_factor = 1
    if px_mm != None:
        px_mm_factor = px_mm

    # make a dictionary of conversion factors
    conversion = {}
    for k in length_keys:
        conversion[k] = 1.0/px_mm_factor
    for k in angle_keys:
        conversion[k] = 180/np.pi
             
    eheader = ''
    for k in edge_keys:
        eheader += k + ', ' 
    nheader = ''
    for k in node_keys:
        nheader += k + ', ' 
    
    output = [[e[2][k] * (conversion[k] if k in conversion else 1)
               for k in edge_keys] for e in G.edges_iter(data=True)]
    output.sort() # sort on Strahler order
    np.savetxt(edgeName, output, fmt = '%3d,%3d' + (len(edge_keys)-2) * ', %8.3f', header=eheader, comments='#')

    output = [[n[1][k] * (conversion[k] if k in conversion else 1) 
               for k in node_keys] for n in G.nodes_iter(data=True)]
    output.sort() # sort on Strahler order
    np.savetxt(nodeName, output, fmt = '%3d,%3d' + (len(node_keys)-2) * ', %8.3f', header=nheader, comments='#')
Code Example #3
File: modularity.py Project: Lx37/dmgraphanalysis
def compute_mod_cor_mat(mod_average_ts_file,regressor_file):

    import os
    import numpy as np

    from dmgraphanalysis.utils_cor import compute_weighted_cor_mat_non_zeros

    print 'load regressor_vect'
    
    regressor_vect = np.loadtxt(regressor_file)
    
    print 'load mod_average_ts_mat'
    
    mod_average_ts = np.load(mod_average_ts_file)
    
    print 'compute_weighted_cor_mat_non_zeros'
    
    mod_cor_mat,mod_Z_cor_mat = compute_weighted_cor_mat_non_zeros(np.transpose(mod_average_ts),regressor_vect)

    print mod_cor_mat
    print mod_Z_cor_mat

    print "saving mod cor mat"
    mod_cor_mat_file = os.path.abspath('mod_cor_mat.txt')

    np.savetxt(mod_cor_mat_file,mod_cor_mat,fmt = '%2.2f')

    print "saving mod Z cor mat"
    mod_Z_cor_mat_file = os.path.abspath('mod_Z_cor_mat.txt')

    np.savetxt(mod_Z_cor_mat_file,mod_Z_cor_mat,fmt = '%2.2f')

    
    return mod_cor_mat_file,mod_Z_cor_mat_file
Code Example #4
File: create_fsl_model.py Project: RanjitK/C-PAC
def create_mat_file(data, model_name, outputModelFilesDirectory):

    """
    create the .mat file
    """

    dimx = None
    dimy = None
    if len(data.shape) == 1:
        dimy = 1
        dimx = data.shape[0]
    else:
        dimx, dimy = data.shape

    ppstring = "/PPheights"

    for i in range(0, dimy):

        ppstring += "\t" + "%1.5e" % (1.0)

    ppstring += "\n"

    f = open(os.path.join(outputModelFilesDirectory, model_name + ".mat"), "w")

    print >> f, "/NumWaves\t%d" % dimy
    print >> f, "/NumPoints\t%d" % dimx
    print >> f, ppstring

    print >> f, "/Matrix"
    np.savetxt(f, data, fmt="%1.5e", delimiter="\t")

    f.close()
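A hypothetical call of create_mat_file with a small two-regressor design matrix; the matrix, model name, and output directory are made up for illustration, and the function itself relies on module-level imports of os and np (note it uses Python 2 `print >> f` syntax):

import os
import numpy as np

# Illustrative 4x2 design matrix: two groups coded as indicator columns.
design = np.array([[1.0, 0.0],
                   [1.0, 0.0],
                   [0.0, 1.0],
                   [0.0, 1.0]])

# Writes ./group_model.mat with /NumWaves, /NumPoints, /PPheights and /Matrix sections.
create_mat_file(design, "group_model", os.getcwd())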
Code Example #5
File: func1d.py Project: srirampr/abipy
 def to_file(self, path, fmt='%.18e', delimiter=' ', newline='\n', header='', footer='', comments='# '):
     """
     Save self to a text file. See :func:`np.savetxt` for the description of the variables
     """
     data = zip(self.mesh, self.values)
     np.savetxt(path, data, fmt=fmt, delimiter=delimiter, newline=newline,
                header=header, footer=footer, comments=comments)
Code Example #6
File: test_kit.py Project: HSMin/mne-python
def test_decimate():
    """Test decimation of digitizer headshapes with too many points."""
    # load headshape and convert to meters
    hsp_mm = _get_ico_surface(5)['rr'] * 100
    hsp_m = hsp_mm / 1000.

    # save headshape to a file in mm in temporary directory
    tempdir = _TempDir()
    sphere_hsp_path = op.join(tempdir, 'test_sphere.txt')
    np.savetxt(sphere_hsp_path, hsp_mm)

    # read in raw data using spherical hsp, and extract new hsp
    with warnings.catch_warnings(record=True) as w:
        raw = read_raw_kit(sqd_path, mrk_path, elp_txt_path, sphere_hsp_path)
    assert_true(any('more than' in str(ww.message) for ww in w))
    # collect headshape from raw (should now be in m)
    hsp_dec = np.array([dig['r'] for dig in raw.info['dig']])[8:]

    # with 10242 points and _decimate_points set to resolution of 5 mm, hsp_dec
    # should be a bit over 5000 points. If not, something is wrong or
    # decimation resolution has been purposefully changed
    assert_true(len(hsp_dec) > 5000)

    # should have similar size, distance from center
    dist = np.sqrt(np.sum((hsp_m - np.mean(hsp_m, axis=0))**2, axis=1))
    dist_dec = np.sqrt(np.sum((hsp_dec - np.mean(hsp_dec, axis=0))**2, axis=1))
    hsp_rad = np.mean(dist)
    hsp_dec_rad = np.mean(dist_dec)
    assert_almost_equal(hsp_rad, hsp_dec_rad, places=3)
Code Example #7
File: myplot.py Project: johuck/ccsgp
 def _ascii(self):
   """write ascii file(s) w/ data contained in plot"""
   if not os.path.exists(self.name): os.makedirs(self.name)
   for k, v in self.dataSets.iteritems():
     np.savetxt(
       self.name + '/' + self._prettify(k) + '.dat', v, fmt='%.4e'
     )
Code Example #8
 def writeOut(self, outname='',include_state=False):
     """
     Function writes out to file... only doing primitive variables for now.
     rho, u, p, maybe tack on e and h ....
     
     This needs to be nice, but for debugging purposes, only writing
     out to ascii for now... just to be quick and easy to focus on
     coding rather than fancy outputting.....
     """
     x   = self.grid.center()
     rho = self.getPrimitive('Density')
     u   = self.getPrimitive('Velocity')
     P   = self.getPrimitive('Pressure')
     
     if include_state:
         data = np.column_stack((x,rho,u,P,self.q[0],self.q[1],self.q[2]))
         header = '# x Density Velocity Pressure q0 q1 q2'
         
     else:
         data = np.column_stack((x,rho,u,P))
         header = '# x Density Velocity Pressure'
 
     
    # np.savetxt(outname + '_simstate_%3.3f_.txt'%(self.t), data,
     np.savetxt(outname + '_simstate.txt', data, header=header, fmt='%1.4e')
Code Example #9
File: meas_info.py Project: YoheiOseki/mne-python
def _write_dig_points(fname, dig_points):
    """Write points to file

    Parameters
    ----------
    fname : str
        Path to the file to write. The kind of file to write is determined
        based on the extension: '.txt' for tab separated text file.
    dig_points : numpy.ndarray, shape (n_points, 3)
        Points.
    """
    _, ext = op.splitext(fname)
    dig_points = np.asarray(dig_points)
    if (dig_points.ndim != 2) or (dig_points.shape[1] != 3):
        err = ("Points must be of shape (n_points, 3), "
               "not %s" % (dig_points.shape,))
        raise ValueError(err)

    if ext == '.txt':
        with open(fname, 'wb') as fid:
            version = __version__
            now = dt.now().strftime("%I:%M%p on %B %d, %Y")
            fid.write(b("% Ascii 3D points file created by mne-python version "
                        "{version} at {now}\n".format(version=version,
                                                      now=now)))
            fid.write(b("% {N} 3D points, "
                        "x y z per line\n".format(N=len(dig_points))))
            np.savetxt(fid, dig_points, delimiter='\t', newline='\n')
    else:
        msg = "Unrecognized extension: %r. Need '.txt'." % ext
        raise ValueError(msg)
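The docstring above fixes the expected input shape, (n_points, 3). A reduced sketch of the same pattern, comment header lines followed by np.savetxt into an already-open handle, without the mne-python helpers (`b`, `__version__`, `dt`), might look like this:

import numpy as np

points = np.random.rand(5, 3)  # illustrative (n_points, 3) array
with open('dig_points.txt', 'w') as fid:
    fid.write("% Ascii 3D points file\n")
    fid.write("% {} 3D points, x y z per line\n".format(len(points)))
    np.savetxt(fid, points, delimiter='\t', newline='\n')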
Code Example #10
File: abstract_learning.py Project: MinaKh/bcpnn-mt
def prepare_input(tp, params, my_units=None):
    n_units = tp.shape[0]
    dt = params['dt_rate'] # [ms] time step for the non-homogenous Poisson process 

    time = np.arange(0, params['t_stimulus'], dt)

    if (my_units == None):
        my_units = xrange(n_units)
    else:
        my_units = xrange(my_units[0], my_units[1])

    n_cells = len(my_units)
    L_input = np.zeros((n_cells, time.shape[0]))
#    offset = 100
    for i_time, time_ in enumerate(time):
        if (i_time % 100 == 0):
            print "t:", time_
#        i_time += offset
#        i_time = min(i_time, max(i_time, len(time)-1))
        L_input[:, i_time] = utils.get_input(tuning_prop[my_units, :], params, time_/params['t_sim'])

    for i_, unit in enumerate(my_units):
        output_fn = params['input_rate_fn_base'] + str(unit) + '.dat'
        print 'output_fn:', output_fn
        np.savetxt(output_fn, L_input[i_, :])
Code Example #11
File: abstract_learning.py Project: MinaKh/bcpnn-mt
def normalize_input(params):
    if pc_id == 0:
        print 'normalize_input'
        dt = params['dt_rate'] # [ms] time step for the non-homogenous Poisson process 
        L_input = np.zeros((params['n_exc'], params['t_stimulus']/dt))

        v_max = params['v_max']
        if params['log_scale']==1:
            v_rho = np.linspace(v_max/params['N_V'], v_max, num=params['N_V'], endpoint=True)
        else:
            v_rho = np.logspace(np.log(v_max/params['N_V'])/np.log(params['log_scale']),
                            np.log(v_max)/np.log(params['log_scale']), num=params['N_V'],
                            endpoint=True, base=params['log_scale'])
        v_theta = np.linspace(0, 2*np.pi, params['N_theta'], endpoint=False)
        index = 0
        for i_RF in xrange(params['N_RF_X']*params['N_RF_Y']):
            index_start = index
            for i_v_rho, rho in enumerate(v_rho):
                for i_theta, theta in enumerate(v_theta):
                    fn = params['input_rate_fn_base'] + str(index) + '.dat'
                    L_input[index, :] = np.loadtxt(fn)
                    print 'debug', fn
                    index += 1
            index_stop = index
            print 'before', i_RF, L_input[index_start:index_stop, :].sum()
            if (L_input[index_start:index_stop, :].sum() > 1):
                L_input[index_start:index_stop, :] /= L_input[index_start:index_stop, :].sum()
            print 'after', i_RF, L_input[index_start:index_stop, :].sum()

        for i in xrange(params['n_exc']):
            output_fn = params['input_rate_fn_base'] + str(i) + '.dat'
            print 'output_fn:', output_fn
            np.savetxt(output_fn, L_input[i, :])
    if comm != None:
        comm.barrier()
Code Example #12
File: ida.py Project: viratupadhyay/ida
def Nch_write(nch, fname):
    N = np.size(nch)
    x = np.arange(N)
    A = [x,nch]
    A = np.array(A)
    A = np.transpose(A)
    np.savetxt(fname, A, fmt="%4d")
Code Example #13
def main(simulations, outname, edgeorevent, plot, savedata):
	mysimulations=open(simulations, 'r').readlines()
	truescores=[]
	fpscores=[]
	type=0
#	(min_count, max_count) = (5,12)
	(min_count, max_count) = (0,500)
	for i in xrange(len(mysimulations)): 
		line=mysimulations[i]
		if len(line.strip().split('\t')) ==3: 
			(dir, blocks, events)=line.strip().split('\t')
		else: 
			(dir, simid, blocks, events)=line.strip().split('\t')
		statsfile=os.path.join(dir, "%s.stats" % edgeorevent)
		data=SimData(statsfile)
		truecount=data.TP[type] + data.FN[type]
		if truecount >= min_count and truecount <= max_count:
			sys.stderr.write("truecount is %d\n" % (truecount))
			datfile=os.path.join(dir, "%s.dat" % edgeorevent)
			add_scores(datfile, truescores, fpscores)
	if savedata: 
		np.savetxt("%s.tp.txt" % outname, np.array(truescores))
		np.savetxt("%s.fp.txt" % outname, np.array(fpscores))
	if plot: 
		title=edgeorevent
		sys.stderr.write("making plot...\n")
		make_histograms(truescores, fpscores, title) 	
		plt.savefig(outname+".png")	
Code Example #14
File: AEViewer.py Project: zhangwise/ae
    def save(self):
        if self.data is None:
            return
        ax = self.fig.gca()
        start,end = ax.get_xlim()

        d = Dialog(self.root, "Save data", [
            ("Start [{}]:".format(self.data.timeunit), start),
            ("End [{}]:".format(self.data.timeunit), end),
            ("Channel:", 0),
            ("WAV Rate [1/{}]:".format(self.data.timeunit), int(1/self.data.timescale))
            ])
        if d.result is None:
            return
        
        fname = tkFileDialog.asksaveasfilename(parent=self.root, 
                    filetypes=[('Envelope', '.txt .dat'), ('WAV','.wav'), ('BDAT','.bdat')])
        if not fname:
            return 
        
        start, end, channel, rate = d.result

        if fname[-4:] in [".txt", ".dat"]:
            from numpy import savetxt, transpose
            x,y = self.data.resample( (start,end), channel=channel, num=10000)
            savetxt(fname, transpose([x,y]))

        elif fname[-4:] == ".wav":
            r = int(start/self.data.timescale), int(end/self.data.timescale)
            self.data.save_wav(fname, range=r, channel=channel, rate=rate)

        elif fname[-5:] == ".bdat":
            r = int(start/self.data.timescale), int(end/self.data.timescale)
            self.data.save_bdat(fname, range=r, channel=channel)
Code Example #15
File: generic.py Project: maedoc/tvb-virtualizer
    def write_connectivity_zip(self, conn_dir, weigths, tracts, cortical, region_names, centers, areas, orientations, atlas):
        tmpdir = tempfile.TemporaryDirectory()

        file_weigths = os.path.join(tmpdir.name, 'weights.txt')
        file_tracts = os.path.join(tmpdir.name, 'tract_lengths.txt')
        file_cortical = os.path.join(tmpdir.name, 'cortical.txt')
        file_centers = os.path.join(tmpdir.name, 'centers.txt')
        file_areas = os.path.join(tmpdir.name, 'areas.txt')
        file_orientations = os.path.join(tmpdir.name, 'average_orientations.txt')

        numpy.savetxt(file_weigths, weigths, fmt='%d')
        numpy.savetxt(file_tracts, tracts, fmt='%.3f')
        numpy.savetxt(file_cortical, cortical, fmt='%d')

        with open(str(file_centers), "w") as f:
            for idx, (val_x, val_y, val_z) in enumerate(centers):
                f.write("%s %.2f %.2f %.2f\n" % (region_names[idx], val_x, val_y, val_z))

        numpy.savetxt(file_areas, areas, fmt='%.2f')
        numpy.savetxt(file_orientations, orientations, fmt='%.2f %.2f %.2f')

        filename = os.path.join(conn_dir, OutputConvFiles.CONNECTIVITY_ZIP.value.replace("%s", atlas))
        with ZipFile(filename, 'w') as zip_file:
            zip_file.write(file_weigths, os.path.basename(file_weigths))
            zip_file.write(file_tracts, os.path.basename(file_tracts))
            zip_file.write(file_cortical, os.path.basename(file_cortical))
            zip_file.write(file_centers, os.path.basename(file_centers))
            zip_file.write(file_areas, os.path.basename(file_areas))
            zip_file.write(file_orientations, os.path.basename(file_orientations))
Code Example #16
File: new_experiment.py Project: fepettersen/thesis
	def SaveError(self,header=None):
		fname = self.result_path+'/error.txt'
		f = np.asanyarray(self.error)
		if header is None:
			np.savetxt(fname,f)
		else:
			np.savetxt(fname,f,header=header)
Code Example #17
File: model.py Project: davidchoi/radtranpy
	def write_ft_input(self, filename='workmodl000'):
		"""
		Output atmosphere model in the format required by the fortran
		adding/doubling code.
		"""
		data_array = np.zeros([self.nlayers, 5])		
		
		for x in range(self.nlayers):
			layer = getattr(self, self.layer_names[x])
			data_array[x,:] = [layer.pi0bl, layer.tau, layer.p, 
							   layer.rp, layer.pi0uv]
			#NOTE order difference with writetxt()
			#print data_array[x,:] #debug
			
		#open file
		
		f = open(filename, 'w')
		
		f.write('{:12}{:12}{:12.5f}\n'.format(self.nlayers,
											  self.ablayer, self.abfactor))
		
		np.savetxt(f, data_array, fmt='%10.5f', delimiter='  ')
		
		f.close()
		logging.info("Model written out for RT input.")
Code Example #18
File: rww_tools.py Project: TCioms/adc_tests
def dohist(base_name='hist', type='sin', gethist=True, plt=True):
  hc_name=base_name+'_cores'
  if gethist:
    get_hist(fname=hc_name)
  res = np.empty([5, 256], dtype=float)
  res[0] = np.arange(256, dtype=float)
  z_fact = 500.0/256.0
  (a1,z1), res[1] =fit_cores.fit_hist(1,type, hc_name)
  (a2,z2), res[2] =fit_cores.fit_hist(2,type, hc_name)
  (a3,z3), res[3] =fit_cores.fit_hist(3,type, hc_name)
  (a4,z4), res[4] =fit_cores.fit_hist(4,type, hc_name)
  avamp = (a1+a2+a3+a4)/4.0
  # Reverse the amplitude and zero differences so they can be applied to the
  # offset and gain registers directly.  The phase registers don't need the
  # reversal
  a1p = 100*(avamp -a1)/avamp
  a2p = 100*(avamp -a2)/avamp
  a3p = 100*(avamp -a3)/avamp
  a4p = 100*(avamp -a4)/avamp
  ogp=np.array([z_fact*z1, a1p, 0, z_fact*z2, a2p, 0, z_fact*z3, a3p, 0, \
      z_fact*z4, a4p, 0])
  avz=(z1+z2+z3+z4)*z_fact/4.0
  print "#avg    %7.4f %7.4f %8.4f" %  (ogp[1], avamp, 0)
  print "core A  %7.4f %7.4f %8.4f" %  tuple(ogp[0:3])
  print "core B  %7.4f %7.4f %8.4f" %  tuple(ogp[3:6])
  print "core C  %7.4f %7.4f %8.4f" %  tuple(ogp[6:9])
  print "core D  %7.4f %7.4f %8.4f" %  tuple(ogp[9:12])
  np.savetxt(base_name+"_ogp.meas", ogp, fmt= "%8.4f")
  r_name=base_name+'.res'
  np.savetxt(r_name, np.transpose(res), fmt='%3i %6.3f %6.3f %6.3f %6.3f')
  fit_cores.fit_inl(fname=r_name)
  if plt:
    plotres(r_name)
Code Example #19
File: pmfs.py Project: TensorDuck/project_tools
def plot_1D_pmf(coord,title):
    ''' Plot a 1D pmf for a coordinate.'''

    x = get_data(coord)

    path = os.getcwd()
    savedir = path+"/pmfs"
    if os.path.exists(savedir) == False:
        os.mkdir(savedir)

    if coord in ["Rg","rmsd"]:
        skip = 80
    else:
        skip = 4 
    vals = np.unique(list(x))[::skip]

    n,bins = np.histogram(x,bins=vals,density=True)
    np.savetxt(savedir+"/"+coord+"_n.dat",n,delimiter=" ",fmt="%.4f")
    np.savetxt(savedir+"/"+coord+"_bins.dat",bins,delimiter=" ",fmt="%.4f")

    pmf = -np.log(n)
    pmf -= min(pmf)

    plt.figure()
    plt.plot(bins[1:]/max(bins),pmf)
    plt.xlabel(coord,fontsize="xx-large")
    plt.ylabel("F("+coord+") / kT",fontsize="xx-large")
    plt.title("F("+coord+") "+title,fontsize="xx-large")
    plt.ylim(0,6)
    plt.xlim(0,1)
    plt.savefig(savedir+"/"+coord+"_pmf.pdf")
Code Example #20
File: rww_tools.py Project: TCioms/adc_tests
def hist_from_snapshots(rpt = 10):
#  hist_all = np.zeros(256,dtype=int)
  hist1 = np.zeros(256,dtype=int)
  hist2 = np.zeros(256,dtype=int)
  hist3 = np.zeros(256,dtype=int)
  hist4 = np.zeros(256,dtype=int)
  for i in range(rpt):
    snap=adc5g.get_snapshot(roach2, snap_name, man_trig=True, wait_period=2)
    snap = 128 + np.array(snap)
#    hist = np.bincount(snap, minlength=256)
#    hist_all += hist
    hist = np.bincount(snap[0:: 4], minlength=256)
    hist1 += hist
    hist = np.bincount(snap[1:: 4], minlength=256)
    hist2 += hist
    hist = np.bincount(snap[2:: 4], minlength=256)
    hist3 += hist
    hist = np.bincount(snap[3:: 4], minlength=256)
    hist4 += hist
  data=np.column_stack((np.arange(-128., 128, dtype=int), hist1, hist2,
      hist3, hist4))
  np.savetxt("hist_cores", data, fmt=("%d"))
#  print "all ",np.sum(hist_all[0:128]), np.sum(hist_all[128:256])
  print "core a  ",np.sum(hist1[0:128]), np.sum(hist1[129:256])
  print "core b  ",np.sum(hist3[0:128]), np.sum(hist3[129:256])
  print "core c  ",np.sum(hist2[0:128]), np.sum(hist2[129:256])
  print "core d  ",np.sum(hist4[0:128]), np.sum(hist4[129:256])
Code Example #21
File: rww_tools.py Project: TCioms/adc_tests
def og_from_noise(fname="ogp.noise", rpt=100):
  """
  Take a number of snapshots of noise.  Analyze for offset and gain
  for each core separately.
  """
  sum_result = np.zeros((15), dtype=float)
  sum_cnt = 0
  for n in range(rpt):
    result = np.zeros((15), dtype=float)
    snap=adc5g.get_snapshot(roach2, snap_name, man_trig=True, wait_period=2)
    if(rpt == 1):
      np.savetxt("t.og_noise", snap,fmt='%d')
    l=float(len(snap))
    snap_off=np.sum(snap)/l
    snap_amp=np.sum(abs(snap-snap_off))/l
    result[0]=snap_off*(-500.0/256.0)
    result[1]=snap_amp
    for core in range(4):
      # This will actually sample the cores in the order A,C,B,D
      # index will fix this up when data is put in the result array
      index=(3,9,6,12)[core]
      c=snap[core::4]
      l=float(len(c))
      off=np.sum(c)/l
      result[index] = off*(-500.0/256.0)
      amp=np.sum(abs(c-off))/l
      result[index+1]= 100.0*(snap_amp-amp)/snap_amp
    sum_result += result
    sum_cnt += 1
    print "%.4f "*15 % tuple(result)
  sum_result /= sum_cnt
  print "%.4f "*15 % tuple(sum_result)
  np.savetxt(fname, sum_result[3:], fmt="%8.4f")
Code Example #22
def get_mean_timeseries(infile,roi,mask):
    import os
    import nibabel as nib
    from nipype.utils.filemanip import fname_presuffix, split_filename
    import numpy as np

    img = nib.load(infile)
    data, aff = img.get_data(), img.get_affine()

    roi_img = nib.load(roi) 
    roi_data, roi_affine = roi_img.get_data(), roi_img.get_affine()

    if len(roi_data.shape) > 3:
        roi_data = roi_data[:,:,:,0]

    mask = nib.load(mask).get_data()
    roi_data = (roi_data > 0).astype(int) + (mask>0).astype(int)

    _,roiname,_ = split_filename(roi)
    outfile = fname_presuffix(infile,"%s_"%roiname,'.txt',newpath=os.path.abspath('.'),use_ext=False)
    
    out_data = np.mean(data[roi_data>1,:],axis=0)
    print out_data.shape
    
    np.savetxt(outfile,out_data)

    return outfile, roiname
Code Example #23
File: xye.py Project: AustralianSynchrotron/pdviper
 def save_fxye(self,filename):
     f=open(filename,'w')
     newx=self.data[:,0]*100
     newdata=np.column_stack((newx,self.data[:,1],self.data[:,2]))
     f.writelines(['Automatically generated file {} from PDViPeR \n'.format(splitext(basename(filename))[0]),
                  'BANK\t1\t{0}\t{1}\tCONS\t{2}\t{3} 0 0 FXYE \n'.format(len(newx),len(self.data[:,1]),newx[0],newx[1]-newx[0])])
     savetxt(f, newdata, fmt='%1.6f')
     f.close()
Code Example #24
File: mortgage_main.py Project: statX/nz-houses
	def populate_price_array(self):
		
		a = random.Random()
		b = random.Random()
		
		price = self.price_start
		
		for i in range(0,self.max_periods):
			#if (i % 6) == 0:
			
			ra = a.random()
			for j in range(0,self.random_walk_probabilities.shape[0]):
				if ra <= self.random_walk_probabilities[j,3]:
					price_increase = self.random_walk_probabilities[j,0]*0.01
					break
			
			rb = b.random()
			if rb < 0.35:
				price_increase = -1.0*price_increase
			
			price = price + self.multiplier*price_increase
			self.price_array[i] = price
			
		np.savetxt('/Users/james/development/code_personal/nz-houses/data/math/capital_gains_array.txt',self.price_array)
		
		print 'maximum price = ' + str(max(self.price_array))
Code Example #25
File: dmevolution.py Project: tmancal74/quantarhei
    def _exportDataToText(self, file):
        """Saves textual data to a file

        """
        Nt = self.data.shape[0]
        N = self.data.shape[1]
        # all elements [real] + (all elements - diagonal) [imaginary] + time
        Na = N + 1 + N*(N-1) 

        out = numpy.zeros((Nt, Na), dtype=numpy.float64)   
        
        for i in range(Nt):
            #print("%%%%")
            # time
            out[i,0] = self.TimeAxis.data[i]
            #print(0)
            # populations
            for j in range(N):
               out[i,j+1] = numpy.real(self.data[i,j,j])
               #print(j+1)
            # coherences
            l = 0
            for j in range(N):
                for k in range(j+1,N):
                    out[i,N+1+l] = numpy.real(self.data[i,j,k])
                    #print(N+1+l)
                    l += 1
                    out[i,N+1+l] = numpy.imag(self.data[i,j,k])
                    #print(N+1+l)
                    l += 1
                    
        numpy.savetxt(file, out)
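Given the layout built above (column 0 is time, columns 1..N hold the real diagonal populations, and the remaining N*(N-1) columns interleave real and imaginary parts of the upper-triangle coherences, so Na = N**2 + 1), a hypothetical reader for such a file could be:

import numpy

out = numpy.loadtxt("dmevolution.txt")      # illustrative file name
Nt, Na = out.shape
N = int(round((Na - 1) ** 0.5))             # invert Na = N + 1 + N*(N-1) = N**2 + 1

times = out[:, 0]                           # TimeAxis values
populations = out[:, 1:N + 1]               # real diagonal elements
re_coherences = out[:, N + 1::2]            # real parts of off-diagonal elements
im_coherences = out[:, N + 2::2]            # imaginary parts of off-diagonal elements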
Code Example #26
File: pointsel.py Project: jochym/pointsel
 def exportData(self, fn):
     hdr = " ;".join([" %s" % s.strip() for s in self.dat[0]])
     sel = self.getSelected()
     try:
         x, y = self.toolbar.roi.get_xy()
         w = self.toolbar.roi.get_width()
         h = self.toolbar.roi.get_height()
         hdr += "\n"
         hdr += " ROI (um): X=%.2f  Y=%.2f  W=%.2f  H=%.2f    Points=%d   Concentration=%g" % (
             x,
             y,
             w,
             h,
             sel.shape[1],
             sum(sel[2]) / (w * h),
         )
     except AttributeError:
         # No roi
         pass
     if sel is None:
         wx.MessageBox(
             "Nothing to save yet. Make some selection before trying to export data.", "Nothing to export!"
         )
     else:
         d = array(sel)
         # Shift exported data to the origin
         d[0] -= min(d[0])
         d[1] -= min(d[1])
         np.savetxt(fn, d.T, fmt="%.3f", delimiter=" ", newline="\n", header=hdr, footer="", comments="#")
Code Example #27
File: mortgage_main.py Project: statX/nz-houses
	def populate_rate_array(self):
		
		a = random.Random()
		b = random.Random()
		
		rate = 0.08
		
		for i in range(self.fixed_period,self.max_periods):
			
			# determine a rate increase for this time-step
			for j in range(0,self.random_walk_probabilities.shape[0]):
				if a.random() <= self.random_walk_probabilities[j,3]:
					rate_increase = self.random_walk_probabilities[j,0]*0.01
					break
			
			# determine whether the step is positive or negative
			if b.random() < 0.3:
				rate_increase = -1.0*rate_increase
			
			
			# increment the interest rate and store the result
			rate += rate_increase
			self.rate_array[i] = rate
			
		np.savetxt('/Users/james/development/code_personal/nz-houses/data/math/interest_rate_array.txt',self.rate_array)
Code Example #28
File: RBFN.py Project: osigaud/ArmModelPython
 def saveTheta(self,fileName):
     '''
     Records theta under numpy format
     
     Input:    -fileName: name of the file where theta will be recorded
     '''
     np.savetxt(fileName, self.theta)
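The docstring above covers only the save direction; a matching load sketch (hypothetical, not necessarily how the ArmModelPython project restores theta) is simply the np.loadtxt counterpart:

import numpy as np

def loadTheta(fileName):
    '''Reads back a theta array previously written with np.savetxt.'''
    return np.loadtxt(fileName)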
Code Example #29
File: create_fsl_model.py Project: RanjitK/C-PAC
def create_grp_file(data, model_name, gp_var, outputModelFilesDirectory):

    """
    create the grp file
    """

    dimx = None
    dimy = None
    if len(data.shape) == 1:
        dimy = 1
        dimx = data.shape[0]
    else:
        dimx, dimy = data.shape
    data = np.ones(dimx)

    if not (gp_var == None):
        i = 1
        for key in sorted(gp_var.keys()):

            for index in gp_var[key]:
                data[index] = i

            i += 1

    f = open(os.path.join(outputModelFilesDirectory, model_name + ".grp"), "w")

    print >> f, "/NumWaves\t1"
    print >> f, "/NumPoints\t%d\n" % dimx
    print >> f, "/Matrix"
    np.savetxt(f, data, fmt="%d", delimiter="\t")

    f.close()
Code Example #30
def main(argv):
	data = []
	with open('accelerometerData.txt', 'rU') as csvfile:
		cell_file = csv.reader(csvfile, delimiter=',', skipinitialspace=True)
		for row in cell_file:
			row = [float(x) for x in row]
			data.append(row)

	calculatedValues = []

	yo = 2

	for i in range(yo, len(data)-yo):
		values = []
		prev = data[i-yo]
		curr = data[i]
		nextV = data[i+yo]
		print prev[0]
		print nextV[0]
		print
		for i in range(1, 4):
			values.append((nextV[i] - prev[i])/(nextV[0]-prev[0]))
		for i in range(1,4):
			max_v = max([prev[i], curr[i], nextV[i]])
			min_v = min([prev[i], curr[i], nextV[i]])
			diff = abs(max_v-min_v)
			values.append(diff)
		calculatedValues.append(values)

	data = np.array(calculatedValues)

	np.savetxt("queue_data.csv", data, delimiter=",")
Code Example #31
# PLOT
#--------------------

fig = plt.figure()

ax = fig.add_subplot(111)

ax.plot(time, flow, 'k')
ax.set_ylabel('flow rate at inlet $Q_\mathrm{in}(t)$ $[m^3/s]$')
ax.grid('on')

plt.show()

#--------------------
# SAVE
#--------------------
cpath = os.getcwd()
dpath = cpath.replace('scripts', 'data')

output = flow
filename = 'inletFlow'
np.savetxt(dpath + '/' + filename, output, '%.8f')

output = np.array([TB, TV, N]).T
filename = 'TBTVN'
np.savetxt(dpath + '/' + filename, output, ['%f', '%.8f', '%d'])

output = np.array([[nbr], [1. / dt]]).T
filename = 'nbfs'
np.savetxt(dpath + '/' + filename, output, ['%d', '%f'])
Code Example #32
def analysis_routine(trajfile, grofile, pdbfile):

    import json
    from collections import OrderedDict
    import bilayer_analysis_functions
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    from matplotlib.backends.backend_pdf import PdfPages

    traj = mdtraj.load(trajfile, top=grofile)
    traj_pdb = mdtraj.load(trajfile, top=pdbfile)
    topol = traj.topology

    # Compute system information
    lipid_tails, headgroup_dict = bilayer_analysis_functions.identify_groups(
        traj, forcefield='charmm36')
    n_lipid = len([res for res in traj.topology.residues if not res.is_water])
    n_lipid_tails = len(lipid_tails.keys())
    n_tails_per_lipid = n_lipid_tails / n_lipid

    # Vectorized Calculations start here
    apl_avg, apl_std, apl_list = bilayer_analysis_functions.calc_APL(
        traj, n_lipid, blocked=True)
    np.savetxt('apl.dat', apl_list)

    angle_avg, angle_std, angle_list = bilayer_analysis_functions.calc_tilt_angle(
        traj, topol, lipid_tails, blocked=True)
    np.savetxt('angle.dat', angle_list)

    apt_avg, apt_std, apt_list = bilayer_analysis_functions.calc_APT(
        traj, apl_list, angle_list, n_tails_per_lipid, blocked=True)
    np.savetxt('apt.dat', apt_list)

    s2_ave, s2_std, s2_list = bilayer_analysis_functions.calc_nematic_order(
        traj, blocked=True)
    np.savetxt('s2.dat', s2_list)

    headgroup_distance_dict = bilayer_analysis_functions.compute_headgroup_distances(
        traj, topol, headgroup_dict, blocked=True)
    Hpp_ave, Hpp_std, Hpp_list = bilayer_analysis_functions.calc_bilayer_height(
        traj, headgroup_distance_dict, blocked=True, anchor='DSPC')
    np.savetxt('height.dat', Hpp_list)

    offset_dict = bilayer_analysis_functions.calc_offsets(
        traj, headgroup_distance_dict, blocked=True, anchor='DSPC')

    d_a, d_t, d_b, bins, interdig_list,interdig_avg, interdig_std = \
        bilayer_analysis_functions.calc_density_profile(traj, topol,
                                                        blocked=True)
    np.savetxt('idig.dat', interdig_list)
    ##print('Calculating hydrogen bonds...')
    ##hbond_matrix_avg, hbond_matrix_std, hbond_matrix_list, labelmap = bilayer_analysis_functions.calc_hbonds(traj, traj_pdb, topol, lipid_dict, headgroup_dict)
    #
    # Printing properties
    outpdf = PdfPages(('bilayeranalysis.pdf'))
    datafile = OrderedDict()
    datafile['trajectory'] = trajfile
    datafile['structure'] = grofile
    datafile['n_frames'] = traj.n_frames
    datafile['lipids'] = n_lipid
    datafile['tails'] = n_lipid_tails
    datafile['APL'] = OrderedDict()
    datafile['APL']['unit'] = str(apl_avg.unit)
    datafile['APL']['mean'] = float(apl_avg._value)
    datafile['APL']['std'] = float(apl_std._value)
    datafile['APT'] = OrderedDict()
    datafile['APT']['unit'] = str(apt_avg.unit)
    datafile['APT']['mean'] = float(apt_avg._value)
    datafile['APT']['std'] = float(apt_std._value)
    datafile['Bilayer Height'] = OrderedDict()
    datafile['Bilayer Height']['unit'] = str(Hpp_ave.unit)
    datafile['Bilayer Height']['mean'] = float(Hpp_ave._value)
    datafile['Bilayer Height']['std'] = float(Hpp_std._value)
    datafile['Tilt Angle'] = OrderedDict()
    datafile['Tilt Angle']['unit'] = str(angle_avg.unit)
    datafile['Tilt Angle']['Bilayer'] = OrderedDict()
    datafile['Tilt Angle']['Bilayer']['mean'] = float(angle_avg._value)
    datafile['Tilt Angle']['Bilayer']['std'] = float(angle_std._value)
    datafile['S2'] = OrderedDict()
    datafile['S2']['mean'] = s2_ave
    datafile['S2']['std'] = s2_std
    datafile['Interdigitation'] = OrderedDict()
    datafile['Interdigitation']['unit'] = str(interdig_avg.unit)
    datafile['Interdigitation']['mean'] = float(interdig_avg._value)
    datafile['Interdigitation']['std'] = float(interdig_std._value)

    datafile['Offset'] = OrderedDict()
    for key in offset_dict.keys():
        datafile['Offset']['unit'] = str(offset_dict[key][0].unit)
        datafile['Offset'][key] = OrderedDict()
        datafile['Offset'][key]['mean'] = float(offset_dict[key][0]._value)
        datafile['Offset'][key]['std'] = float(offset_dict[key][1]._value)
        #datafile['Offset (A)'][key] = [str(offset_dict[key][0]), str(offset_dict[key][1])]

    datafile['Tilt Angle']['Leaflet 1'] = OrderedDict()
    datafile['Tilt Angle']['Leaflet 1']['mean'] = float(
        np.mean(angle_list[:, 0:int(np.floor(n_lipid_tails / 2))])._value)
    datafile['Tilt Angle']['Leaflet 1']['std'] = float(
        np.std(angle_list[:, 0:int(np.floor(n_lipid_tails / 2))])._value)

    datafile['Tilt Angle']['Leaflet 2'] = OrderedDict()
    datafile['Tilt Angle']['Leaflet 2']['mean'] = float(
        np.mean(angle_list[:, int(np.floor(n_lipid_tails / 2)):])._value)
    datafile['Tilt Angle']['Leaflet 2']['std'] = float(
        np.std(angle_list[:, int(np.floor(n_lipid_tails / 2)):])._value)
    #for row_label in labelmap.keys():
    #    for col_label in labelmap.keys():
    #        row_index = labelmap[row_label]
    #        col_index = labelmap[col_label]
    #        hbond_avg = hbond_matrix_avg[row_index, col_index]
    #        hbond_std = hbond_matrix_std[row_index, col_index]
    #        outfile.write('{:<20s}: {} ({})\n'.format(str(row_label+"-"+ col_label), hbond_avg, hbond_std))

    # Plotting

    fig1 = plt.figure(1)
    plt.subplot(3, 2, 1)
    plt.plot(apl_list)
    plt.title('APL')

    plt.subplot(3, 2, 2)
    plt.plot(np.mean(angle_list, axis=1))
    plt.title('Tilt Angle ($^o$)')

    plt.subplot(3, 2, 3)
    plt.plot(np.mean(apt_list, axis=1))
    plt.title('APT')

    plt.subplot(3, 2, 4)
    plt.plot(Hpp_list)
    plt.title('H$_{PP}$')

    plt.subplot(3, 2, 5)
    plt.plot(s2_list)
    plt.title('S2')

    plt.subplot(3, 2, 6)
    plt.plot(interdig_list)
    plt.title('Interdigitation (A)')

    plt.tight_layout()
    outpdf.savefig(fig1)
    plt.close()

    density_profile_top_avg = np.mean(d_t, axis=0)
    density_profile_bot_avg = np.mean(d_b, axis=0)
    density_profile_avg = np.mean(d_a, axis=0)
    #
    #
    fig2 = plt.figure(2)
    plt.subplot(2, 1, 1)
    plt.plot(bins, density_profile_avg)
    plt.xlabel('Depth (nm)')
    plt.title('Density Profile (kg m$^{-3}$)')

    plt.subplot(2, 1, 2)

    #plt.plot(bins,density_profile_bot_avg)
    #plt.plot(bins,density_profile_top_avg)

    plt.hist(np.mean(angle_list[:, 0:int(np.floor(n_lipid_tails / 2))],
                     axis=0)._value,
             bins=50,
             alpha=0.5,
             facecolor='blue',
             normed=True)
    plt.hist(np.mean(angle_list[:, int(np.floor(n_lipid_tails / 2)):],
                     axis=0)._value,
             bins=50,
             alpha=0.5,
             facecolor='red',
             normed=True)
    plt.title('Angle Distribution by Leaflet')
    plt.xlabel('Angle ($^o$)')

    plt.tight_layout()
    outpdf.savefig(fig2)
    plt.close()
    outpdf.close()
    with open('data.txt', 'w') as f:
        json.dump(datafile, f, indent=2)
Code Example #33
model.add(Dense(10, kernel_initializer="normal", activation='softmax'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.fit(wine_data, y, epochs=1600, verbose=1)
#y_pred = model.predict(x_test)
#print(model.evaluate(x_test,y_test))

test = pd.read_csv("winequality-solution-input.csv")
del (test['id'])

#import xlsxwriter
#
#workbook = xlsxwriter.Workbook('sample_submission.xlsx')
#worksheet = workbook.add_worksheet()
#Y_t = Y_t[Y_t>0]
b = list()
y_pred = model.predict(x_test)
(m, n) = y_pred.shape
for i in range(m):
    b.append(np.argmax(y_pred[i]))

#col = 1
#row = 1
#for data in enumerate(b):
#    worksheet.write_column(row, col, data)
b = np.array(b).astype(int)
np.savetxt('out.csv', b, delimiter=',')
Code Example #34

# Get data for plotly
data = [
    go.Surface(
        x = X,
        y = Y,
        z = Z
    )
]

# Layout
layout = go.Layout(
    title = path,
    scene = dict(
        xaxis=dict(title=var1),
        yaxis=dict(title=var2),
        zaxis=dict(title="fitness"),
    ),
)

# Construct ploty
fig = go.Figure(data=data, layout = layout)
py.plot(fig, filename=path + "interactive_3d")

# Save XYZ
np.savetxt(path + "X.txt", np.matrix(X), fmt='%f', delimiter = ",")
np.savetxt(path + "Y.txt", np.matrix(Y), fmt='%f', delimiter = ",")
np.savetxt(path + "Z.txt", np.matrix(Z), fmt='%f', delimiter = ",")

Code Example #35
            w_final_adam, epochs_eff_adam, initial_cost_adam, final_cost_adam, overall_training_time_adam = bmsvmsgdadam.train_benchmark_light(
                x_train=x_training,
                y_train=y_training,
                x_test=x_testing,
                y_test=y_testing,
                w_init=w_init,
                log_file=log_file_adam,
                tolerance=tolerance,
                indices_init=indices_init,
                beta1=ba[0],
                beta2=ba[1])

            ## save weight vectors
            if repition == 0:
                np.savetxt(init_weight_file_sgd, w_init)
                np.savetxt(init_weight_file_mom, w_init)
                np.savetxt(init_weight_file_ada, w_init)
                np.savetxt(init_weight_file_rmsprop, w_init)
                np.savetxt(init_weight_file_adam, w_init)
            np.savetxt(final_weight_file_sgd, w_final_sgd)
            np.savetxt(final_weight_file_mom, w_final_mom)
            np.savetxt(final_weight_file_ada, w_final_ada)
            np.savetxt(final_weight_file_rmsprop, w_final_rmsprop)
            np.savetxt(final_weight_file_adam, w_final_adam)

            ## testing
            overall_accuracy_sgd = bmsvmsgd.advanced_test(x_testing=x_testing,
                                                          y_testing=y_testing,
                                                          w=w_final_sgd)
            overall_accuracy_mom = bmsvmsgdmomentum.advanced_test(
Code Example #36
File: Dailies.py Project: cxrodgers/ns5_process
def run_tones(rs=None, 
    output_dir='/media/dendrite',
    all_channels_file='../DO1_ALL_CHANNELS', 
    channel_groups_file='../DO1_CHANNEL_GROUPS', 
    analog_channels_file='../ANALOG_7_8',
    ns5_filename=None,
    remove_from_TC=None,
    soft_time_limits=(-1.0, 1.0),
    hard_time_limits=(-.04, .15),
    do_add_bcontrol=True,
    bcontrol_folder='/media/hippocampus/TRWINRIG_DATA/Data/SPEAKERCAL/',
    bcontrol_files=None,
    n_tones=None,
    n_tones_per_bout=200,
    TR_NDAQ_offset_sec=420,
    start_offset=0,
    stop_offset=0,
    do_timestamps=True,
    plot_spectrograms=True,
    break_at_spectrograms=False,
    force_put_neural_data=False,
    do_avg_plots=True,
    do_extract_spikes=True,
    detection_kwargs=None,
    CAR=True,
    save_to_klusters=True,
    do_MUA_grand_plot=True,
    group_multiplier=100,
    psth_time_limits=(None,None),
    do_tuning_curve=True,
    **kwargs):
    """Daily run script for tones (tuning curve)
    
    rs : RS if it exists. If it doesn't, provide the following:
        output_dir, all_channels_file, channel_groups_file, 
        analog_channels_file, ns5_filename, remove_from_TC, soft_time_limits,
        hard_time_limits
    
    do_add_bcontrol: if True, will find bcontrol files, extract information,
        write "tones" and "attens" to directory. (If those files already exist,
        then this block is skipped, so delete them if you want to force.)

        You can specify explicitly, or else it will search.
        
        bcontrol_files : a list of bcontrol files that you've selected.
        
        bcontrol_folder : If bcontrol_files is None, then will look here
            for them. Will try to guess from ns5 time which are appropriate.
            It will keep grabbing files until it find at least `n_tones`
            tones. If n_tones is None, uses the number of timestamps.

        In either case, the mat files are copied into the directory, and then
        the `tones` and `attens` files are written. Those files are used
        for all subsequent analyses.
    
    plot_spectrograms: if True, will plot spectrograms of the audio stimulus
        for every 200 tones, in order to check that the tones and attens 
        are correctly lined up.
        
        break_at_spectrograms : if True, errors immediately after plotting
        spectrograms, for debugging
    
    do_tuning_curve : if True, plots tuning curve
    
    n_tones_per_bout : int, or list of ints
        Number of tones expected in each bout.
        This is used for calculating timestamps of each tone, from timestamps
        of each trial (which corresponds to a single bcontrol file).
    
    Other parameters should be same as other Dailies.
    """
    if len(kwargs) > 0:
        print("unexpected kwargs")
        print(kwargs)
    
    # Make session
    if rs is None:
        printnow("creating recording session %s" % ns5_filename)
        rsm = rswrap.RecordingSessionMaker(
            data_analysis_dir=output_dir,
            all_channels_file=all_channels_file,
            channel_groups_file=channel_groups_file,
            analog_channels_file=analog_channels_file)

        if remove_from_TC is None:
            remove_from_TC = []

        rs = rsm.make_session(
            ns5_filename=ns5_filename,
            remove_from_TC=remove_from_TC)
    
        rs.write_time_limits(soft_time_limits, hard_time_limits)
    rs.group_multiplier = group_multiplier
    printnow("RS %s" % rs.full_path)

    # add timestamps
    if do_timestamps:
        printnow("adding timestamps")
        # write timestamps to directory
        # have to force, otherwise will sub-time it twice
        times, numbers = rswrap.add_timestamps_to_session(rs, verbose=True, 
            force=True, meth='digital_trial_number')
        
        # Right now one time per trial (bcontrol file)
        # Need to decimate by number of trials per bout
        # First figure out whether int or list of ints
        if not hasattr(n_tones_per_bout, '__len__'):
            n_tones_per_bout = [n_tones_per_bout] * len(times)

        # Now decimate each bout
        # This command works for 200 tones, extrapolate correct formula from it
        # subtimes = np.rint(np.linspace(3300, 1194210, 200)).astype(np.int)
        alltimes = []
        for time, n_tones in zip(times, n_tones_per_bout):
            istart = 3300
            istop = istart + np.rint((n_tones - 1) * 5984.5).astype(np.int)
            subtimes = np.rint(np.linspace(istart, istop, n_tones)).astype(np.int)
            alltimes.append(time + subtimes)
        alltimes = np.concatenate(alltimes)
        rs.add_timestamps(alltimes)        
    
    # add bcontrol
    tone_filename = os.path.join(rs.full_path, 'tones')
    atten_filename = os.path.join(rs.full_path, 'attens')
    if do_add_bcontrol and (not os.path.exists(tone_filename) or not \
        os.path.exists(atten_filename)):
        printnow('adding bcontrol')
        
        # First find out how many tones there probably are
        if n_tones is None:
            n_tones = len(rs.read_timestamps())
        
        if bcontrol_files is None:
            # Guess by ns5 filetime
            ns5_stoptime = gettime(rs.get_ns5_filename())
            ns5_startime = ns5_stoptime - datetime.timedelta(seconds=
                old_div(rs.get_ns5_loader().header.n_samples, rs.get_sampling_rate()))
            ns5_stoptime += datetime.timedelta(seconds=TR_NDAQ_offset_sec)
            ns5_startime += datetime.timedelta(seconds=TR_NDAQ_offset_sec)
            mintime = ns5_startime + datetime.timedelta(seconds=start_offset)
            maxtime = ns5_stoptime + datetime.timedelta(seconds=stop_offset)
            
            # Find the bcontrol files that were saved during the recording
            # And sort by time
            allfiles = np.asarray(glob.glob(os.path.join(
                bcontrol_folder, 'speakercal*.mat')))
            bcontrol_filetimes = np.asarray(list(map(gettime, allfiles)))
            sidxs = np.argsort(bcontrol_filetimes)
            bcontrol_filetimes = bcontrol_filetimes[sidxs]
            allfiles = allfiles[sidxs]
            
            # Choose the files within the window
            check_idxs = np.where(
                (bcontrol_filetimes > mintime) & 
                (bcontrol_filetimes < maxtime))[0]
            
            # Iterate through the found files until a sufficient number
            # of tones have been found
            n_found_tones = 0
            found_files = []
            for check_idx in check_idxs:
                # Load file
                filename = allfiles[check_idx]
                tl = myutils.ToneLoader(filename)
                
                # Skip if WN
                if not np.all(tl.tones == 0):
                    found_files.append(filename)
                    n_found_tones += len(tl.tones)
                
                # Break if enough found
                if n_found_tones >= n_tones:
                    break

            # Output debugging info
            print("I found %d tones in %d files" % (
                n_found_tones, len(found_files)))
            if n_found_tones < n_tones:
                print("insufficient tones found ... try increasing start delta")
            
            # More debugging info about first file
            print("Using general offset of " + str(TR_NDAQ_offset_sec) + " ....")
            idx1 = np.where(allfiles == found_files[0])[0]
            offsets = bcontrol_filetimes[idx1-1:idx1+2] - ns5_startime
            poffsets1 = [offset.seconds if offset > datetime.timedelta(0) 
                else -(-offset).seconds for offset in offsets]
            print("First file (prev,curr,next) offsets from start: %d %d %d" % \
                (poffsets1[0], poffsets1[1], poffsets1[2]))
            
            # And last file
            idx1 = np.where(allfiles == found_files[-1])[0]
            offsets = bcontrol_filetimes[idx1-1:idx1+2] - ns5_stoptime
            poffsets2 = [offset.seconds if offset > datetime.timedelta(0) 
                else -(-offset).seconds for offset in offsets]
            print("Last file (prev,curr,next) offsets from stop: %d %d %d" % \
                (poffsets2[0], poffsets2[1], poffsets2[2]))

            # Now put in forward order
            bcontrol_files = np.asarray(found_files)
            
            # Debugging output
            print("Like these results? Here's how to replicate:")
            print("<speakercal_files>")
            for bcf in bcontrol_files:
                print(os.path.split(bcf)[1])
            print("</speakercal_files>")
            print("clock_offset='%d' start_offset='%d %d %d' stop_offset='%d %d %d'" % (
                TR_NDAQ_offset_sec, 
                poffsets1[0], start_offset, poffsets1[1], 
                poffsets2[1], stop_offset, poffsets2[2]))
        
        # Add to RS
        if bcontrol_files is not None:
            for file in bcontrol_files:
                rs.add_file(file)
    
        # Now that we've settled on a canonical bcontrol file ordering,
        # dump tones and attens
        tls = [myutils.ToneLoader(file) for file in bcontrol_files]
        tones = np.concatenate([tl.tones for tl in tls])
        attens = np.concatenate([tl.attens for tl in tls])  
        np.savetxt(tone_filename, tones)
        np.savetxt(atten_filename, attens, fmt='%d')
        

    if plot_spectrograms:
        tones = np.loadtxt(tone_filename)
        attens = np.loadtxt(atten_filename, dtype=np.int)
        
        # verify timestamps
        timestamps = rs.read_timestamps()
        if len(timestamps) < len(tones):
            print("warning not enough timestamps, discarding tones: " + \
                "%d timestamps but %d tones" % (
                len(timestamps), len(tones)))
            tones = tones[:len(timestamps)]
            attens = attens[:len(timestamps)]
        elif len(timestamps) > len(tones):
            print("warning too many timestamps, provide more tones: " + \
                "%d timestamps but %d tones" % (
                len(timestamps), len(tones)))
        
        # check spectrograms
        # plot debugging spectrograms of audio
        l = rs.get_ns5_loader()
        raw = l.get_chunk_by_channel()
        ain135 = raw[135]
        ain136 = raw[136]
        
        # Spectrogrammer object
        sg = myutils.Spectrogrammer(NFFT=1024, Fs=30e3, max_freq=30e3, 
            min_freq=0, noverlap=512, downsample_ratio=1)
        
        # Fake toneloader to calculate aliased tones
        tl = myutils.ToneLoader()
        tl.tones = tones
        aliased_tones = tl.aliased_tones()
        
        for n in range(0, len(tones) - 5, 200):
            ts = timestamps[n]
            known_tones = aliased_tones[n:n+5]
            slc1 = ain135[ts:ts+30e3]
            slc2 = ain136[ts:ts+30e3]
            
            # Transform and plot
            Pxx, freqs, t = sg.transform(np.mean([slc1, slc2], axis=0))
            myutils.my_imshow(Pxx, t, freqs)
            plt.axis('auto')
            
            # Title with known tones
            plt.title('tl%d %0.1f %0.1f %0.1f %0.1f %0.1f' % (
                n, known_tones[0], known_tones[1], known_tones[2], 
                known_tones[3], known_tones[4]))
            
            # Save to RS
            plt.savefig(os.path.join(rs.full_path, 'tones_%d.png' % n))
            plt.close()
        
        if break_at_spectrograms:
            old_div(1,0)

    # put in neural db (does nothing if exists unless forced)
    printnow('putting neural data')
    rs.put_neural_data_into_db(verbose=True, force=force_put_neural_data)

    # plot averages
    if do_avg_plots:
        printnow("avg plots")
        rswrap.plot_avg_lfp(rs, savefig=True)
        rswrap.plot_avg_audio(rs, savefig=True)

    # spike extract
    if do_extract_spikes:
        printnow('extracting spikes')
        rs.generate_spike_block(CAR=CAR, smooth_spikes=False, verbose=True)
        rs.run_spikesorter(save_to_db=True, save_to_klusters=save_to_klusters,
            detection_kwargs=detection_kwargs)
        rs.spiketime_dump()

    # plot MUA stuff
    if do_MUA_grand_plot:
        printnow('mua grand psths')        
        rswrap.plot_all_spike_psths(rs, savefig=True)
        
    
    # make a tuning curve
    if do_tuning_curve:
        # extract tones and attens from each
        tones = np.loadtxt(tone_filename)
        attens = np.loadtxt(atten_filename, dtype=np.int)
        if len(timestamps) < len(tones):
            print("warning not enough timestamps, discarding tones: " + \
                "%d timestamps but %d tones" % (
                len(timestamps), len(tones)))
            tones = tones[:len(timestamps)]
            attens = attens[:len(timestamps)]
        elif len(timestamps) > len(tones):
            print("warning too many timestamps, provide more tones: " + \
                "%d timestamps but %d tones" % (
                len(timestamps), len(tones)))        
        
        
        # parameters for tuning curve
        tc_freqs = 10 ** np.linspace(np.log10(5e3), np.log10(50e3), 15)
        tc_attens = np.unique(attens)

        # Determine which bin each trial belongs to
        tone_freq_bin = np.searchsorted(tc_freqs, tones, side='right') - 1
        tone_atten_bin = np.searchsorted(tc_attens, attens, side='right') - 1
        
        # spike count for each trial
        group = 5
        spike_time_file = os.path.join(rs.last_klusters_dir(),
            '%s.res.%d' % (rs.session_name, group))
        spike_times = np.loadtxt(spike_time_file, dtype=np.int)
        timestamps = rs.read_timestamps()
        spike_counts = count_within_window(timestamps, spike_times,
            .005*30e3, .030*30e3)
        
        # reshape into tuning curve
        tc_mean = np.zeros((len(tc_attens), len(tc_freqs) - 1))
        tc_std = np.zeros((len(tc_attens), len(tc_freqs) - 1))        
        tc_median = np.zeros((len(tc_attens), len(tc_freqs) - 1))        
        for n, tc_freq in enumerate(tc_freqs[:-1]):
            for m, tc_atten in enumerate(tc_attens):
                # Which tones go into this bin
                tone_idxs = np.where(
                    (tone_freq_bin == n) & (tone_atten_bin == m))[0]
                if len(tone_idxs) == 0:
                    print("none in this bin %f %d" % (tc_freq, tc_atten))
                    continue        
                
                tc_mean[m, n] = np.mean(spike_counts[tone_idxs])
                tc_median[m, n] = np.median(spike_counts[tone_idxs])
                tc_std[m, n] = np.std(spike_counts[tone_idxs])
        
        # plot
        np.savez('data', tc_mean=tc_mean, tc_freqs=tc_freqs, tc_attens=tc_attens)
        myutils.my_imshow(tc_mean, tc_freqs, tc_attens, cmap=plt.cm.Purples)
        plt.axis('tight')
        plt.colorbar()
        myutils.my_imshow(tc_median, tc_freqs, tc_attens, cmap=plt.cm.Purples)
        plt.colorbar()
        myutils.my_imshow(tc_std, tc_freqs, tc_attens, cmap=plt.cm.gray)
        plt.colorbar()
        plt.show()
    
    return rs
Code Example #37
 def WriteForces(self, Input):
     cd = []
     cl = []
     try:
         os.remove('surfaceData.dat')
     except OSError:
         pass
     try:
         os.remove('IntsurfaceData.txt')
     except OSError:
         pass
     ExpectedDragCoeff = 0.00290
     f = open('surfaceData.dat', 'a')
     headerSurfaceData = "variables=X REX CF"
     f.write(headerSurfaceData + '\n')
     for block, value in self.BCList.items():
         for face, BC in value.items():
             if BC == -5:
                 #print FaceData[block][face]['']
                 PDiff = (self.FaceData[block][face]['Pressure'] -
                          self.PRef)
                 x = 0.5 * (self.FaceData[block][face]['x'][1:, 0] +
                            self.FaceData[block][face]['x'][:-1, 0])
                 nx = self.FaceData[block][face]['jn'][:, :, 0]
                 ny = self.FaceData[block][face]['jn'][:, :, 1]
                 nz = self.FaceData[block][face]['jn'][:, :, 2]
                 Area = self.FaceData[block][face]['jArea'][:, :, 0]
                 dudx = self.FaceData[block][face]['Dudx']
                 dudy = self.FaceData[block][face]['Dudy']
                 dudz = self.FaceData[block][face]['Dudz']
                 dvdx = self.FaceData[block][face]['Dvdx']
                 dvdy = self.FaceData[block][face]['Dvdy']
                 dvdz = self.FaceData[block][face]['Dvdz']
                 dwdx = self.FaceData[block][face]['Dwdx']
                 dwdy = self.FaceData[block][face]['Dwdy']
                 dwdz = self.FaceData[block][face]['Dwdz']
                 delv = dudx + dvdy + dwdz
                 mu = self.FaceData[block][face]['Mu']
                 txx = mu * ((dudx + dudx) - 2.0 * delv / 3.0)
                 tyy = mu * ((dvdy + dvdy) - 2.0 * delv / 3.0)
                 tzz = mu * ((dwdz + dwdz) - 2.0 * delv / 3.0)
                 txy = mu * ((dudy + dvdx))
                 tyz = mu * ((dwdy + dvdz))
                 txz = mu * ((dudz + dwdx))
                 tyx = txy
                 tzy = tyz
                 tzx = txz
                 Fx = (txx * nx + txy * ny + txz * nz)
                 Fy = (tyx * nx + tyy * ny + tyz * nz)
                 Fz = (tzx * nx + tzy * ny + tzz * nz)
                 Fn = Fx * nx + Fy * ny + Fz * nz
                 Fwallx = Fx - Fn * nx
                 Fwally = Fy - Fn * ny
                 Fwallz = Fz - Fn * nz
                 cf_x = Fwallx / self.DynamicVelocity
                 cf_y = Fwally / self.DynamicVelocity
                 cf_z = Fwallz / self.DynamicVelocity
                 Fwall = np.sqrt(
                     np.power(Fwallx, 2) + np.power(Fwally, 2) +
                     np.power(Fwallz, 2))
                 cp = PDiff / self.DynamicVelocity
                 cp_x = -cp * nx * Area
                 cp_y = -cp * ny * Area
                 cp_z = -cp * nz * Area
                 cd.append(
                     np.sum((Fx * self.nDrag[0] + Fy * self.nDrag[1] +
                             Fz * self.nDrag[2]) * Area /
                            self.DynamicVelocity))
                 cd.append(
                     np.sum(cp_x * self.nDrag[0] + cp_y * self.nDrag[1] +
                            cp_z * self.nDrag[2]))
                 cl.append(
                     np.sum((Fx * self.nLift[0] + Fy * self.nLift[1] +
                             Fz * self.nLift[2]) * Area /
                            self.DynamicVelocity))
                 cl.append(
                     np.sum(cp_x * self.nLift[0] + cp_y * self.nLift[1] +
                            cp_z * self.nLift[2]))
                 #print Cf_x.shape, Fwall.shape, nDrag[0].shape
                 Rex = self.RhoRef * self.Velocity * x / self.MuRef
                 #np.savetxt(f, np.c_[x, Rex, cp, cf_x, cf_y], delimiter="  ")
                 np.savetxt(f, np.c_[x, Rex, cf_x], delimiter="  ")
     f.close()
     CD = sum(cd) / (2 * 0.04)
     Difference = np.abs((ExpectedDragCoeff - CD) * 100 / ExpectedDragCoeff)
     print(" ------ Turbulent Test case: Flat plate ------ ")
     print(" Flux Scheme        : " + Input.SchemeDict['FluxScheme'])
     print(" Higher order method: " + Input.SchemeDict['FaceScheme'])
     print(" Turbulence model   : " + Input.SchemeDict['TurbulenceModel'])
     print(" Expected drag coeffcient    : " +
           "{:.3E}".format(ExpectedDragCoeff))
     print(" Calculated drag coefficient : " + "{:.3E}".format(CD))
     print(" Difference                  : " + "{:.3E}".format(Difference) +
           " %")
     print(" Allowed Tolerance           : 2 %")
     if Difference < 2:
         print("------------ >>> Test Passed  <<< --------------")
     else:
         print("------------ xxx Test Failed  xxx --------------")
コード例 #38
ファイル: gw_script.py プロジェクト: catfather10/gw
def generateSample(name,sampleSize,detector,mergerRateFun,massFName,\
                    saveFlag=True,savePath=''):
    print('########## generateSample ##########')
    print(detector)
    startTime = time.time()  
    print('massFName',massFName)
    fNameToCheck="mass/"+massFName+".txt"
    my_file = Path(fNameToCheck)
    if (not my_file.is_file()):
        print(os.getcwd())
        print("nie znaleziono pliku z masami")
        return -1
    else:
        print('mass file found')
        global massesData
        massesData=np.loadtxt(fNameToCheck)

        
    zMaxGeneral=zMaxGet(detector,max(np.loadtxt('mass/'+massFName+'.txt')))
    if (zMaxGeneral>10):
        zMaxGeneral=10
    print('zMaxGeneral',zMaxGeneral)
    
    DNSonlyZ=lambda z: binaryDistribution(z,mergerRateFun,zMaxGeneral)
    normGeneral=integrate.quad(DNSonlyZ,0,zMaxGeneral)[0]
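    # Normalize the merger-rate-weighted redshift distribution to a proper PDF over [0, zMaxGeneral]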
    toMinimize=minimize(lambda z:-1*DNSonlyZ(z)/normGeneral,0.01,bounds=((0,zMaxGeneral),))
    probMaxGeneral=-1*toMinimize.fun[0]
    zPDF=lambda z:binaryDistribution(z,mergerRateFun,zMaxGeneral)/normGeneral
    vectzPDF=np.vectorize(zPDF)


    ## new inverse-transform sampling
    zToInv=np.linspace(0,zMaxGeneral,num=1000,endpoint=True)
    yToInv=vectzPDF(zToInv)
    integs, yy=[], []
    for i in range(len(zToInv)):
        integs.append(integrate.simps(yToInv[:i+1],zToInv[:i+1])) ## tabulate the CDF
    zCDF=interp1d(zToInv,integs)
     
    zzz=np.linspace(0,zMaxGeneral,num=100000,endpoint=True)
    zz=np.linspace(0,zMaxGeneral,num=1000)
    ts=np.linspace(0,1,num=1000,endpoint=False)

    for t in ts:
        toSolve =lambda z: zCDF(z)-t
        yy.append(brentq(toSolve,a=0,b=zMaxGeneral,disp=True)) ### tabulate the inverse CDF
    yy.append(zMaxGeneral)
    ts=np.append(ts,1)
    invCDF=interp1d(ts,yy)
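    # Inverse-transform sampling: the CDF was tabulated on a z grid, inverted
    # point-wise with brentq, and interpolated, so invCDF maps a uniform draw
    # in [0,1] to a redshift distributed according to zPDF.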
    sampler = lambda :invCDF(rng())

    tohist=[]
    for i in range(10**5):
        tohist.append(sampler())
        """
    import matplotlib.pyplot as plt
    plt.hist(tohist,bins=100,normed=True)
    plt.plot(zz,vectzPDF(zz)/integrate.simps(vectzPDF(zz),zz))
    plt.xlabel('z')
    plt.savefig(name+'.png',dpi=300)
    plt.show()
   """
    #if(customSamplerEnvelope==False):
    #    print('MonteCarloSampler')
    #    sampler=lambda pdf: MonteCarloSampling(pdf,(0,zMaxGeneral+.1),(0,probMaxGeneral+.1))
    #else:
    #    print("using envelope rejection sampling")
    #    sampler= lambda pdf:rejectionEnvelopeSampling(pdf,customSamplerEnvelope)
    
    draws,k,current,last,SNRs,sampleData=0,0,0,0,[],[]
    while(k<sampleSize):
        draws+=1
        randomSample=randNS(detector,sampler)
        tempSNR=randomSample[0]
        if(tempSNR>8):
            SNRs.append(tempSNR)
            sampleData.append(randomSample)
            k+=1
            if(current!=last):
                print('\rComplete: '+str(int(k/sampleSize*100))+"%",end="")
                last=current
            current=int(k/sampleSize*100)
            
    print("draws= "+str(draws)+" draws/samplesize= "+str((draws/k))) 
    SNRs,sampleData=np.array(SNRs),np.array(sampleData)
    if(saveFlag):
        oldPath = os.getcwd()
        if(savePath!=''):
            os.chdir(savePath)
        if(not os.path.isdir('SNR')):
            os.mkdir('SNR')
        if(not os.path.isdir('SNR/'+detector)):
            os.mkdir('SNR/'+detector)
        np.savetxt("SNR/"+detector+"/"+name+detector+"_sample"+str(sampleSize)+".gz",sampleData)
        os.chdir(oldPath)  # oldPath is always defined, even when savePath is ''
        #np.savetxt("SNR/"+detector+"/"+name+detector+"_sample"+str(sampleSize)+".txt",SNRs)
    doneTime = time.time()
    print('Done in this many minutes: '+str((doneTime-startTime)/60))
    massesData=0
コード例 #39
ファイル: feature_match.py プロジェクト: stefanv/supreme
if stack:
    pairs = np.array(correspondences)
    print '%d correspondences found' % len(pairs)
    if len(pairs) <= 4:
        raise RuntimeError('Not enough correspondences to do H-matrix '
                           'estimation.')
    M, converged = supreme.register.sparse(pairs[:, 1, 0],
                                           pairs[:, 1, 1],
                                           pairs[:, 0, 0],
                                           pairs[:, 0, 1],
                                           mode=registration_method,
                                           confidence=RANSAC_confidence)
    #                                           inliers_required=len(pairs)*0.8)
    print np.array2string(M, separator=', ')
    print "Also writing transformation matrix to /tmp/H.H."
    np.savetxt('/tmp/H.H', M)

    if refine_using_MI:
        # Estimate parameters from M
        s = np.sqrt(M[0, 0]**2 + M[1, 0]**2)
        theta = np.arccos(M[0, 0] / s)
        p = [theta, s, s, M[0, 2], M[1, 2]]
        M, S = supreme.register.dense_MI(img0.astype(np.uint8),
                                         img1.astype(np.uint8),
                                         p=p,
                                         levels=1)
        print np.array2string(M, separator=', ')

    if show_features:
        plt.subplot(2, 1, 2)
        stack = supreme.register.stack.with_transform((imgc0, imgc1),
コード例 #40
ファイル: lopatin.py プロジェクト: daimessdn/py_lopatin
    ax1.scatter(umur, ketebalan[i], s=400)  # scatter plot

# primary-axis label positions
ax1.set_xlabel("umur (dalam juta tahun)", fontsize=20)
ax1.xaxis.set_label_position('top')
ax1.set_ylabel("kedalaman", fontsize=20)
ax1.set_ylim(bottom=0, top=maximum)

ax1.invert_yaxis()

ax1.tick_params(axis="x", labelsize=20)
ax1.tick_params(axis="y", labelsize=20)
ax2.tick_params(axis="y", labelsize=20)

np.savetxt("csv/file_name.csv",
           np.row_stack((umur, ketebalan)),
           delimiter=",",
           fmt='%s')
np.savetxt("csv/temperature.csv",
           np.row_stack((umur, temperature)),
           delimiter=",",
           fmt='%s')

# np.savetxt("csv/tt_index.csv", TTI, delimiter=",", fmt='%s')

plt.title("Lopatin Burial History", fontsize=20)
ax1.legend(bbox_to_anchor=(1, 0), ncol=4, prop={'size': 20})  # show the legend
fig.tight_layout()
plt.xlim(min(umur), max(umur))
ax1.invert_xaxis()
ax2.invert_xaxis()
コード例 #41
        #qmax = int(float(qmax)) + 20
        #print os.path.join(subdir, file)

        if file.endswith('.dat'):
            iteration = re.findall(r'\d+', file)

            if len(iteration) == 0:
                continue

            iteration = int(iteration[0])
            if qmax in result:
                if iteration > result[qmax]:
                    result[qmax] = iteration
            else:
                result[qmax] = iteration

#print(result)

q, it = [], []
for i in sorted(result.keys()):
    q.append(i)
    it.append(result[i])

print(list(zip(q, it)))

numpy.savetxt('K_simulation.dat',
              numpy.array(list(zip(q, it)), dtype=int),
              fmt='%i',
              header="q_max * 100 [1/Ang]\tmax_iteration [1]",
              delimiter="\t")
コード例 #42
ファイル: MAB.py プロジェクト: vaswanis/randucb
        (BetaBandit, {"a_plus_b": 16}, 0.1, "Beta (hard)")    
    ]

    for env_def in environments:
        env_class, env_params, max_gap, env_name = env_def[0], env_def[1], env_def[2], env_def[-1]
        print("================== running environment", env_name, "==================")
    
        envs = []
        for run in range(num_runs):

            np.random.seed(run)

            mu = max_gap * np.random.rand(K) + (0.5 - max_gap/2)
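            # Arm means are drawn uniformly from an interval of width max_gap centred on 0.5.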
            envs.append(env_class(mu, seed=run, **env_params))
    
            res_dir = os.path.join(base_dir, env_name)
            os.makedirs(res_dir, exist_ok=True)

        for alg_def in algorithms:
    
            alg_class, alg_params, alg_name = alg_def[0], alg_def[1], alg_def[-1]        

            fname = os.path.join(res_dir, alg_name)
            if os.path.exists(fname):
                print('File exists. Will load saved file. Moving on to the next algorithm')
            else:
                regret, _ = evaluate(alg_class, alg_params, envs, n)
                cum_regret = regret.cumsum(axis=0)

                np.savetxt(fname, cum_regret, delimiter=",")
コード例 #43
def main():
    start = time.time()
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=5,
                        metavar='N',
                        help='number of epochs to train (default: 5)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')
    parser.add_argument('--file-name',
                        type=str,
                        default='test_' + str(int(start))[-3:],
                        metavar='filename',
                        help='Name of file to store model and losses')
    parser.add_argument(
        '--quant-type',
        type=str,
        default='none',
        metavar='qtype',
        help='Type of quantisation used on activation functions')
    parser.add_argument('--bit-res',
                        type=int,
                        default=4,
                        metavar='bitres',
                        help='Bit resolution of activation function')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    qt = args.quant_type
    if qt == 'dumb':
        model = DumbNet().to(device)
        print("Building dumb {0} bit network".format(args.bit_res))
    elif qt == 'lit':
        model = LitNet().to(device)
        print("Building LIT {0} bit network".format(args.bit_res))
    else:
        model = Net().to(device)
        print("\nBuilding full resolution network")

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)

    losses_train = np.zeros((args.epochs))
    losses_test = np.zeros((args.epochs))

    start = time.time()

    for epoch in range(1, args.epochs + 1):
        epoch_train_loss = train(args, model, device, train_loader, optimizer,
                                 epoch)
        epoch_test_loss = test(args, model, device, test_loader)
        losses_train[epoch - 1] = epoch_train_loss
        losses_test[epoch - 1] = epoch_test_loss
        current_time = time.time() - start
        print('\nEpoch: {:d}'.format(epoch))
        print('Training set loss: {:.6f}'.format(epoch_train_loss))
        print('Test set loss: {:.6f}'.format(epoch_test_loss))
        print('Time taken: {:.6f}s'.format(current_time))

    if (args.save_model):
        if not os.path.exists('models'):
            os.mkdir('models')
        torch.save(model.state_dict(), 'models/' + args.file_name + '.pt')
        if not os.path.exists('data'):
            os.mkdir('data')
        losses = np.stack((losses_train, losses_test), axis=1)
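        # Column 0 holds the training loss per epoch, column 1 the test loss.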
        np.savetxt('data/' + args.file_name + '.txt', losses, delimiter=', ')

    fig = plt.figure()
    ax = fig.gca()
    ax.set_title('Loss per Epoch')
    plt.plot(losses_train)
    plt.plot(losses_test)
    plt.ylabel('loss')
    plt.xlabel('epoch')
    blue_line = mpatches.Patch(color='blue', label='Training Loss')
    orange_line = mpatches.Patch(color='orange', label='Testing Loss')
    plt.legend(handles=[blue_line, orange_line])
    plt.show()
コード例 #44
temp_seq_mat2 = temp_seq_mat.toarray()

#we need a parameter for the effect of mutation at each base pair.
#we remove 20 base pairs to remove the barcode sequence on the end of the sequence.
len_seq = len(df.loc[0, 'seq'])
len_outputseq = len_seq - 20
len_barcode = 20
#We add 4 parameters per barcode base, because the barcode keeps the full one-hot
#(A/C/G/T) encoding while the rest of the sequence is collapsed to one parameter per position.
total_params = len_outputseq + len_barcode * 4
seq_mat = np.zeros((temp_seq_mat2.shape[0], total_params))
for i in range(len_outputseq):
    seq_mat[:, i] = np.sum(temp_seq_mat2[:, i * 4:(i * 4 + 4)], axis=1)
seq_mat[:, len_outputseq:] = temp_seq_mat2[:, -len_barcode * 4:]
seq_mat = scipy.sparse.csr_matrix(seq_mat)
emat_0 = np.zeros((4, len_seq))
emat_0[:2, :len_outputseq] = utils.RandEmat(len_outputseq, 2)
emat_0[:, len_outputseq:] = utils.RandEmat(20, 4)
emat = MaximizeMI_memsaver(seq_mat,
                           df.copy(),
                           emat_0,
                           wtrow,
                           db=sys.argv[2],
                           iteration=600000,
                           burnin=1000,
                           thin=60,
                           runnum=0,
                           verbose=True,
                           temp=4200)

np.savetxt(sys.argv[3], emat)
コード例 #45
ファイル: shallow_water.py プロジェクト: jedbrown/sweet
		maxval = x.spectogrd(phispec).max()
		print("TIMESTEP "+str(ncycle)+"   "+str(maxval))
		#outputMinMaxSum(i_data, i_prefix):

		# RK2 time stepping
		if 0:
			(vrtdt, divdt, phidt) = timestep(vrtspec, divspec, phispec)
		else:
			(vrtdt, divdt, phidt) = timestep(vrtspec, divspec, phispec)
			(vrtdt, divdt, phidt) = timestep(vrtspec+0.5*dt*vrtdt, divspec+0.5*dt*divdt, phispec+0.5*dt*phidt)

		vrtspec += dt*vrtdt
		divspec += dt*divdt
		phispec += dt*phidt


	vrtg = x.spectogrd(vrtspec)
	ug,vg = x.getuv(vrtspec,divspec)
	phig = x.spectogrd(phispec)

	np.savetxt("vrt_final.csv", x.spectogrd(vrtspec), delimiter="\t")
	np.savetxt("div_final.csv", x.spectogrd(divspec), delimiter="\t")
	np.savetxt("phi_final.csv", x.spectogrd(phispec), delimiter="\t")

	outputMinMaxSum(vrtg, "vrtg")
	outputMinMaxSum(ug, "ug")
	outputMinMaxSum(vg, "vg")
	outputMinMaxSum(phig, "phig")

	sys.exit(1)
コード例 #46
        tn[i] += ((baseline != i) * (myresults != i)).sum()
        fp[i] += ((baseline != i) * (myresults == i)).sum()
        fn[i] += ((baseline == i) * (myresults != i)).sum()
    return tp, tn, fp, fn


if __name__ == '__main__':
    cf, vectorizer = gen_classifier()

    # for trump tweets
    # my_trump_senti = my_sentiment(cf, vectorizer, TRUMP_TWEETS_FILTERED_CLEANED)
    # trump_baseline_senti = baseline_sentiment(TRUMP_BASELINE_PREDICTION)

    # for hillary tweets
    my_hillary_senti = my_sentiment(cf, vectorizer, HILLARY_TWEETS_FILTERED_CLEANED)
    hillary_baseline_senti = baseline_sentiment(HILLARY_BASELINE_PREDICTION)
    tp, tn, fp, fn = compare_to_baseline(hillary_baseline_senti, my_hillary_senti)


    results = get_validation_summary(tp, tn, fp, fn, True)
    header = 'Label,Model(u),Freq,accuracy,precision,recall'

    ans = []
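    # Each row: label, Model(u), Freq, then accuracy/precision/recall as floats
    # (this matches the '%d,%d,%d,' + '%.10f,'*3 format string passed to savetxt below).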
    for lb in results:
        r = results[lb]
        ans.append([lb, 0, 2, r[0], r[1], r[2]])
    numpy.savetxt('hillary_large_prediction_base_comp.csv', ans, fmt='%d,%d,%d,'+'%.10f,'*3, delimiter=',', header=header)



コード例 #47
for g in num.unique(group):
    i = num.where(group == g)
    ax.scatter(scatter_x[i], scatter_y[i], label=g)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * .9, box.height * .9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.figure(figsize=(20, 20))
plt.show()

# This allows us to look deeper into what the clusters mean,
# by providing the summed important attributes for each cluster
att_freq, sizes = analyze_clusters(pred, np)

for i in range(len(att_freq)):
    savetxt("clus_" + str(i) + "_values.csv",
            att_freq[i],
            delimiter=',',
            fmt='%d')

savetxt("sizes.csv", sizes, delimiter=',')

# cls 0: 8, 2, 3, 4
# cls 1: 10
# cls 2: 1, 7
# cls 3: 9, 130
# cls 4: 6, 11

# cls 0: woods, courts, dogs, track
# cls 1: picnicArea
# cls 2: playground, pool
# cls 3: field
# cls 4: paths, natureArea
コード例 #48
plt.savefig(lossStr)
plt.clf()

#Show graph of validation and training accuracy
acc = hist.history['acc']
val_acc = hist.history['val_acc']
plt.plot(epochs, acc, color='red', label='Training acc')
plt.plot(epochs, val_acc, color='green', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
accStr = os.path.join(os.getcwd(), "accuracy.png")
plt.savefig(accStr)
plt.clf()

fileStr = os.getcwd() + "loss.json"
np_save = np.array(loss)
np.savetxt(fileStr, np_save, delimiter=",")
fileStr = os.getcwd() + "val_loss.json"
np_save = np.array(val_loss)
np.savetxt(fileStr, np_save, delimiter=",")
fileStr = os.getcwd() + "acc.json"
np_save = np.array(acc)
np.savetxt(fileStr, np_save, delimiter=",")
fileStr = os.getcwd() + "val_acc.json"
np_save = np.array(val_acc)
np.savetxt(fileStr, np_save, delimiter=",")

tf.keras.backend.clear_session()
コード例 #49
etc_colibri.information["lambda_end"] = 2.5
etc_colibri.information["lambda_step"] = 0.01

for band in filters:
    etc_colibri.information["filter_band"] = band
    if band in ['gri', 'g', 'r', 'i']:
        etc_colibri.information['channel'] = 'DDRAGO-B'
    elif band in ['zy', 'z', 'y']:
        etc_colibri.information['channel'] = 'DDRAGO-R'
    elif band in ['J', 'H']:
        etc_colibri.information['channel'] = 'CAGIRE'
    etc_colibri.sim()
    x = etc_colibri.information['wavelength_ang'] / 10
    y = etc_colibri.information['system_response']
    np.savetxt(path_pyGRBz + '/transmissions/colibri/' + '%s.txt' % band,
               np.array([x, y]).T,
               fmt='%.2f %.4f')

# # Define observational strategy

# In[5]:

# Load package
from colibri_obs_strategy.build_lc import obs_strategy

obs_strat = obs_strategy(resdir=resdir, plot=display_plot)

# In[6]:

#Choose filters for each channel.
# For each channel, the first list corresponds to the filters to use prior to detection.
コード例 #50
    random = Random(seed)

    # create some random data
    N = 10000

    # an array of random numbers from numpy
    x = np.random.rand(N)

    # an array of random numbers using our Random class
    myx = []
    for i in range(0, N):
        myx.append(random.Rayleigh(sigma))

    # create histogram of our data
    n, bins, patches = plt.hist(myx,
                                50,
                                density=False,
                                color='red',
                                alpha=0.75)

    # plot formating options
    plt.xlabel('Number')
    plt.ylabel('Counts')
    plt.title('Rayleigh Random Number Distribution: sigma = 0.5 - 5.5')
    plt.grid(True)

    plt.show()

    #save the array of random numbers to a .txt file
    np.savetxt("random_numbers.txt", myx)
コード例 #51
def postprocess(temperature=1., numResampleLogX=1, plot=True, loaded=[], \
			cut=0., save=True, zoom_in=True):
	if len(loaded) == 0:
		levels = np.atleast_2d(np.loadtxt("levels.txt"))
		sample_info = np.atleast_2d(np.loadtxt("sample_info.txt"))
		sample = np.atleast_2d(np.loadtxt("sample.txt"))
		#if(sample.shape[0] == 1):
		#	sample = sample.T
	else:
		levels, sample_info, sample = loaded[0], loaded[1], loaded[2]

	sample = sample[int(cut*sample.shape[0]):, :]
	sample_info = sample_info[int(cut*sample_info.shape[0]):, :]

	if sample.shape[0] != sample_info.shape[0]:
		print('# Size mismatch. Truncating...')
		lowest = np.min([sample.shape[0], sample_info.shape[0]])
		sample = sample[0:lowest, :]
		sample_info = sample_info[0:lowest, :]

	if plot:
		if numResampleLogX > 1:
			plt.ion()

		plt.figure(1)
		plt.plot(sample_info[:,0])
		plt.xlabel("Iteration")
		plt.ylabel("Level")
		if numResampleLogX > 1:
			plt.draw()

		plt.figure(2)
		plt.subplot(2,1,1)
		plt.plot(np.diff(levels[:,0]))
		plt.ylabel("Compression")
		plt.xlabel("Level")
		xlim = plt.gca().get_xlim()
		plt.axhline(-1., color='r')
		plt.ylim(ymax=0.05)
		if numResampleLogX > 1:
			plt.draw()

		plt.subplot(2,1,2)
		good = np.nonzero(levels[:,4] > 0)[0]
		plt.plot(levels[good,3]/levels[good,4])
		plt.xlim(xlim)
		plt.ylim([0., 1.])
		plt.xlabel("Level")
		plt.ylabel("MH Acceptance")
		if numResampleLogX > 1:
			plt.draw()

	# Convert to lists of tuples
	logl_levels = [(levels[i,1], levels[i, 2]) for i in xrange(0, levels.shape[0])] # logl, tiebreaker
	logl_samples = [(sample_info[i, 1], sample_info[i, 2], i) for i in xrange(0, sample.shape[0])] # logl, tiebreaker, id
	logx_samples = np.zeros((sample_info.shape[0], numResampleLogX))
	logp_samples = np.zeros((sample_info.shape[0], numResampleLogX))
	logP_samples = np.zeros((sample_info.shape[0], numResampleLogX))
	P_samples = np.zeros((sample_info.shape[0], numResampleLogX))
	logz_estimates = np.zeros((numResampleLogX, 1))
	H_estimates = np.zeros((numResampleLogX, 1))

	# Find sandwiching level for each sample
	sandwich = sample_info[:,0].copy().astype('int')
	for i in xrange(0, sample.shape[0]):
		while sandwich[i] < levels.shape[0]-1 and logl_samples[i] > logl_levels[sandwich[i] + 1]:
			sandwich[i] += 1

	for z in xrange(0, numResampleLogX):
		# For each level
		for i in range(0, levels.shape[0]):
			# Find the samples sandwiched by this level
			which = np.nonzero(sandwich == i)[0]
			logl_samples_thisLevel = [] # (logl, tieBreaker, ID)
			for j in xrange(0, len(which)):
				logl_samples_thisLevel.append(copy.deepcopy(logl_samples[which[j]]))
			logl_samples_thisLevel = sorted(logl_samples_thisLevel)
			N = len(logl_samples_thisLevel)

			# Generate intermediate logx values
			logx_max = levels[i, 0]
			if i == levels.shape[0]-1:
				logx_min = -1E300
			else:
				logx_min = levels[i+1, 0]
			Umin = np.exp(logx_min - logx_max)

			if N == 0 or numResampleLogX > 1:
				U = Umin + (1. - Umin)*np.random.rand(len(which))
			else:
				U = Umin + (1. - Umin)*np.linspace(1./(N+1), 1. - 1./(N+1), N)
			logx_samples_thisLevel = np.sort(logx_max + np.log(U))[::-1]

			for j in xrange(0, which.size):
				logx_samples[logl_samples_thisLevel[j][2]][z] = logx_samples_thisLevel[j]

				if j != which.size - 1:
					left = logx_samples_thisLevel[j+1]
				elif i == levels.shape[0]-1:
					left = -1E300
				else:
					left = levels[i+1][0]
				
				if j != 0:
					right = logx_samples_thisLevel[j-1]
				else:
					right = levels[i][0]

				logp_samples[logl_samples_thisLevel[j][2]][z] = np.log(0.5) + logdiffexp(right, left)

		logl = sample_info[:,1]/temperature
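		# Normalize the prior-mass weights, weight by the (tempered) likelihood, and
		# accumulate with logsumexp to obtain the evidence estimate log(Z) for this resample.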

		logp_samples[:,z] = logp_samples[:,z] - logsumexp(logp_samples[:,z])
		logP_samples[:,z] = logp_samples[:,z] + logl
		logz_estimates[z] = logsumexp(logP_samples[:,z])
		logP_samples[:,z] -= logz_estimates[z]
		P_samples[:,z] = np.exp(logP_samples[:,z])
		H_estimates[z] = -logz_estimates[z] + np.sum(P_samples[:,z]*logl)

		if plot:
			plt.figure(3)
			if z == 0:
				plt.subplot(2,1,1)
				plt.plot(logx_samples[:,z], sample_info[:,1], 'b.', markersize=1, label='Samples')
				plt.hold(True)
				plt.plot(levels[1:,0], levels[1:,1], 'r.', label='Levels')
				plt.legend(numpoints=1, loc='lower left')
				plt.title('Likelihood Curve')
				plt.ylabel(r'$\log(L)$')

				# Use all plotted logl values to set ylim
				combined_logl = np.hstack([sample_info[:,1], levels[1:, 1]])
				combined_logl = np.sort(combined_logl)
				lower = combined_logl[int(0.07*combined_logl.size)]
				upper = combined_logl[-1]
				diff = upper - lower
				lower -= 0.05*diff
				upper += 0.05*diff
				if zoom_in:
					plt.ylim([lower, upper])

				if numResampleLogX > 1:
					plt.draw()
				xlim = plt.gca().get_xlim()

		if plot:
			plt.subplot(2,1,2)
			plt.hold(False)
			plt.plot(logx_samples[:,z], P_samples[:,z], 'b.', markersize=1)
			plt.ylabel('Posterior Weights')
			plt.xlabel(r'$\log(X)$')
			plt.xlim(xlim)
			if numResampleLogX > 1:
				plt.draw()

			plt.savefig('sinewaves_likelihood.pdf', bbox_inches='tight')

	P_samples = np.mean(P_samples, 1)
	P_samples = P_samples/np.sum(P_samples)
	logz_estimate = np.mean(logz_estimates)
	logz_error = np.std(logz_estimates)
	H_estimate = np.mean(H_estimates)
	H_error = np.std(H_estimates)
	ESS = np.exp(-np.sum(P_samples*np.log(P_samples+1E-300)))

	print("log(Z) = " + str(logz_estimate) + " +- " + str(logz_error))
	print("Information = " + str(H_estimate) + " +- " + str(H_error) + " nats.")
	print("Effective sample size = " + str(ESS))

	# Resample to uniform weight
	N = int(ESS)
	posterior_sample = np.zeros((N, sample.shape[1]))
	w = P_samples
	w = w/np.max(w)
	if save:
		np.savetxt('weights.txt', w) # Save weights
	for i in xrange(0, N):
		while True:
			which = np.random.randint(sample.shape[0])
			if np.random.rand() <= w[which]:
				break
		posterior_sample[i,:] = sample[which,:]
	if save:
		np.savetxt("posterior_sample.txt", posterior_sample)

	if plot:
		if numResampleLogX > 1:
			plt.ioff()
		plt.show()

	return [logz_estimate, H_estimate, logx_samples]
コード例 #52
		cv_count_errs.append(np.mean(cv_count_err))
		cv_rel_errs.append(np.mean(cv_rel_err))
	print('{}-{}: MCE {:.4f}, STD {:.4f}, MRE {:.4f}, STD {:.4f}'.format(method,dataset, np.mean(count_err), np.std(count_err), np.mean(rel_err), np.std(rel_err)))
	print('CV-{}-{}: MCE {:.4f}, STD {:.4f}, MRE {:.4f}, STD {:.4f}'.format(method,dataset, np.mean(cv_count_errs), np.std(cv_count_errs), np.mean(cv_rel_errs), np.std(cv_rel_errs)))
	print('{}-{}: counting time:{:.5f}'.format(method,dataset, np.mean(counting_times)))
	lines.append('{}-{}: MCE {:.4f}, STD {:.4f}, MRE {:.4f}, STD {:.4f}'.format(method,dataset, np.mean(count_err), np.std(count_err), np.mean(rel_err), np.std(rel_err)))
	cv_lines.append('CV-{}-{}: MCE {:.4f}, STD {:.4f}, MRE {:.4f}, STD {:.4f}'.format(method,dataset, np.mean(cv_count_errs), np.std(cv_count_errs), np.mean(cv_rel_errs), np.std(cv_rel_errs)))
	time_lines.append('{}-{}: counting time:{:.5f}'.format(method,dataset, np.mean(counting_times)))
	# save the error
	result_file = os.path.join(result_root_dir, method, dataset, 'err_summary.txt')
	with open(result_file, 'w+') as f:
		f.write('Count err summary:\n')
		for i, err in enumerate(count_err):
			f.write('{}: count err {:.4f}, Relative err {:.4f}\n'.format(tags[i], err, rel_err[i]))
	# save the count err and relative err
	np.savetxt(os.path.join(result_root_dir, method, dataset, 'count_err.txt'), count_err)
	np.savetxt(os.path.join(result_root_dir, method, dataset, 'rel_err.txt'), rel_err)

log_file = os.path.join(result_root_dir, method, '{}_summary.txt'.format(method))
with open(log_file, 'w+') as f:
	for line in lines:
		f.write(line+'\n')

log_file = os.path.join(result_root_dir, method, '{}_cv_summary.txt'.format(method))
with open(log_file, 'w+') as f:
	for line in cv_lines:
		f.write(line+'\n')

log_file = os.path.join(result_root_dir, method, '{}_time_summary.txt'.format(method))
with open(log_file, 'w+') as f:
	for line in time_lines:
コード例 #53
def saveSvm(svmDir, experimentName, svmWeights, svmBias, featureScale):
    svmWeightsPath, svmBiasPath, svmFeatScalePath = getSvmModelPaths(
        svmDir, experimentName)
    np.savetxt(svmWeightsPath, svmWeights)
    np.savetxt(svmBiasPath, svmBias)
    np.savetxt(svmFeatScalePath, featureScale)
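A matching loader is not shown in this example; a minimal sketch, assuming the same getSvmModelPaths helper and np import used above, might look like:

def loadSvm(svmDir, experimentName):
    # Hypothetical counterpart to saveSvm: read back the arrays written with np.savetxt.
    svmWeightsPath, svmBiasPath, svmFeatScalePath = getSvmModelPaths(
        svmDir, experimentName)
    svmWeights = np.loadtxt(svmWeightsPath)
    svmBias = np.loadtxt(svmBiasPath)
    featureScale = np.loadtxt(svmFeatScalePath)
    return svmWeights, svmBias, featureScale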
コード例 #54
def translational_symmetry_divide(pathdir, filename, err_sym_divide_factor=-1):
    try:
        pathdir_weights = "results/NN_trained_models/models/"

        # load the data
        n_variables = np.loadtxt(pathdir + "/%s" % filename,
                                 dtype='str').shape[1] - 1
        variables = np.loadtxt(pathdir + "/%s" % filename, usecols=(0, ))

        if n_variables == 1:
            print(filename, "just one variable for ADD")
            # if there is just one variable you have nothing to separate
            return (0, 0, 0)
        else:
            for j in range(1, n_variables):
                v = np.loadtxt(pathdir + "/%s" % filename, usecols=(j, ))
                variables = np.column_stack((variables, v))

        f_dependent = np.loadtxt(pathdir + "/%s" % filename,
                                 usecols=(n_variables, ))
        f_dependent = np.reshape(f_dependent, (len(f_dependent), 1))

        factors = torch.from_numpy(variables)
        if is_cuda:
            factors = factors.cuda()
        else:
            factors = factors
        factors = factors.float()

        product = torch.from_numpy(f_dependent)
        if is_cuda:
            product = product.cuda()
        else:
            product = product
        product = product.float()

        # load the trained model and put it in evaluation mode
        if is_cuda:
            model = SimpleNet(n_variables).cuda()
        else:
            model = SimpleNet(n_variables)
        model.load_state_dict(torch.load(pathdir_weights + filename + ".h5"))
        model.eval()

        models_one = []
        models_rest = []

        with torch.no_grad():
            if rmse_loss(model(factors), product) > 0.01:
                return (0, 0, 0)

            a = 1.2
            # make the shift x->x*a and y->y*a for 2 variables at a time (different variables)
            for i in range(0, n_variables, 1):
                for j in range(0, n_variables, 1):
                    if i < j:
                        fact_translate = factors.clone()
                        fact_translate[:, i] = fact_translate[:, i] * a
                        fact_translate[:, j] = fact_translate[:, j] * a

                        if err_sym_divide_factor == -1:
                            error_threshold = 7 * rmse_loss(
                                model(factors), product)
                        else:
                            error_threshold = err_sym_divide_factor * rmse_loss(
                                model(factors), product)
                        print(filename, error_threshold)
                        error = torch.sqrt(
                            torch.mean((product - model(fact_translate))**
                                       2)) / torch.sqrt(torch.mean(product**2))

                        print("ERROR: ", abs(error))
                        if abs(error) < error_threshold:
                            file_name = filename + "-translated_divide"
                            data_translated = variables
                            data_translated[:, i] = variables[:, i] / variables[:, j]
                            data_translated = np.delete(data_translated,
                                                        j,
                                                        axis=1)
                            data_translated = np.column_stack(
                                (data_translated, f_dependent))
                            try:
                                os.mkdir("results/translated_data_divide/")
                            except:
                                pass
                            np.savetxt(
                                "results/translated_data_divide/" + file_name,
                                data_translated)
                            print("SUCCESS", i, j)
                            return (file_name, i, j)

    except Exception as e:
        print(e)
        return (0, 0, 0)

    return (0, 0, 0)
コード例 #55
def make_flagsummary_uvdist_data(myvis, nbin=25, output_folder="perfield_flagfraction",
                                 intent='*', overwrite=False):
    '''
    Make a binned flagging fraction vs. uv-distance.
    '''

    from casatools import ms

    from casatasks import flagdata

    myms = ms()

    myms.open(myvis)

    mymsmd = myms.metadata()

    # Get VLA antenna ID
    antenna_names = mymsmd.antennanames() #returns a list that corresponds to antenna ID

    # Get fields matching intent
    fieldsnums = mymsmd.fieldsforintent(intent)

    if len(fieldsnums) == 0:
        raise ValueError("No calibrator intents are in this MS.")

    fields = np.array(mymsmd.fieldnames())[fieldsnums]

    # Get SPWs
    spw_list = mymsmd.spwsforfield(fieldsnums[0])

    casalog.post(f"Selecting on fields: {fields}")
    print(f"Selecting on fields: {fields}")

    for field in fields:

        casalog.post(f"Creating uvdist flagging fraction for {field}")
        print(f"Creating uvdist flagging fraction for {field}")

        baseline_flagging_table = []

        save_name = f"{output_folder}/field_{field}_flagfrac_uvdist.txt"

        if os.path.exists(save_name) and overwrite:
            os.system(f"rm {save_name}")

        if not os.path.exists(save_name):

            for spw in spw_list:

                flag_dict = flagdata(vis=myvis, mode='summary', basecnt=True, action='calculate',
                                    field=field, spw=str(spw))

                # Make plot of flagging statistics

                # Get information for flagging percentage vs. uvdistance
                myms.selectinit()
                myms.selectchannel(1, 0, 1, 1) # look at data just for first channel - easily translates
                gantdata = myms.getdata(['antenna1','antenna2','uvdist']) # get the points I need

                # create a dictionary with flagging info
                base_dict = create_baseline_dict(antenna_names, gantdata)

                # match flagging data to dictionary entry
                datamatch = flag_match_baseline(flag_dict['baseline'], base_dict)

                # 25 is the number of uvdist bins such that there is minimal error in uvdist.
                binned_stats, barwidth = bin_statistics(datamatch, nbin)

                spw_vals = [spw] * len(binned_stats[0])
                field_vals = [field] * len(binned_stats[0])

                baseline_flagging_table.append([field_vals, spw_vals, binned_stats[0], binned_stats[1]])

            baseline_flagging_table_hstack = np.hstack(baseline_flagging_table).T

            out_table = np.zeros(baseline_flagging_table_hstack.shape[0],
                                dtype=[("field", 'U32'),
                                        ('spw', int),
                                        ('uvdist', float),
                                        ('frac', float)])

            out_table['field'] = baseline_flagging_table_hstack[:, 0].astype('U32')
            out_table['spw'] = baseline_flagging_table_hstack[:, 1].astype(int)
            out_table['uvdist'] = baseline_flagging_table_hstack[:, 2].astype(float)
            out_table['frac'] = baseline_flagging_table_hstack[:, 3].astype(float)

            np.savetxt(save_name, out_table, fmt='%s %d %f %f', header="field,spw,uvdist,frac")


    mymsmd.close()
    myms.close()
コード例 #56
ファイル: ISIs_data.py プロジェクト: ModelDBRepository/189153
    result = []
    for l in lista:
        if l.endswith('spine_plasticity_spine_1_head_Ca1.txt'):
            result.append(l)

    return result
def find_isi(fname):
    return float(fname.split('ISI_')[-1].split('_')[0])
if __name__ == '__main__':
    for fname_base in fname_bases:
        fnames = get_fnames(fname_base)
        fnames.sort()
        res = np.zeros((len(fnames),5))
        for i,fname in enumerate(fnames):
            print fname
            res[i,0] = find_isi(fname)
            f = open(fname)
            header = f.readline().split()
            data = np.loadtxt(f)
            which = [j for j,x in enumerate(header) if 'weight' in x][0]
            which2 = [j for j,x in enumerate(header) if 'Ca' in x][0]
            dt = data[1,0]-data[0,0]
            long_hi,long_lo = duration(data[:,which2],dt)
            res[i,1] = data[-1,which]
            res[i,2] =  data[:,which2].max()
            res[i,3] = long_hi
            res[i,4] = long_lo
        #print res
        print fname_base+'s.txt'
        np.savetxt(fname_base+'s.txt',res,comments='',header='ISI weight')
コード例 #57
 i = 0
 min1 = 4.0008
 max1 = 88.9919
 min2 = -14.1366
 max2 = 16.4287
 min3 = 0
 max3 = 28.1513
 min4 = 0
 max4 = 28.1513
 data = sio.loadmat('trainset150000sample.mat')
 trainset = data['trainset']
 data1 = sio.loadmat('aimset150000sample.mat')
 aimset = data1['aimset']
 model = build_model()  # Build Model(3 layers, LSTM)
 hist = model.fit(trainset,
                  aimset,
                  nb_epoch=500,
                  batch_size=100,
                  verbose=2,
                  validation_split=0.2)
 history = hist.history.items()
 mkdir('result0827')
 model.save_weights(os.path.join('result0827', '50vprediction150000samplev1.h5'))  # avoid '\5' being read as an octal escape
 data2 = sio.loadmat('valid10.mat')
 validset = data2['valid']
 prediction = model.predict(validset)
 result_file = os.path.join(os.getcwd(), 'result0827')
 result_file = os.path.join(result_file,
                            '150000sample50vprediction10to1v1.txt')
 numpy.savetxt(result_file, prediction)
コード例 #58
def make_flagsummary_freq_data(myvis, output_folder='perfield_flagfraction',
                               intent="*", overwrite=False):
    '''
    This mimics the summary plots made by flagdata, but removes the interactive
    part so we can save it.
    '''

    from casatools import ms

    from casatasks import flagdata

    myms = ms()

    myms.open(myvis)

    mymsmd = myms.metadata()

    fieldsnums = mymsmd.fieldsforintent(intent)

    if len(fieldsnums) == 0:
        raise ValueError("No calibrator intents are in this MS.")

    fields = np.array(mymsmd.fieldnames())[fieldsnums]

    spw_nums = mymsmd.spwsforscan(1)

    casalog.post(f"Selecting on fields: {fields}")
    print(f"Selecting on fields: {fields}")

    for field in fields:

        casalog.post(f"Creating freq. flagging fraction for {field}")
        print(f"Creating freq. flagging fraction for {field}")

        save_name = f"{output_folder}/field_{field}_flagfrac_freq.txt"

        if os.path.exists(save_name) and overwrite:
            os.system(f"rm {save_name}")

        if not os.path.exists(save_name):

            flag_dict = flagdata(vis=myvis, mode='summary', spwchan=True, action='calculate',
                                field=field)

            flag_data = []

            for spw in spw_nums:
                spw_freqs = mymsmd.chanfreqs(spw) / 1e9  # GHz

                spw_flagfracs = []
                for chan in range(len(spw_freqs)):
                    spw_flagfracs.append(flag_dict['spw:channel'][f"{spw}:{chan}"]['flagged'] / flag_dict['spw:channel'][f'{spw}:{chan}']['total'])

                # Make an equal length SPW column
                spw_labels = [spw] * len(spw_freqs)

                flag_data.append([spw_labels, np.arange(len(spw_freqs)), spw_freqs, spw_flagfracs])

            output_data = np.hstack(flag_data).T

            np.savetxt(save_name, output_data, header="spw,channel,freq,frac")

        else:
            casalog.post(message="File {} already exists. Skipping".format(save_name),
                         origin='make_qa_tables')


    mymsmd.close()
    myms.close()
コード例 #59
def writeFGAFile(data, radius, resStep, padWidth=1):
    # Set the size of the box and
    # the resolution step...user inputs.
    # Also calculate diameter in res units
    DRes = int(2 * radius / resStep + 1)
    print('DRes:', DRes)
    # Set scaling values
    windScale = 30
    gravScale = 30

    allVariables = data.variables

    # Sometimes we have time_bnds, lat_bnds, etc.
    # Keep anything that doesn't have 'bnds'
    varNames = list(
        filter(lambda x: 'bnds' not in x, list(allVariables.keys())))
    # Remove the dimensions
    varNames = list(filter(lambda x: x not in data.dimensions, varNames))

    latPoints = allVariables['latitude'][:] * np.pi / 180
    latStep = latPoints[1] - latPoints[0]

    lonPoints = allVariables['longitude'][:] * np.pi / 180
    lonStep = lonPoints[1] - lonPoints[0]

    numLatPoints = len(latPoints)
    numLonPoints = len(lonPoints)

    #### For horizontal vector field ###
    # numLatPoints = 10
    # numLonPoints = 10
    #
    # directions = np.zeros(numLatPoints * numLonPoints).reshape((numLatPoints, numLonPoints))
    # u = np.cos(directions)
    # v = np.sin(directions)
    # plt.quiver(u, v)
    # plt.show()
    #
    # latPoints = np.arange(-90 + 180 / (numLatPoints + 1),
    #                       90 - 180 / (numLatPoints + 1) + 0.01,
    #                       180 / (numLatPoints + 1)) * np.pi / 180
    # lonPoints = np.arange(0, 360 - 360 / numLonPoints, 360 / numLonPoints)

    u, v = allVariables[varNames[0]][0], allVariables[varNames[1]][0]
    # print(u[:2], v[:2])
    # plt.quiver(lonPoints * 180 / np.pi, latPoints * 180 / np.pi, u, v)
    # plt.savefig('plotDownSample.png', dpi=300)
    # plt.show()

    ################# TRANSFORM TO 3D VECTORS ##################
    print('Transforming to 3D vectors...')
    vects3D = np.zeros((numLatPoints, numLonPoints, 3))
    vects3D[:, :, 0] = u
    vects3D[:, :, 1] = v
    # print('Initial 3D vectors:', vects3D)
    # Make matrix of exact locations of the vectors
    exactVectLocs = np.zeros((numLatPoints, numLonPoints, 3))

    ### Rotate each 2D vector to its 3D counterpart
    for i, lat in enumerate(latPoints, 0):
        for j, lon in enumerate(lonPoints, 0):
            # Find the rotation matrix...look up on Wikipedia
            # It is a pair of two transformations, one about
            # the z-axis for proper latitude orientation,
            # and one about the y-axis for proper
            # longitude orientation.
            rotMatrix = np.dot(
                [[np.cos(lon), -np.sin(lon), 0], [np.sin(lon),
                                                  np.cos(lon), 0], [0, 0, 1]],
                # pi / 2 - lat gives you phi
                [[np.cos(np.pi / 2 - lat), 0,
                  np.sin(np.pi / 2 - lat)], [0, 1, 0],
                 [-np.sin(np.pi / 2 - lat), 0,
                  np.cos(np.pi / 2 - lat)]])
            # Before we rotate, we need to convert to a "top-down" view,
            # so the +x-axis will point down and the +y-axis will point right.
            # (x,y,0) ==> (-y,x,0)
            np.dot(rotMatrix,
                   np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]]),
                   out=rotMatrix)
            # Apply the entire rotation matrix
            # print(vects3D[i,j], '\t\t==> ', end='')
            vects3D[i, j] = np.dot(rotMatrix, vects3D[i, j])
            # print(vects3D[i,j], '\t\t==> ', end='')

            ### Calculate the 3D location on the sphere (spherical-to-Cartesian conversion)
            exactVectLocs[i, j] = radius * np.array([
                np.sin(np.pi / 2 - lat) * np.cos(lon),
                np.sin(np.pi / 2 - lat) * np.sin(lon),
                np.cos(np.pi / 2 - lat)
            ])
            # print('at', exactVectLocs[i, j], '({:.2f}, {:.2f})'.format(lat * 180 / np.pi, lon * 180 / np.pi))
    print('Transform complete!')
    # print('3D Transformation after:', vects3D)
    # print()
    # print('Exact vector locations:', exactVectLocs)
    # sys.exit(1)

    ################# GRAVITY ##################

    # # Next is applying a gravitational field...
    # # The application will go on any vector which is currently 0
    print('Applying gravity...')
    padRadius = radius + padWidth * resStep
    fgaVectors = np.transpose(
        np.mgrid[-padRadius:padRadius + resStep / 2:resStep,
                 -padRadius:padRadius + resStep / 2:resStep,
                 -padRadius:padRadius + resStep / 2:resStep], (1, 2, 3, 0))
    # Normalize and flip vectors that
    # are > radius away...
    norms = np.linalg.norm(fgaVectors, axis=-1)
    fgaVectors /= norms[:, :, :, None]
    fgaVectors[norms > radius] = -1 * fgaVectors[norms > radius]
    fgaVectors *= gravScale
    print(fgaVectors.shape)
    print('Application complete!')

    ################# INTERPOLATION ######################

    print('Interpolating vectors...')
    # So here is the plan: We will first make
    # a 3D (really 4D) array where each element
    # is the actual [x, y, z] coordinate across
    # the whole box.
    # Then, we go through each point, and see if it's
    # "close enough" to the surface to the sphere
    # (not inside).
    # If it is, then we calculate that point's
    # latitude and longitude values, using a conversion
    # to spherical coordinates.
    # We then check which 2 latitudes and longitudes we
    # have actual data on. Each of our [x,y,z]
    # points will fall in a rectangle where the corners
    # have actual wind data.
    # At this point, we can interpolate horizontally
    # (using the 2 longitudal sides), and interpolate
    # vertically (using the 2 latitude sides) across
    # both angle and magnitude. Do this for every
    # point and we have finished our interpolation.

    ### LET'S GET STARTED! ###

    # First we need a way to iterate through all possible
    # point values, along with their respective indices,
    # so we can assign the proper values in fgaVectors.
    # Because this a square box, this is made easy
    # through itertools.product(). We make one each for
    # the point values and index values. We then iterate
    # through them at the same time using zip().

    # From our padding, value space goes from radius
    # to radius. But our index space now starts at
    # the padWidth, and goes the length of the value space.
    valueSpace = np.arange(-radius, radius + 1e-10, resStep)
    indexSpace = np.arange(padWidth, padWidth + len(valueSpace))
    valueProduct = product(valueSpace, valueSpace, valueSpace)
    indexProduct = product(indexSpace, indexSpace, indexSpace)
    for point, indices in zip(valueProduct, indexProduct):
        x, y, z = point
        xi, yi, zi = indices
        distFromOrigin = np.linalg.norm([x, y, z])
        # If the distance from the point to
        # the origin is "close enough", then we
        # interpolate on this point. Here, "close enough"
        # means above the sphere and less than resStep away.
        if 0 <= distFromOrigin - radius < resStep:
            # print('({}, {}, {}) ==> '.format(x, y, z), end='')
            # print('[{}, {}, {}] ==> '.format(xi, yi, zi), end='')

            # Find the spherical coordinates of this point.
            # The method returns the latitude and longitude
            # in the correct ranges.
            rho, lat, lon = cart2sph(x, y, z)
            # print('({:.2f}, {:.2f}, {:.2f}) ==> '.format(rho, lat * 180 / np.pi, lon * 180 / np.pi), end='')

            # Use numpy's fancy searchsorted function to find
            # the closest locations for latitude and longitude.
            # searchsorted returns a right associated index.
            # The reason for the mod is that if the value is
            # off the deep end, then searchsorted returns
            # the length of the array, which is invalid index.
            # So mod the length the array to turn it 0, and
            # the left index (which should wrap around), works as
            # intended.

            boxLocs, weightLat, weightLon = findBoxPointsAndWeights(
                lat, lon, latPoints, lonPoints)

            # Point 4 is assumed to be diagonally opposite Point 1,
            # and Point 2 is assumed to be horizontally opposite Point 1.
            # First we need to find the two vectors directly
            # above and below (left and right also works) our
            # target location.
            interAbove = (1 - weightLon) * vects3D[
                boxLocs[0]] + weightLon * vects3D[boxLocs[1]]
            interBelow = (1 - weightLon) * vects3D[
                boxLocs[2]] + weightLon * vects3D[boxLocs[3]]
            # Now we interpolate vertically
            # between these two points

            interedVec = (1 - weightLat) * interBelow + weightLat * interAbove

            # print('({:.2f}, {:.2f}, {:.2f})'.format(interedVec[0], interedVec[1], interedVec[2]))

            # We're done with the interpolation with this vector,
            # so now using the index values all the way above, assign
            # to fgaVectors...scaling as necessary
            fgaVectors[xi, yi, zi] = interedVec * windScale

    ############# WRITING TO FILE ##############

    # AND WE'RE DONE. Now unwrap, add in resolution and box data and write to file
    # unwrap
    # print(fgaVectors)
    print('Unwrapping and writing...')
    print(DRes, '==>', DRes + 2 * padWidth)
    DRes += 2 * padWidth
    fgaVectors = fgaVectors.reshape((DRes**3, 3), order='F')

    fgaVectors = np.vstack(([[DRes, DRes, DRes],
                             [-padRadius, -padRadius, -padRadius],
                             [padRadius, padRadius, padRadius]], fgaVectors))
    with open('.//{}.fga'.format(varNames[0]), 'wb') as f:
        np.savetxt(f, fgaVectors, delimiter=',', newline=',\r\n', fmt='%4.7f')
    # with open('.//noGravity.fga', 'wb') as f:
    #     np.savetxt(f, fgaVectors, delimiter=',', newline=',\r\n', fmt='%4.7f')
    print('Unwrapping and writing complete!')
コード例 #60
    conn.close()
    return emotion_vectors


f_emo = open('jpg_url.txt', 'r')
data = f_emo.read().split('\n')
vectors = np.zeros((28, MAXN, 8))
res = []
for url in data:
    if (url != ''):
        print url
        if ('make' in url):
            No = 0
        elif ('east' in url):
            No = 1
        else:
            No = 2
        last_res = res
        pattern = re.compile(r'\(\d*\)')
        m = pattern.search(url)
        if (m):
            timeline = int(m.group()[1:-1])
        res = parse_emotion(url)
        if (res == []):
            res = last_res
        print str(res) + '\n'
        vectors[timeline - 1, No, :] = np.array(res[0])
        time.sleep(1)
vectors = vectors.reshape((28, MAXN * 8))
np.savetxt('emotion_vector.txt', vectors)
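The flattened matrix can later be read back and restored to its (timeline, image, emotion) shape; a small illustrative sketch, assuming the same MAXN as above:

loaded = np.loadtxt('emotion_vector.txt').reshape((28, MAXN, 8))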