Example #1
def find_homog_trans(points_a, points_b, err_threshold=0, rot_0=None):
    """Finds a homogeneous transformation matrix that, when applied to 
    the points in points_a, minimizes the squared Euclidean distance 
    between the transformed points and the corresponding points in 
    points_b. Both points_a and points_b are (n, 3) arrays.
    """
    #Align the centroids of the two point clouds
    cent_a = sp.average(points_a, axis=0)
    cent_b = sp.average(points_b, axis=0)
    points_a = points_a - cent_a
    points_b = points_b - cent_b
    
    #Define the error as a function of a rotation vector in R^3;
    #leastsq squares and sums these residuals internally, so they
    #are returned unsquared
    rot_cost = lambda rot: (sp.dot(vec_to_rot(rot), points_a.T).T
                    - points_b).flatten()
    
    #Run the optimization
    if rot_0 is None:
        rot_0 = sp.zeros(3)
    rot = opt.leastsq(rot_cost, rot_0)[0]
    
    #Compute the final homogeneous transformation matrix
    homog_1 = sp.eye(4)
    homog_1[0:3, 3] = -cent_a
    homog_2 = sp.eye(4)
    homog_2[0:3,0:3] = vec_to_rot(rot)
    homog_3 = sp.eye(4)
    homog_3[0:3,3] = cent_b
    homog = sp.dot(homog_3, sp.dot(homog_2, homog_1))
    return homog, rot
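
# A hedged usage sketch, not part of the original source: it assumes `sp` and
# `opt` are the usual scipy-style aliases and supplies a hypothetical
# `vec_to_rot` helper (rotation vector in R^3 -> 3x3 rotation matrix) built on
# scipy's Rotation class.
import numpy as sp                       # stands in for the old scipy namespace
import scipy.optimize as opt
from scipy.spatial.transform import Rotation

def vec_to_rot(rot_vec):
    """Assumed helper: rotation vector -> rotation matrix (Rodrigues)."""
    return Rotation.from_rotvec(rot_vec).as_matrix()

points_a = sp.random.rand(10, 3)
points_b = sp.dot(vec_to_rot([0.1, -0.2, 0.3]), points_a.T).T + [1.0, 2.0, 3.0]
homog, rot = find_homog_trans(points_a, points_b)
points_a_h = sp.hstack([points_a, sp.ones((10, 1))])    # homogeneous coordinates
print(sp.allclose(sp.dot(homog, points_a_h.T).T[:, :3], points_b, atol=1e-4))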
def get_cluster_distribution(g, method = 'average'):
	""" 
		The clustering coefficient distribution grouped by degree: for each degree k
		occurring in graph g, the average (or median) clustering coefficient of the
		nodes with degree k.

		Parameters:
		-----------
			g: NetworkX Graph
			method: str, ('average', 'median'), (default = 'average')
		Returns:
		--------
			xdata, ydata: a 2-tuple of arrays, (k, avg_cc(V_k)), where V_k are the nodes with degree k
	"""
	g = to_undirected(g)
	k = nx.clustering(g)
	d = g.degree()
	ck = defaultdict(list)
	for n in g.nodes_iter():
		ck[d[n]].append(k[n])
	xdata, ydata = list(), list()
	
	if method == 'average':
		for x, y in ifilter(lambda x: x[0] > 1 and average(x[1]) > 0, ck.iteritems()):
			xdata.append(x)
			ydata.append(average(y))
	elif method == 'median':
		for x, y in ifilter(lambda x: x[0] > 1 and median(x[1]) > 0, ck.iteritems()):
			xdata.append(x)
			ydata.append(median(y))
	else:
		raise ValueError("method should be 'average' or 'median'")
	xdata = array(xdata)
	ydata = array(ydata)
	return(xdata, ydata)
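
# Hedged usage sketch: the snippet targets Python 2 and NetworkX 1.x
# (nodes_iter, iteritems, itertools.ifilter) and assumes module-level imports
# of nx, defaultdict, ifilter, average, median and array, plus the
# to_undirected helper from the same project.
import networkx as nx

g = nx.barabasi_albert_graph(1000, 3)
k, cc = get_cluster_distribution(g, method='average')
# k[i] is a degree value; cc[i] is the mean clustering coefficient of the
# nodes having that degree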
 def calc_velocity(vol_flow, side):
     r"""Calculates the velocity field for a rate BC"""
     #
     x_vel = 0.0
     z_vel = 0.0
     avg_fact = namespace.sim_params['avg_fact']
     #
     if side == 'top':
         avg_b = sp.average(map_data_field.data_map[-1, :])
         axis_len = avg_fact * len(map_data_field.data_map[-1, :])
         z_vel = vol_flow/(avg_b * axis_len)
     elif side == 'bottom':
         vol_flow = -vol_flow
         avg_b = sp.average(map_data_field.data_map[0, :])
         axis_len = avg_fact * len(map_data_field.data_map[0, :])
         z_vel = vol_flow/(avg_b * axis_len)
     elif side == 'left':
         vol_flow = -vol_flow
         avg_b = sp.average(map_data_field.data_map[:, 0])
         axis_len = avg_fact * len(map_data_field.data_map[:, 0])
         x_vel = vol_flow/(avg_b * axis_len)
     elif side == 'right':
         avg_b = sp.average(map_data_field.data_map[:, -1])
         axis_len = avg_fact * len(map_data_field.data_map[:, -1])
         x_vel = vol_flow/(avg_b * axis_len)
     else:
         raise ValueError('Invalid side given: '+side)
     #
     return 'uniform ({} 0.0 {})'.format(x_vel, z_vel)
def standardize_vacuum_quadratures(args, h5):
    vacuum_quadratures = h5["vacuum_quadratures"][:]
    corrected_vacuum = correct_intrastep_drift(vacuum_quadratures)
    create_dataset(args, h5,
                   "corrected_vacuum_quadratures", data=corrected_vacuum)
    mean = average(corrected_vacuum, axis=1)
    centered_vacuum = corrected_vacuum - mean[:, None]
    create_dataset(args, h5,
                   "centered_vacuum_quadratures", data=centered_vacuum)
    return average(std(centered_vacuum, axis=1))
Example #5
    def update(self):
        """ former set image data."""
        """ 
        has to check: -is average? -is dFF? -flag to show? -only one?
       
        average behaviour: take all that are active, average and overlay
        
        dFF behaviour: if multiple channels are active, the dFF are over
        layed and colored according to their channel

        if only one channel is active:        
        raw is in grayscale, dFF is in glow color map
        """
        ### for implementation of global lut mod
#        current_lut = self.LUTwidgets.currentIndex()

        # work only on those that are active
        for n in range(self.data.nFiles):
            if not self.Options.view['show_flags'][n]: # hide inactive
                self.ImageItems[n].hide()
                self.ImageItems_dFF[n].hide()

                
            if self.Options.view['show_flags'][n]: # work only on those that are active
                
                if self.Options.view['show_dFF']: # when showing dFF
                
                    if self.Options.view['show_monochrome']: # when in mono glow mode
                        self.ImageItems[n].show()
                    else:
                        self.ImageItems[n].hide()
                        
                    if self.Options.view['show_avg']: # when showing avg
                        self.ImageItems_dFF[n].setImage(sp.average(self.data.dFF[:,:,:,n],axis=2))
                        self.ImageItems[n].setImage(sp.average(self.data.raw[:,:,:,n],axis=2))

                    else: 
                        self.ImageItems_dFF[n].setImage(self.data.dFF[:,:,self.frame,n])
                        self.ImageItems[n].setImage(self.data.raw[:,:,self.frame,n])


                    self.ImageItems_dFF[n].show()
                    
                else: # when showing raw
                    self.ImageItems_dFF[n].hide() # no dFF
                    if self.Options.view['show_avg']:
                        self.ImageItems[n].setImage(sp.average(self.data.raw[:,:,:,n],axis=2))
                    else:
                        self.ImageItems[n].setImage(self.data.raw[:,:,self.frame,n])
                        
                    self.ImageItems[n].show()
                    
                self.ImageItems[n].setLevels(self.Data_Display.LUT_Controlers.raw_levels[n])
                self.ImageItems_dFF[n].setLevels(self.Data_Display.LUT_Controlers.dFF_levels[n])        
        pass
Example #6
 def update_frame(self):
     for ind in self.active_inds:
         if self.Main.Options.view['show_avg']:
             self.ImageItems_dFF[ind].setImage(sp.average(self.Main.Data.dFF[:,:,:,ind],axis=2))
             self.ImageItems[ind].setImage(sp.average(self.Main.Data.raw[:,:,:,ind],axis=2))
         else: 
             self.ImageItems_dFF[ind].setImage(self.Main.Data.dFF[:,:,self.frame,ind])
             self.ImageItems[ind].setImage(self.Main.Data.raw[:,:,self.frame,ind])
     
     self.update_levels()
     pass
    def against_the_field(self):
        wins = scipy.zeros((len(self.realTeams), len(self.weekly['OP'])))
        for (i, t) in enumerate(self.realTeams):
            for (j, w) in enumerate(self.weekly['PTS FOR']):
                for t2 in [el for el in self.realTeams if el != t]:
                    wins[i, j] += int(self.dataDic[t][w] > self.dataDic[t2][w]) if self.dataDic[t][w] else 0.0
                    wins[i, j] += .5 * int(self.dataDic[t][w] == self.dataDic[t2][w]) if self.dataDic[t][w] else 0.0

        losses = 11. - wins

        return scipy.average(wins, axis=1), scipy.std(wins, axis=1), scipy.average(losses, axis=1), scipy.std(losses, axis=1)
	def get_network_reading(self):

		# Update the readings for all nodes
		self.update_all_readings()

		# Get the current readings from all nodes
		node_readings = []
		for node_name in self.nodes:

			node_readings.append(self.nodes[node_name].stable_field_prediction)

		#node_readings = np.array(node_readings)

		network_map = np.full((25, 25), 0.0)  # float, so averaged values are not truncated
		network_confidence = np.zeros((25,25))

		# Go through each cell and get values from node predictions
		for x_index in range(25):
			for y_index in range(25):

				cell_vals = []

				index = (x_index, y_index)

				for plane in node_readings:

					# Get the value
					val = plane[index]

					if not np.isnan(val):

						cell_vals.append(val)

				#if x_index == 13 and y_index == 13:

				#	print cell_vals

				cell_avg = scipy.average(np.array(cell_vals))

				if not np.isnan(cell_avg):
					network_map[index] = cell_avg

					network_confidence[index] = scipy.std(np.array(cell_vals))

				else:

					network_map[index] = 0
					network_confidence[index] = 0

		# Get the average
		#network_avg = scipy.average(node_readings)

		# Get the standard deviation
		#network_std = scipy.std(node_readings)

		return network_map, network_confidence
Example #9
  def _bess(npts, x1, x2, x1err, x2err, cerr):
    """
    Do the entire regression calculation for 4 slopes:
      OLS(Y|X), OLS(X|Y), bisector, orthogonal
    """

    # calculate sigma's for datapoints using length of confidence intervals
    sig11var = sum(x1err ** 2) / npts
    sig22var = sum(x2err ** 2) / npts
    sig12var = sum(cerr) / npts

    # calculate means and variances
    x1av = scipy.average(x1)
    x1var = scipy.std(x1) ** 2
    x2av = scipy.average(x2)
    x2var = scipy.std(x2) ** 2
    covar_x1x2 = sum((x1 - x1av) * (x2 - x2av)) / npts

    # compute the regression slopes for OLS(X2|X1), OLS(X1|X2), 
    # bisector and orthogonal
    b = scipy.zeros(4)
    b[0] = (covar_x1x2 - sig12var) / (x1var - sig11var)
    b[1] = (x2var - sig22var) / (covar_x1x2 - sig12var)
    b[2] = (b[0] * b[1] - 1 + scipy.sqrt((1 + b[0] ** 2) * \
           (1 + b[1] ** 2))) / (b[0] + b[1])
    b[3] = 0.5 * ((b[1] - 1 / b[0]) + scipy.sign(covar_x1x2) * \
           scipy.sqrt(4 + (b[1] - 1 / b[0]) ** 2))

    # compute intercepts for above 4 cases:
    a = x2av - b * x1av

    # set up variables to calculate standard deviations of slope and intercept
    xi = []
    xi.append(((x1 - x1av) * (x2 - b[0] * x1 - a[0]) + b[0] * x1err ** 2) / \
              (x1var - sig11var))
    xi.append(((x2 - x2av) * (x2 - b[1] * x1 - a[1]) + x2err ** 2) / \
              covar_x1x2)
    xi.append((xi[0] * (1 + b[1] ** 2) + xi[1] * (1 + b[0] ** 2)) / \
              ((b[0] + b[1]) * scipy.sqrt((1 + b[0] ** 2) * (1 + b[1] ** 2))))
    xi.append((xi[0] / b[0] ** 2 + xi[1]) * b[3] / \
              scipy.sqrt(4 + (b[1] - 1 / b[0]) ** 2))
    zeta = []
    for i in xrange(4):
      zeta.append(x2 - b[i] * x1 - x1av * xi[i])

    # calculate  variance for all a and b
    bvar = scipy.zeros(4)
    avar = scipy.zeros(4)
    for i in xrange(4):
      bvar[i] = scipy.std(xi[i]) ** 2 / npts
      avar[i] = scipy.std(zeta[i]) ** 2 / npts

    return a, b, avar, bvar, xi, zeta
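
# Hedged usage sketch for the Python 2 / old-scipy environment this snippet
# assumes (xrange, scipy.average/std/sign/sqrt/randn), and assuming _bess is
# callable directly: synthetic data with true slope 2.
import scipy

npts = 200
x1 = scipy.linspace(0.0, 10.0, npts)
x2 = 2.0 * x1 + 1.0 + 0.5 * scipy.randn(npts)
x1err = scipy.zeros(npts) + 0.1          # per-point 1-sigma errors on x1
x2err = scipy.zeros(npts) + 0.5          # per-point 1-sigma errors on x2
cerr = scipy.zeros(npts)                 # no covariance between the errors
a, b, avar, bvar, xi, zeta = _bess(npts, x1, x2, x1err, x2err, cerr)
# b holds the four slopes: OLS(X2|X1), OLS(X1|X2), bisector, orthogonal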
    def react_xy(self, rolling_av=False, toprint=True):

        if rolling_av:
            weights = scipy.exp((-1.*(scipy.arange(self.rolling,0,-1.)/self.rolling)**2)/2.)
            xd = scipy.average(self.xdrift[(-1*self.rolling):],weights=weights)
            yd = scipy.average(self.ydrift[(-1*self.rolling):],weights=weights)
        else:
            xd = self.xdrift[-1]
            yd = self.ydrift[-1]

        if len(self.xdrift)>1:
            last_slope_1_x = self.xdrift[-1] - self.xdrift[-2]
            last_slope_1_y = self.ydrift[-1] - self.ydrift[-2]

        integrated_diff_x = scipy.sum(self.xdrift)
        integrated_diff_y = scipy.sum(self.ydrift)

        move_x =  xd * self.micronperpixel_x
        move_y =  -1*yd * self.micronperpixel_y

        if not self.use_marz:
            last_x = self.piezo.getPosition(1)
            last_y = self.piezo.getPosition(2)

        
        if (not self.movedLastTime[-1]) or (not self.move_every_other):
            if (abs(xd) > self.xythreshold_pixels) and (not self.no_xy):
                if self.use_marz:
                    if toprint:
                        print "Moving x:", move_x
                    self.xystage.goRelative(move_x,0)
                    self.movedx.append(move_x)
                else:
                    if toprint:
                        print "Moving x:", move_x
                    self.piezo.moveTo(1, last_x+move_x, waitForConvergence=False)
                    self.movedx.append(move_x)
            else:
                self.movedx.append(0)
            if (abs(yd) > self.xythreshold_pixels) and (not self.no_xy):
                if self.use_marz:
                    if toprint:
                        print "Moving y:", move_y
                    self.xystage.goRelative(0,move_y)
                    self.movedy.append(move_y)
                else:
                    if toprint:
                        print "Moving y:", move_y
                    self.piezo.moveTo(2, last_y+move_y, waitForConvergence=False)
                    self.movedy.append(move_y)
            else:
                self.movedy.append(0)
 def locate(self, P1, P2, C):
     pointlist = []
     
     for i, testfunc in enumerate(self.testfuncs):
         if self.flagfuncs[i] == iszero:
             for ind in range(testfunc.m):
                 X, V = testfunc.findzero(P1, P2, ind)
                 pointlist.append((X,V))
     
     X = array(average([point[0] for point in pointlist]))
     V = array(average([point[1] for point in pointlist]))
     C.Corrector(X,V)
     
     return X, V
Example #12
def main(argv=None):
	global args
	parser=argparse.ArgumentParser(description="Compute various statistics related to sequence sets or individual sequences; either in the provided fasta files or for the sequences piped in")
#	parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
	parser.add_argument('-p',dest="pretty",action="store_true",help="Pretty print using PrettyTable module")
	parser.add_argument('-i',dest="individual",action="store_true",help="Display statistics for each individual sequences")
	parser.add_argument('-d',dest="delimiter",help="Column separator for output, defaults to whitespace",default=" ")
	parser.add_argument('-t',dest="min_length",help="Minimum length threshold to filter fasta file",default=0,type=int)
	parser.add_argument('-r',dest="reference_length",help="(Not yet implemented) Reference length used to compute corrected Nx values",default=0)
	parser.add_argument('-o', nargs='?', type=argparse.FileType('w'), default=sys.stdout,dest="outfile")
	parser.add_argument('FASTAFILE',action='append',nargs="+",help='List of fasta files to keep. Use "*" to keep them all')
	args=parser.parse_args()
	all_records=[]
	FASTAFILE=args.FASTAFILE[0]
	if args.pretty:
		import prettytable

	for f in FASTAFILE: 
		for record in SeqIO.parse(f, "fasta", generic_dna):
			if len(record.seq)<=args.min_length:
				continue
			all_records.append(SequenceStat(f,record))

	if args.individual:
		process_individual_sequences(all_records)
		return 0


	# Display summary statistics per file
	sequences_per_files=collections.defaultdict(list)
	for s in all_records:
		sequences_per_files[s.file].append(s)
	if args.pretty:
		table=prettytable.PrettyTable(["File","#Seqs","Avg GC","Avg Length(kb)", "Quant","Sum Length(kb)","N50(kb)","L50"])
		table.align["File"] = "l" 

		for file,seqs in sequences_per_files.items():
			lengths=[x.length for x in seqs]
			table.add_row([file,len(seqs),round(scipy.average([x.gc for x in seqs]),2),\
				round(scipy.average(lengths)/1000,2),mquantiles(lengths),round(sum(lengths)/1000,2),round(N50.N50(lengths)/1000,2),N50.L50(lengths)])
		print >>args.outfile,table.get_string(sortby="N50(kb)")

	else:
		for file,seqs in sequences_per_files.items():
			lengths=[x.length for x in seqs]

			print >>args.outfile," ".join(map(str,[\
				file,len(seqs),scipy.average([x.gc for x in seqs]),\
				scipy.average(lengths),sum(lengths),N50.N50(lengths),N50.L50(lengths)
				]))
Example #13
def suppressFire_callback(channel):
    x,y = float('nan'),float('nan')
    while np.isnan(x) or np.isnan(y):
        FireImage = abs(average(ImQueue[-1],-1) - average(ImQueue[0],-1))
        x,y = findFire(FireImage)
    fo = '-'.join(map(str, datetime.now().timetuple()[:6]))
    misc.imsave('fire'+fo+'.bmp',FireImage)
    xdivtmp, ydivtmp = xdivs[:], ydivs[:]
    bisect.insort(xdivtmp,x)   # Insert the fire coordinates into the protection grid
    bisect.insort(ydivtmp,y)
    xzone = xdivtmp.index(x) - 1   # Find the grid coordinates
    yzone = ydivtmp.index(y) - 1
    del xdivtmp, ydivtmp             
    firePorts((xzone,yzone))
    print 'Fire at (%.2f,%.2f) in zone %d,%d\nFiring ports %d & %d' % ((x,y,xzone,yzone,) + fireDict[(xzone,yzone)])
def center_on_cos(raw_quadratures, phi0=None, omega=None, snap_omega=False):
    mean = scipy.average(raw_quadratures, axis=1)
    no_angles, no_pulses = raw_quadratures.shape
    model = Model(cos_model)
    offset, amplitude, phi0, omega = guess_initial_parameters(mean, phi0, omega)
    model.set_param_hint("offset", value=offset)
    model.set_param_hint("amplitude", min=0., value=amplitude)
    model.set_param_hint("phi0", value=phi0)
    model.set_param_hint("omega", min=0., value=omega)
    model.make_params(verbose=False)
    steps = scipy.arange(no_angles)
    res = model.fit(mean, x=steps, verbose=False)
    omega_param = res.params["omega"]
    if snap_omega:
        appx_omega = float(omega_param)
        no_pi_intervals = int(round(pi/appx_omega))
        omega = pi/no_pi_intervals
        omega_param.set(omega, vary=False)
        res.fit(mean, x=steps, verbose=False)
    d_value, p_value_ks = kstest(res.residual, 'norm')
    mean_fit = res.eval(x=steps)
    offset = mean-mean_fit
    aligned_quadratures = raw_quadratures - offset[:,None]
    centered_quadratures = aligned_quadratures - float(res.params["offset"])
    return (centered_quadratures,
            float(omega_param), float(res.params["phi0"]), p_value_ks)
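
# center_on_cos relies on two helpers not shown above. A plausible minimal
# form of the fitted model is sketched here (hypothetical, for orientation
# only); guess_initial_parameters is assumed to seed offset, amplitude, phi0
# and omega from the mean trace.
import numpy

def cos_model(x, offset, amplitude, phi0, omega):
    """Assumed mean-quadrature model wrapped by lmfit's Model."""
    return offset + amplitude * numpy.cos(omega * x + phi0)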
Example #15
def print_all_stats(ctx, series):
    ftime = get_ftime(series)
    start = 0 
    end = ctx.interval
    print('start-time, samples, min, avg, median, 90%, 95%, 99%, max')
    while (start < ftime):  # for each time interval
        end = ftime if ftime < end else end
        sample_arrays = [ s.get_samples(start, end) for s in series ]
        samplevalue_arrays = []
        for sample_array in sample_arrays:
            samplevalue_arrays.append( 
                [ sample.value for sample in sample_array ] )
        #print('samplevalue_arrays len: %d' % len(samplevalue_arrays))
        #print('samplevalue_arrays elements len: ' + \
               #str(map( lambda l: len(l), samplevalue_arrays)))
        # collapse list of lists of sample values into list of sample values
        samplevalues = reduce( array_collapser, samplevalue_arrays, [] )
        #print('samplevalues: ' + str(sorted(samplevalues)))
        # compute all stats and print them
        myarray = scipy.fromiter(samplevalues, float)
        mymin = scipy.amin(myarray)
        myavg = scipy.average(myarray)
        mymedian = scipy.median(myarray)
        my90th = scipy.percentile(myarray, 90)
        my95th = scipy.percentile(myarray, 95)
        my99th = scipy.percentile(myarray, 99)
        mymax = scipy.amax(myarray)
        print( '%f, %d, %f, %f, %f, %f, %f, %f, %f' % (
            start, len(samplevalues), 
            mymin, myavg, mymedian, my90th, my95th, my99th, mymax))

        # advance to next interval
        start += ctx.interval
        end += ctx.interval
Example #16
    def clutch_performance(self):
        """Record against the field in the playoffs only"""
        playoffs = [el for el in self.weekly['OP'] if 'PLAYOFFS' in el]
        wins = scipy.zeros((len(self.realTeams), len(playoffs)))
        for (i, t) in enumerate(self.realTeams):
            for (j, w) in enumerate(playoffs):
                for t2 in [el for el in self.realTeams if el != t]:
                    wins[i, j] += int(self.dataDic[t][w] > self.dataDic[t2][w]) if self.dataDic[t][w] else 0.0
                    wins[i, j] += .5 * int(self.dataDic[t][w] == self.dataDic[t2][w]) if self.dataDic[t][w] else 0.0

        losses = 11. - wins

        return (scipy.average(wins, axis=1),
                scipy.std(wins, axis=1),
                scipy.average(losses, axis=1),
                scipy.std(losses, axis=1))
Example #17
    def update(self):
#        self.last_pos = pos # needed for keeping the lines while some are removed or added
        if self.Main.ROIs.nROIs != 0:
                
            for n in range(self.Main.Data.nFiles):
                if self.Options.view['show_flags'][n]: # only work on active datasets
                
                    # implementation using the pyqtgraph internal slicing
                    ROI = self.Main.ROIs.ROI_list[self.Main.ROIs.active_ROI_id]
                    
                    # func bool mask slicing
                    mask, inds = self.Main.ROIs.get_ROI_mask(ROI)
                    if self.Options.view['show_dFF']:
                        sliced = self.Main.Data.dFF[mask,:,n]
                    else:
                        sliced = self.Main.Data.raw[mask,:,n]
    
                    Trace = sp.average(sliced,axis=0)
                    self.traces[n].setData(Trace)
                    self.traces[n].show()
                    
                    # update the normal traces plot or update the Trace Inspector.
                    # this needs cleaning!
    #                if self.Traces_Inspector_exists_flag:
    #                    self.Traces_Inspector.update_trace(Trace,n)
    #                    self.Traces_Inspector.traces[n].show()
    #                else:
    
                else:
                    self.traces[n].hide()
    #                if self.Traces_Inspector_exists_flag:
    #                    self.Traces_Inspector.traces[n].hide()
    #                self.dots[n].hide()
                pass
            pass
Example #18
def smoothMemory(ffty, degree=3):
	global ffts
	ffts = ffts + [ffty]
	if len(ffts) <= degree:
		return ffty
	ffts = ffts[1:]
	return scipy.average(scipy.array(ffts), 0)
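
# Hedged usage sketch: smoothMemory keeps its history in the module-level
# list `ffts` and, once more than `degree` frames have been seen, returns the
# mean of the last `degree` of them (the old scipy namespace re-exported
# array/average/rand from numpy).
import scipy

ffts = []                                    # history mutated by smoothMemory
frames = [scipy.rand(64) for _ in range(10)]
for frame in frames:
    smoothed = smoothMemory(frame, degree=3) # passthrough for the first 3 calls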
Example #19
def continuous_phase(phase, axis=0, center=False):
    """Add and subtract 2 pi such that the phase in the array is
       as continuous as possible, along first or given axis. Optionally,
       it also centers the phase data so that the average is smallest."""

    phase = _n.array(phase, copy=0)

    rowshape = list(phase.shape)
    
    if len(rowshape) > 0:
        rowshape[axis] = 1

        slip = _n.concatenate([ _n.zeros(rowshape),
                                scipy.diff(phase, axis=axis) ],
                              axis=axis)
        slip = _n.around(slip/(2*_n.pi))
        cumslip = scipy.cumsum(slip, axis=axis)

        phase = phase - 2*_n.pi*cumslip
    else:
        pass

    if center:
        offset = _n.around(scipy.average(phase, axis=axis)/(2*_n.pi))
        offset = _n.reshape(offset, rowshape)
        offset = _n.repeat(offset, cumslip.shape[axis], axis=axis)
        phase = phase - 2*_n.pi*offset
    
    return phase
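
# Hedged usage sketch (assumes `_n` is numpy and an old scipy exposing
# scipy.diff/cumsum/average): a linear phase ramp wrapped into (-pi, pi] is
# restored to a continuous ramp, much like numpy.unwrap.
import numpy as _n

wrapped = _n.angle(_n.exp(1j * _n.linspace(0.0, 20.0, 500)))
unwrapped = continuous_phase(wrapped)        # the ~2*pi jumps are removed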
Example #20
def decode(file_name):
    border.rotate(file_name)
    image = Image.open("temp.png")
    q = border.find("temp.png")
    ind = sp.argmin(sp.sum(q, 1), 0)
    up_left = q[ind, 0] + 2
    up_top = q[ind, 1] + 2
    d_right = q[ind+1, 0] - 3
    d_bottom = q[ind-1, 1] - 3

    box = (up_left, up_top, d_right, d_bottom)
    region = image.crop(box)
    h_sum = sp.sum(region, 0)
    m = argrelmax(sp.correlate(h_sum, h_sum, 'same'))
    s = sp.average(sp.diff(m))
    m = int(round(d_right - up_left)/s)
    if m % 3 != 0:
        m += 3 - m % 3
    n = int(round(d_bottom - up_top)/s)
    if n % 4 != 0:
        n += 4 - n % 4
    s = int(round(s))+1

    region = region.resize((s*m, s*n), PIL.Image.ANTIALIAS)
    region.save("0.png")
    pix = region.load()
    matrix = mix.off(rec.matrix(pix, s, m, n))
    str2 = hamming.decode(array_to_str(matrix))

    return hamming.bin_to_str(str2)
def randomly_clustering(g, tries = 10):
	"""
		Comparing the average clustering coefficient of g with other graphs h
		which share identical degree sequence. This function returns the comparison ratio.

		Parameters:
		-----------
			g: NetworkX Graph, NetworkX DiGraph
			tries: int, optional, (default = 10)
				number of tries (compared graphs)
		See also:
		---------
			mean_clustering
		Returns:
		--------
			float, the ratio of avg clustering coefficient, avg_cc(g) / mean(avg_cc(h))
	"""
	from scipy import average
	g = to_undirected(g)
	d = g.degree().values()
	c = mean_clustering(g, normalized = False)
	p = list()
	for t in xrange(tries):
		ng = nx.configuration_model(d, create_using = nx.Graph())
		p.append(mean_clustering(ng))
		del ng
	return(c / average(p))
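
# Hedged usage sketch (Python 2 era: xrange, g.degree() returning a dict;
# to_undirected and mean_clustering are helpers from the same module): a
# small-world graph is far more clustered than degree-preserving random
# graphs, so the ratio comes out well above 1.
import networkx as nx

g = nx.watts_strogatz_graph(500, 6, 0.1)
ratio = randomly_clustering(g, tries=10)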
Example #22
def get_r2(t,u,ucalc):
    umean=scipy.average(u)
    sigmai=(((u-umean)**2)/9)**0.5
    dumean2=(u-umean)**2  
    ducalc2=(u-ucalc)**2
    r2=1-sum(ducalc2)/sum(dumean2)
    return r2,sigmai
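
# Hedged usage sketch (assumes an old scipy exposing scipy.average at top
# level). Note the hard-coded divisor 9 in sigmai, which suggests the author
# assumed n = 10 samples; t is accepted but unused.
import scipy

t = scipy.linspace(0.0, 1.0, 10)
u = 2.0 * t + 1.0                    # measurements
ucalc = u + 0.05                     # a slightly biased model prediction
r2, sigmai = get_r2(t, u, ucalc)     # r2 close to, but below, 1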
Example #23
def simplex_quivers(sc,form):
    """
    Sample a Whitney 1-form at simplex barycenters
    """

    quiver_bases = average(sc.vertices[sc[-1].simplices],axis=1)
    quiver_dirs  = zeros((sc[-1].num_simplices,sc.embedding_dimension()))

    s_to_i = sc[1].simplex_to_index

    for n,s in enumerate(sc[-1].simplices):
        verts = sorted(s)
        
        d_lambda = barycentric_gradients(sc.vertices[verts,:])
        edges   = [Simplex(x) for x in combinations(s,2)]
        indices = [s_to_i[x] for x in edges]
        values  = [form[i] for i in indices]

        for e,v in zip(combinations(range(len(verts)),2),values):
            quiver_dirs[n,:] += v*(d_lambda[e[1]] - d_lambda[e[0]])


        
    quiver_dirs /= (sc.complex_dimension() + 1)

    return quiver_bases,quiver_dirs
Example #24
def show_intensity(filename, form):
    x, fs, nbits = getattr(audiolab, form + 'read')(filename)
    leds = LEDs(num=8)
    N = fs / 30
    c = lambda stuff: int(map_to_range(scipy.average(stuff), 0.0, 1.0, 0.0, 255.0))

    def do_work():
        global i
        #print x[i * N : (i+1) * N]
        data = x[i * N : (i+1) * N]

        r = c(scipy.absolute(data))

        leds[0:8] = [(r, 0, 0) for _ in xrange(8)]
        leds.update()

        i += 1

    def start_music(conn, play_args):
        conn.send(None)
        conn.close()
        audiolab.play(*play_args)

    print "starting"
    #parent_conn, child_conn = Pipe()
    #Process(target=start_music, args=(child_conn, (x.T, fs))).start()
    l = task.LoopingCall(do_work)
    #parent_conn.recv()
    l.start(1.0 / 30.0)
    reactor.run()
def sales_mapping():
    data = read_data_from_pickle()
    testData = data["test"]
    trainData = data["train"]
    testData["Weekly_Sales"] = None

    # this loop needs to be threaded.
    # for index,record in testData.iterrows():
    #     print(index)
    #     record["Weekly_Sales"] = average(trainData.loc[(trainData["Store"]==record["Store"]) & (trainData["Dept"]==record["Dept"]) & (abs(trainData["WeekNum"] - record["WeekNum"]) < 2)]["Weekly_Sales"])

    newTrainData = pd.DataFrame(columns=trainData.columns.values)
    newTrainData["Weekly_Sales_Averaged"] = None

    for storeNum in range(1, NUM_STORES + 1):
        print("Store: ", storeNum)
        storeTrainData = trainData[trainData["Store"] == storeNum]
        for deptNum in range(1, NUM_DEPTS + 1):
            print("Dept: ", deptNum)
            deptTrainData = storeTrainData[storeTrainData["Dept"] == deptNum]
            for index, record in deptTrainData.iterrows():
                valuesToAverage = deptTrainData.loc[(abs(deptTrainData["WeekNum"] - record["WeekNum"]) < 2) & (deptTrainData["IsHoliday"] == record["IsHoliday"])]["Weekly_Sales"]
                deptTrainData.set_value(index, "Weekly_Sales_Averaged", average(valuesToAverage))

            newTrainData = newTrainData.append(deptTrainData)

    trainData = newTrainData

    print(testData.head())
Example #26
def show_fft(filename, form):
    x, fs, nbits = getattr(audiolab, form + 'read')(filename)
    leds = LEDs(num=8)
    N = fs / 30.0
    db_range = (-60.0, 0.0)
    c = lambda stuff: int(map_to_range(scipy.average(stuff), db_range[0], db_range[1], 0.0, 255.0))

    def do_work():
        global i
        lower, upper = int(i * N), int((i+1) * N)
        X = scipy.fft(x[lower:upper])
        Xdb = numpy.clip(20 * scipy.log10(scipy.absolute(X)), db_range[0], db_range[1])
        #print Xdb
        f = scipy.linspace(0, fs, N, endpoint=False)[:100]

        #print Xdb[0:3]
        r = c(Xdb[0:4])
        g = c(Xdb[4:10])
        b = c(Xdb[10:])
        if i % 15 == 0: print (r, g, b)

        leds[0:8] = [(r, g, b) for _ in xrange(8)]
        leds.update()
        i += 1

    print "starting"
    l = task.LoopingCall(do_work)
    l.start(1.0 / 30.0)
    reactor.run()
Example #27
 def _cell_to_point_data(data_map, nx, nz):
     r"""
     This function takes a cell data map and calculates average values
     at the corners to make a point data map. The created array is 3-D with
     the final index corresponding to corners.
     Index Locations: 0 = BLC, 1 = BRC, 2 = TRC, 3 = TLC
     """
     #
     point_data = sp.zeros((nz+1, nx+1, 4))
     #
     # setting corners of map first
     point_data[0, 0, 0] = data_map[0, 0]
     point_data[0, -1, 1] = data_map[0, -1]
     point_data[-1, -1, 2] = data_map[-1, -1]
     point_data[-1, 0, 3] = data_map[-1, 0]
     #
     # calculating point values for the map interior
     for iz in range(nz):
         for ix in range(nx):
             val = sp.average(data_map[iz:iz+2, ix:ix+2])
             point_data[iz, ix, 2] = val
             point_data[iz+1, ix+1, 0] = val
             point_data[iz+1, ix, 1] = val
             point_data[iz, ix+1, 3] = val
     #
     # handling left and right edges
     for iz in range(nz):
         val = sp.average(data_map[iz:iz+2, 0])
         point_data[iz, 0, 3] = val
         point_data[iz+1, 0, 0] = val
         #
         val = sp.average(data_map[iz:iz+2, -1])
         point_data[iz, -1, 2] = val
         point_data[iz+1, -1, 1] = val
     #
     # handling top and bottom edges
     for ix in range(nx):
         val = sp.average(data_map[0, ix:ix+2])
         point_data[0, ix, 1] = val
         point_data[0, ix+1, 0] = val
         #
         val = sp.average(data_map[-1, ix:ix+2])
         point_data[-1, ix, 2] = val
         point_data[-1, ix+1, 3] = val
     #
     return point_data[0:nz, 0:nx, :]
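
# Hedged usage sketch (sp is the old scipy namespace): each interior point
# value is the average of the four surrounding cells, and the returned array
# keeps one (nz, nx) slice per corner index.
import scipy as sp

nz, nx = 4, 6
cells = sp.arange(float(nz * nx)).reshape(nz, nx)
points = _cell_to_point_data(cells, nx, nz)
print(points.shape)                  # (4, 6, 4): (iz, ix, corner)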
Example #28
 def dumpSeries(self):
     for series in self.series:
         print "name:",series.getFullName()
         
         for index,value in enumerate(series):
             print value
             #print "index=",index, " , value=",value
         print "avg=",scipy.average(series)," , variance=",scipy.var(series), " , stddev=",scipy.std(series)
    def _estimate_fit_param(self):
        B = self._orig_image.min()
        w = self._orig_image - B
        A = w.max() 

        X, Y = scipy.indices(self._shape)

        x0 = scipy.average(X, None, w)
        y0 = scipy.average(Y, None, w)

        col = w[:, int(y0)]
        var_x = scipy.average((scipy.arange(col.size) - y0)**2, None, col)

        row = w[int(x0), :]
        var_y = scipy.average((scipy.arange(row.size) - x0)**2, None, row)
    
        return A, B, x0, y0, var_x**0.5, var_y**0.5, 0
def estimate_position_from_quadratures(eta, angles, quadratures):
    X = quadratures/sqrt(eta)
    mean = scipy.average(X, axis=1)
    avg = interp1d(angles, mean)
    q_mean = -avg(pi)
    p_mean = avg(pi/2.)
    s_max = scipy.std(X, axis=1).max()
    return q_mean, p_mean, s_max
Example #31
 def get_center(self):
     """ pos is the centroid """
     return sp.average(self.contour[sp.argmax([cont.shape[0] for cont in self.contour])],axis=0)
Example #32
        RA.append(d.read_header()['RA'])
        DEC.append(d.read_header()['DEC'])
        Z.append(d.read_header()['Z'])
        PLATE.append(d.read_header()['PLATE'])
        MJD.append(d.read_header()['MJD'])
        FIBER.append(d.read_header()['FIBERID'])
        THING_ID.append(d.read_header()['THING_ID'])
    hdu.close()

nq = len(data_list)
print 'nq = ', nq

if args.substract:
    print 'subtract first the mean delta as a function of wavelength, then subtract an a + b*dlwave term per quasar'
    for spec in range(nq):
        mde = sp.average(data_list[spec], weights=ivar_list[spec])
        mll = sp.average(lwave_list[spec], weights=ivar_list[spec])
        mld = sp.sum(ivar_list[spec] * data_list[spec] *
                     (lwave_list[spec] - mll)) / sp.sum(
                         ivar_list[spec] * (lwave_list[spec] - mll)**2)
        data_list[spec] -= mde + mld * (lwave_list[spec] - mll)
    print 'done with the subtraction'

lwmin = None
lwmax = None
lwstep = None
for q in range(nq):
    if lwmin is None:
        lwmin = np.log10(wave_list[q][0])
        lwmax = np.log10(wave_list[q][-1])
        lwstep = np.log10(wave_list[q][1]) - np.log10(wave_list[q][0])
Example #33
 def calcAvg(self):
     self.avg = scipy.average(self.series)
def vacuum_correct(quadratures, vacuum_quadratures):
    gamma_prime = sqrt(2.) * average(std(vacuum_quadratures, axis=1))
    return quadratures / gamma_prime
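
# Hedged sanity check (assumes `from scipy import sqrt, average, std` as in
# the source module): dividing by sqrt(2) times the mean vacuum standard
# deviation normalizes the corrected vacuum variance to 1/2.
import numpy

vacuum = numpy.random.normal(0.0, 0.35, size=(100, 2000))
print(numpy.var(vacuum_correct(vacuum, vacuum), axis=1).mean())    # ~0.5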
Example #35
    def table(self, skip_single_keys=False, space_tabs=True):
        """Print a table summarizing the average numbers.
        """
        self.check_num_instances()

        show_corpus = not skip_single_keys or len(self.numbers) > 1
        show_system = not skip_single_keys or len(self.numbers) < \
                sum(len(self.numbers[corpus]) for corpus in self.numbers)

        # TODO: rewrite all this using string.format for sanity

        header = [[self.adjust_tabs('corpus', 'corpus', space_tabs),
                   self.adjust_tabs('system', 'system', space_tabs)][i]
                   for i,j in enumerate((show_corpus, show_system))
                if j]

        # Check if the standard precision/recall metrics are being used
        # and if micro-averaging is needed
        show_micro = 'correct' in self.max_field_lens
        show_prf = show_micro or 'p %' in self.max_field_lens
        if show_prf:
            if show_micro:
                header += [self.adjust_tabs(name, name + '  ', space_tabs)
                            for name in ('avg', 'p %', 'r %', 'f %')]
            else:
                header += [self.adjust_tabs(name, name + '  ', space_tabs)
                            for name in ('p %', 'r %', 'f %')]

        standard_header_set = set(header)
        rows = []

        prev_corpus = None
        prev_system = None
        for corpus in sorted(self.numbers.iterkeys()):
            for system in sorted(self.numbers[corpus].iterkeys()):
                new_row = []
                if show_corpus:
                    new_row.append(self.adjust_tabs('corpus', corpus,
                                                    space_tabs)
                                if corpus != prev_corpus
                                else self.adjust_tabs('corpus', '',
                                                      space_tabs))
                if show_system:
                    new_row.append(self.adjust_tabs('system', system,
                                                    space_tabs)
                                if corpus != prev_corpus or
                                system != prev_system
                                else self.adjust_tabs('system', '',
                                                      space_tabs))
                prev_corpus = corpus
                prev_system = system

                if show_prf:
                    # Macro averaging + additional values
                    if show_micro:
                        new_row.append('macro')
                    for metric in ('p %', 'r %', 'f %'):
                        macro_avg = sp.average(
                                self.numbers[corpus][system][metric])
                        new_row.append(self.adjust_tabs('x %  ',
                                        '%.2f' % (100*macro_avg,), space_tabs))

                # Additional fields
                for field_name in sorted(self.numbers[corpus][system]):
                    if field_name in ('p %', 'r %', 'f %', '#correct',
                                      '#incorrect', '#missed'):
                        # These were either already added to the header (p,r,f)
                        # or are intentionally dropped from the table
                        # (correct, incorrect, missed)
                        continue

                    # Check if the field name was added to the header
                    if '|'.join(header).find(field_name) == -1:
                        header.append(self.adjust_tabs('00.00', field_name,
                                                       space_tabs))

                    # Find out how many spots to leave blank before this
                    # field
                    padding = ''
                    for header_field in header:
                        if header_field.strip() == field_name:
                            # Found a match, so add the padding and move on
                            new_row.append(padding)
                            break
                        elif header_field in self.numbers[corpus][system] \
                                or header_field in standard_header_set:
                            # No padding needed; these were actually printed
                            padding = ''
                        else:
                            padding += self.adjust_tabs(header_field, '',
                                                        space_tabs)

                    new_row.append(self.adjust_tabs(
                        field_name,
                        self.aggregate_field(corpus, system, field_name),
                        space_tabs))
                rows.append(new_row)

                # Micro averaging
                if not show_micro:
                    continue
                sum_correct = sum(self.numbers[corpus][system]['correct'])
                sum_incorrect = sum(self.numbers[corpus][system]['incorrect'])
                sum_missed = sum(self.numbers[corpus][system]['missed'])

                micro_p, micro_r, micro_f = 0, 0, 0
                if sum_correct > 0:
                    micro_p = 100.0 * sum_correct / (sum_correct + sum_incorrect)
                    micro_r = 100.0 * sum_correct / (sum_correct + sum_missed)
                    micro_f = (2.0 * micro_p * micro_r) / (micro_p + micro_r)

                micro_row = [[self.adjust_tabs('corpus', '', space_tabs),
                              self.adjust_tabs('system', '', space_tabs)][i]
                        for i,j in enumerate((show_corpus, show_system))
                        if j] + ['micro',
                                 '%.2f' % (micro_p,),
                                 '%.2f' % (micro_r,),
                                 '%.2f' % (micro_f,)]
                rows.append(micro_row)

        colsep = '  ' if space_tabs else '\t'
        return '\n'.join([colsep.join(header)] + [colsep.join(row)
                                                  for row in rows])
Example #36
found from curve fitting'''
m = [1.0, 2.0]  #intial guess
cont, cov = curve_fit(dhvstime, time, dh, m)
a0 = cont[0]  #value of a
b0 = cont[1]  #value of b
dhfitted = (a0 * np.array(time)) / (1 + b0 * np.array(time))  #array of fitted values

diffh = dhfitted - dh  #array of difference between fitted and experimental

diffh2 = diffh**2
moddiffh = diffh2**0.5  # difference should be modulus, i.e. should be positive.
print 'Sr.No', 'time   ', 'dh experi', ' dh fitted    ', '   dh exp-dh fitted'
for i in range(n):
    print '(', i + 1, ') ', time[i], '   ', dh[i], '    ', dhfitted[
        i], '     ', moddiffh[i]
averagedh = scipy.average(moddiffh)  # to find mean
dhdavg = ((np.array(averagedh)) - moddiffh)**2  # to find (x-xavg)^2
variance = (sum(dhdavg)) / (n - 1)  # just found variance
sd = variance**0.5  # standard deviation
print 'standard deviation: ', sd
error = (sum(((np.array(moddiffh)))**2)) / variance
print 'variance: ', variance
error = sd / (n**0.5)
print 'error:', error

plt.plot(time, dh, '*')
hfit = dhvstime(time, a0, b0)
plt.plot(time, hfit, 'r-')
plt.show()
Example #37
def to_grayscale(frame):
    if len(frame.shape) == 3:
        return average(frame, -1)
    else:
        return frame
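
# Hedged usage sketch (assumes `from scipy import average`): RGB frames are
# collapsed by a plain channel mean, while frames that are already 2-D pass
# through unchanged.
import numpy

rgb = numpy.random.rand(480, 640, 3)
gray = to_grayscale(rgb)             # shape (480, 640)
same = to_grayscale(gray)            # returned as-is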
Example #38
def main(argv=None):
    parser = argparse.ArgumentParser(
        description=
        "Compute kmer counts for all sequences in the input multi fasta file")
    # parser.add_argument('-k',dest="kmer_length",help="mer length",default=3,type=int)
    parser.add_argument('-r',
                        dest="recursive",
                        help="Perform recursive search for fasta files",
                        action="store_true")

    parser.add_argument('-n',
                        dest="n_contigs",
                        help="Number of total contigs to sample",
                        default=1000,
                        type=int)
    parser.add_argument('-S',
                        dest="by_sequence",
                        help="Sampling of contigs is made for each sequence",
                        default=False,
                        action="store_true")
    parser.add_argument('-l',
                        dest="avg_length",
                        help="Length of conting to sample",
                        default=500,
                        type=int)
    parser.add_argument('-m',
                        dest="min_length",
                        help="Minimal length of conting to sample",
                        default=200,
                        type=int)
    parser.add_argument("-d",
                        "--stdev",
                        dest="stdev",
                        help="standard deviation",
                        type=int,
                        default=200)
    parser.add_argument("-p",
                        dest="preview",
                        help="Only print preview of number of contigs output",
                        default=False,
                        action="store_true")
    parser.add_argument("-R",
                        dest="reverse",
                        help="Randomly perform reverse complement",
                        default=False,
                        action="store_true")

    parser.add_argument('-I',
                        dest="keep_IUPAC",
                        help="Indicate whether to keep ambiguous site",
                        default=False,
                        action="store_true")
    parser.add_argument(
        '-i',
        dest="index_path",
        help=
        "MANDATORY path to an SQLite file index for out of memory sampling. If absent, will be generated; if present will be loaded back. Will trigger an error if an existing index do not match the provided input fasta files.",
        default=None,
        type=str)
    parser.add_argument(
        '-s',
        dest="simplify_ncbi_fasta_header",
        help=
        "If set, headers of fasta file will only contain the GI number from the NCBI full description",
        default=False,
        action="store_true")
    parser.add_argument(
        '-A',
        dest="append",
        help="If set, fasta sequences are appended to the output file",
        default=False,
        action="store_true")
    parser.add_argument('-o',
                        dest="output_name",
                        help="Name of output file",
                        default="contigs.fasta",
                        type=str)
    parser.add_argument(
        '-P',
        dest="pretty",
        help="Use (and require) prettytable for summary output",
        default=False,
        action="store_true")
    parser.add_argument(
        '-k',
        dest="key",
        help="Indicate string key inserted in sampled contig headers",
        default="")
    parser.add_argument('-M',
                        dest="max_sequences",
                        help="Maximal number of sequences to consider,-1: all",
                        default=-1,
                        type=int)
    parser.add_argument(
        '-F',
        dest="max_input_fastas",
        help="Maximal number of fastas files to process; -1: all",
        default=-1,
        type=int)
    parser.add_argument(
        '-u',
        dest="at_least_one",
        help=
        "Enable subsampling: less than one contig per sequence is possible",
        default=True,
        action="store_false")

    parser.add_argument('FASTAFILE',
                        action='append',
                        nargs="+",
                        help='list of fasta files')
    args = parser.parse_args()

    if args.pretty:
        import prettytable

    FASTAFILE = args.FASTAFILE[0]
    all_fastas = set()
    if args.recursive:
        for f in FASTAFILE:
            all_fastas.update(find_fasta_files(f))
    else:
        all_fastas = set(FASTAFILE)

    logger.info("Found %d fasta files" % (len(all_fastas)))
    if len(all_fastas) < 1 and not args.index_path:
        logger.critical("No fasta files found, no index provided, bailing out")
        sys.exit(1)
    if args.max_input_fastas != -1:
        logger.info("Downsampling input list of FASTA files down to %d" %
                    (args.max_input_fastas))
        all_fastas = random.sample(list(all_fastas), k=args.max_input_fastas)
    all_fastas = list(all_fastas)

    # load the index if provided
    sequence_index = None
    if args.index_path:
        if len(all_fastas) > 0:
            sequence_index = SeqIO.index_db(args.index_path, all_fastas,
                                            "fasta")
            logger.info("Built DB index %s" % (args.index_path))
        else:
            sequence_index = SeqIO.index_db(args.index_path)
            logger.info("Reusing DB index %s" % (args.index_path))

    if sequence_index is None:
        logger.critical("No index provided, bailing out")
        sys.exit(1)

    # Build sequence DB

    sequence_lengths = dict([(x, len(sequence_index[x]))
                             for x in sequence_index])
    sequence_length_keys = sequence_lengths.keys()
    sequence_length_values = sequence_lengths.values()

    total_length = sum(sequence_length_values)
    logger.info("Computed total sequence lengths")

    all_records = []

    if args.pretty:
        table = prettytable.PrettyTable([
            "Fasta", "length(kb)", "N sequences", "N sampled contig",
            "Avg contig/seq", "median", "min", "max"
        ])
        table.align["File"] = "l"

    # these_sequence_length = dict([(k,v) for k,v in sequence_lengths.items() if k in fasta_to_sequences[fasta]])

    contig_coordinates = sample_intervals(sequence_length_values,
                                          args.n_contigs,
                                          BYSEQUENCE=args.by_sequence,
                                          AT_LEAST_ONE=args.at_least_one,
                                          length_avg=args.avg_length,
                                          length_sd=args.stdev)

    #index contig coordinates
    contig_coordinates_by_key = collections.defaultdict(list)
    for c in contig_coordinates:
        contig_coordinates_by_key[c[0]].append(c)

    n_contigs_per_key = [len(v) for v in contig_coordinates_by_key.values()]

    avg_contig_per_sequence = scipy.average(n_contigs_per_key)
    med_contig_per_sequence = scipy.median(n_contigs_per_key)
    min_contig_per_sequence = min(n_contigs_per_key)
    max_contig_per_sequence = max(n_contigs_per_key)

    info = [
        args.index_path, total_length / 1000.0,
        len(sequence_index),
        len(contig_coordinates), avg_contig_per_sequence,
        med_contig_per_sequence, min_contig_per_sequence,
        max_contig_per_sequence
    ]
    if args.pretty:
        table.add_row(info)
    else:
        logger.info(
            "Fasta: %s,length:%d, N sequences: %d, N sampled contig:%d, Avg contig per seq:%f, med: %d, min seq:%d, max seq:%d "
            % (tuple(info)))

    if (args.preview):
        return

    fasta_name = '/'.join(os.path.split(args.index_path)[-2:])
    last_seq_idx = None
    sample_id = 0
    generated_samples = 0

    for seq_idx, start, end in contig_coordinates:
        if seq_idx != last_seq_idx:
            sample_id = 0
            fasta_key = sequence_length_keys[seq_idx]
            current_seq = str(sequence_index[fasta_key].seq)

            if args.simplify_ncbi_fasta_header:
                seq_id = sequence_index[fasta_key].id.split(
                    "|")[1] + "_" + args.key + "_sample_" + str(
                        sample_id) + "_" + fasta_name
                seq_id = seq_id.replace(".", "_")
                seq_name = seq_id
                sequence_description = seq_id
            else:
                seq_id = sequence_index[
                    fasta_key].id + args.key + "_sample_" + str(sample_id)
                seq_name = sequence_index[
                    fasta_key].name + args.key + "_sample_" + str(sample_id)
                sequence_description = sequence_index[fasta_key].description
        generated_samples += 1
        if (generated_samples % 500) == 0:
            logger.info("Generated %d/%d sequences" %
                        (generated_samples, len(contig_coordinates)))

        sample_id += 1
        sub_seq = current_seq[start:end]
        if (len(sub_seq) <= args.min_length):
            continue

        if (len(sub_seq) >= 10000):
            assert False

        if (not args.keep_IUPAC) and (set(sub_seq) != set(['A', 'C', 'G', 'T'
                                                           ])):
            # contains ambiguous
            continue

        record = SeqRecord(Seq(sub_seq, generic_dna),
                           id=seq_id,
                           name=seq_name,
                           description=sequence_description)

        if args.reverse and bool(random.getrandbits(1)):
            recordRC = record.reverse_complement()
            recordRC.id = record.id + "_rev"
            # recordRC.name=record.name
            recordRC.description = ""
            record = recordRC
        # record = SeqRecord(Seq(sub_seq,generic_dna))
        all_records.append(record)

    if args.pretty:
        print table.get_string()
    if args.append:
        logger.info("Appending to file")
        output_handle = open(args.output_name, "a")
    else:
        output_handle = open(args.output_name, "w")

    SeqIO.write(all_records, output_handle, "fasta")
    output_handle.close()
Example #39
    # Filter incoming data
    #data = signal.lfilter(b,a,data)

    # Generate FFT
    freqs, y = get_fft(data)

    # Average the samples
    #y=smoothMemory(y,3)

    # Normalize
    y = y / 5

    # Average into chunks of N
    N = 25
    yy = [scipy.average(y[n:n + N]) for n in range(0, len(y), N)]
    yy = yy[:len(yy) / 2]  # Discard half of the samples, as they are mirrored

    ## Part 3: Algorithm ##

    # Loudness detection
    loudness = thresh(yy[CHANNEL] * GAIN, THRESHOLD)

    # Noisiness meter
    noisiness -= DECAY
    noisiness += loudness * ATTACK
    noisiness = limit(noisiness, 0.0, 1.0)

    # Brightness modulation
    modulation = MODULATION * limit(noisiness, 0.0, 1.0)
    brightness = limit(MIN_BRIGHTNESS + (loudness * modulation), 0.0, 1.0)
Example #40
def run():
    import pyfits, os, redsequence, math, pylab, commands
    import os, re, sys, string, scipy, MySQLdb
    from copy import copy

    subarudir = os.environ['subdir']
    cluster = sys.argv[1]  #'MACS1423+24'
    spec = False
    train_first = False
    magtype = 'APER1'
    AP_TYPE = ''
    type = 'all'
    SPECTRA = 'CWWSB_capak.list'
    FILTER_WITH_LIST = None
    if len(sys.argv) > 2:
        for s in sys.argv:
            if s == 'spec':
                type = 'spec'
                spec = True
            if s == 'rand':
                type = 'rand'
            if s == 'train':
                train_first = True
            if s == 'ISO':
                magtype = 'ISO'
            if s == 'APER1':
                magtype = 'APER1'

            if s == 'APER':
                magtype = 'APER'

            if string.find(s, 'flist') != -1:
                import re
                rs = re.split('=', s)
                FILTER_WITH_LIST = rs[1]

            if string.find(s, 'detect') != -1:
                import re
                rs = re.split('=', s)
                DETECT_FILTER = rs[1]
            if string.find(s, 'spectra') != -1:
                import re
                rs = re.split('=', s)
                SPECTRA = rs[1]
            if string.find(s, 'aptype') != -1:
                import re
                rs = re.split('=', s)
                AP_TYPE = '_' + rs[1]

    SEGMENTATION_IMAGE = False

    JPG_IMAGE = False

    STAR_COLUMN, ANJA_SEQUENCE, CLUSTER_REDSHIFT = False, True, True

    PLOT_CMD = True

    PLOT_CUTS = True

    REMAKE_CLUSTER_MASK = False

    BPZ_CUT = 0.3
    print 'opening photometry'
    photdir = '/nfs/slac/g/ki/ki05/anja/SUBARU/ki06/photometry_2010/' + cluster + '/PHOTOMETRY_' + DETECT_FILTER + AP_TYPE + '/'

    if CLUSTER_REDSHIFT:
        command = 'grep ' + cluster + ' ' + '/nfs/slac/g/ki/ki05/anja/SUBARU/' + '/clusters.redshifts '
        print command
        cluster_info = commands.getoutput(command)
        cluster_redshift = float(re.split('\s+', cluster_info)[1])
        print cluster_redshift

    probs = photdir + '/' + cluster + '.APER1.1.CWWSB_capak.list.all.probs'
    print 'converting probabilities'
    #convert_probs_to_fits(probs)

    probs_tab = pyfits.open(probs + '.tab')
    ''' make mask '''

    minus = float('%.2f' % (cluster_redshift - 0.05))
    plus = minus + 0.1

    list = [str(x) for x in scipy.arange(minus, plus, 0.01)]

    phot_clus_probs = reduce(lambda x, y: x + y,
                             [probs_tab[1].data.field(c) for c in list])

    phot_clus_mask = phot_clus_probs < 0.01

    print phot_clus_probs, phot_clus_mask

    print probs, 'finished'

    website = os.environ['sne'] + '/magnification/' + cluster + '/'
    os.system('mkdir -p ' + website)

    imdir = subarudir + cluster + '/' + DETECT_FILTER + '/SCIENCE/coadd_' + cluster + '_all/'

    if SEGMENTATION_IMAGE:
        make_segmentation_image(photdir, cluster)

    if JPG_IMAGE:
        ds9(imdir + 'coadd.fits', website + 'cluster.jpg', save=False)
        #ds9(photdir+'segmentation.fits',website+'cluster_seg.jpg',save=True)
        #ds9(photdir+'cluster_mask.fits',website+'cluster_mask.jpg',extra='xpaset -p ds9 scale limits 0 1',save=True)
    ''' start making webpage '''
    mag_page = open(website + 'index.html', 'w', 0)
    mag_page.write(
        '<html><h1>' + cluster +
        ' Magnification</h1>\n<br><img src=cluster.jpg onmouseout="this.src=\'cluster.jpg\';" onmouseover="this.src=\'cluster_seg.jpg\';"></img>\n'
    )
    mag_page.write(
        '<img src=cluster.jpg onmouseout="this.src=\'cluster.jpg\';" onmouseover="this.src=\'cluster_mask.jpg\';"></img><br>\n'
    )
    mag_page.write('<img src=stars.png></img><br>\n')
    mag_page.write('Color-Magnitude Diagram<br><img src=cmd.png></img><br>\n')

    db2 = MySQLdb.connect(db='subaru',
                          user='******',
                          passwd='darkmatter',
                          host='ki-sr01')
    c = db2.cursor()

    if STAR_COLUMN:
        print '''finding star column'''
        width_star, max_star = select_stars(photdir + cluster + '.slr.cat',
                                            cluster, DETECT_FILTER, website)

        commandst = 'update clusters_db set widthstar=' + str(
            width_star) + ' where objname="' + cluster + '"'
        c.execute(commandst)
        commandst = 'update clusters_db set maxstar=' + str(
            max_star) + ' where objname="' + cluster + '"'
        c.execute(commandst)
    else:
        db_keys = describe_db(c, ['clusters_db'])
        c.execute('select * from clusters_db where objname="' + cluster + '"')
        results = c.fetchall()
        line = results[0]
        dict = {}
        for i in range(len(db_keys)):
            dict[db_keys[i]] = str(line[i])
        width_star, max_star = float(dict['widthstar']), float(dict['maxstar'])
        print width_star, max_star

    if ANJA_SEQUENCE:
        print '''reading Anja's red sequence '''
        red_dict = anja_redsequence(cluster, DETECT_FILTER)
        from_stratch = False

    print ''' MERGING PHOTOZ AND PHOTOMETRY CATALOGS '''
    photoz = pyfits.open(photdir + cluster +
                         '.APER1.1.CWWSB_capak.list.all.bpz.tab')
    photometry = pyfits.open(photdir + cluster + '.slr.cat')

    cols = []
    for col in photoz[1].columns:
        cols.append(col)
    for col in photometry[1].columns:
        cols.append(col)
    hdu = pyfits.PrimaryHDU()
    temp1 = pyfits.new_table(cols)
    hdulist = pyfits.HDUList([hdu])
    hdulist.append(temp1)
    #hdulist.writeto('savetab.fits')

    data_all = hdulist[1].data

    print ''' APPLYING STAR CUT '''
    ''' @#$@#$^@$%&#$%& ALTERNATE CUT !!!!! '''
    star_cut = data_all.field('FLUX_RADIUS') > max_star - width_star
    data_all = data_all[star_cut]
    phot_clus_mask = phot_clus_mask[star_cut]

    if PLOT_CMD:
        print ''' MAKING CMD PLOT '''
        plot_cmd(data_all,
                 red_dict,
                 website + 'cmd.png',
                 cluster_redshift,
                 title='ALL GALAXIES')

    plot_cc(data_all,
            red_dict,
            website + 'cc.png',
            cluster_redshift,
            title='ALL GALAXIES')

    if PLOT_CUTS:

        print ''' MAKING CUTS PLOT '''

        plot_var, bins, name = prefix(DETECT_FILTER), scipy.arange(
            21, 28, 0.2), 'lumfnc.png'
        pylab.clf()
        data_save = copy(data_all)
        #data_save = data_save[data_save.field('BPZ_ODDS') > BPZ_CUT]
        #data_save = data_save[(data_save.field('BPZ_Z_B') > cluster_redshift + 0.1)*(data_save.field('BPZ_Z_B') < 3)]

        data_save = data_save[phot_clus_mask]

        latepdf, latebins, patches = pylab.hist(
            data_save.field(plot_var)[data_save.field('BPZ_T_B') >= 3],
            bins=bins,
            histtype='step',
            label='LATE T >= 3')
        earlypdf, earlybins, patches = pylab.hist(
            data_save.field(plot_var)[data_save.field('BPZ_T_B') < 3],
            bins=bins,
            histtype='step',
            label='EARLY T < 3')
        [xmin, xmax, ymin, ymax] = pylab.axis()
        pylab.ylim([ymin, ymax * 2.0])
        pylab.legend()
        pylab.xlabel(plot_var)
        pylab.ylabel('Galaxies')
        pylab.savefig(website + '/' + name)

        mag_page.write('<br>Luminosity Functions<br><img src=' + name +
                       '></img>\n')

        pylab.clf()
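        # build cumulative counts; the running sums start at 1 so that
        # log10 stays defined even before the first non-empty bin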
        earlysum = 1
        earlylogpdf = []
        for v in earlypdf:
            earlysum += v
            earlylogpdf.append(math.log10(earlysum))

        earlylogpdf = scipy.array(earlylogpdf)

        latesum = 1
        latelogpdf = []
        for v in latepdf:
            latesum += v
            latelogpdf.append(math.log10(latesum))

        latelogpdf = scipy.array(latelogpdf)

        print latepdf, bins, patches, latesum, latelogpdf
        print earlypdf, bins, patches, earlysum, earlylogpdf

        plot_bins = scipy.array(bins[:-1])
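        # fit straight lines to the cumulative log counts over fixed magnitude windows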

        earlymask = (plot_bins > 22.5) * (plot_bins < 25)
        earlycoeffs = scipy.polyfit(plot_bins[earlymask],
                                    earlylogpdf[earlymask], 1)

        latemask = (plot_bins > 22) * (plot_bins < 25)
        latecoeffs = scipy.polyfit(plot_bins[latemask], latelogpdf[latemask],
                                   1)

        earlyline = scipy.polyval(earlycoeffs, plot_bins[earlymask])
        lateline = scipy.polyval(latecoeffs, plot_bins[latemask])

        pylab.plot(plot_bins[earlymask], earlyline, color='k')
        pylab.plot(plot_bins[latemask], lateline, color='k')

        x = plot_bins[earlymask][0]
        y = scipy.polyval(earlycoeffs, [x])[0] + 0.1
        print x, y
        pylab.figtext(0.15,
                      0.8,
                      's= %.2f' % earlycoeffs[0],
                      color='r',
                      size='x-large',
                      ha='left')

        x = plot_bins[latemask][0]
        y = scipy.polyval(latecoeffs, [x])[0] - 0.1
        print x, y
        pylab.figtext(0.15,
                      0.75,
                      's= %.2f' % latecoeffs[0],
                      color='b',
                      size='x-large',
                      ha='left')

        pylab.bar(bins[:-1],
                  earlylogpdf,
                  facecolor='none',
                  edgecolor='r',
                  linewidth=2,
                  width=(bins[1] - bins[0]),
                  label='EARLY T < 3')
        pylab.bar(bins[:-1],
                  latelogpdf,
                  facecolor='none',
                  edgecolor='b',
                  linewidth=2,
                  width=(bins[1] - bins[0]),
                  label='LATE T >= 3')
        pylab.xlabel('Apparent Magnitude')
        pylab.ylabel('log_10(N(>m))')
        pylab.legend(loc=4)

        pylab.savefig(website + '/loglum.png')

        mag_page.write('<br>LogN<br><img src=loglum.png></img>\n')

        for plot_var, bins, name in []:  #['BPZ_Z_B',scipy.arange(0,1.2,0.05),'redshifts.png'],[prefix(DETECT_FILTER),scipy.arange(19,28,0.2),'mags.png']]:

            pylab.clf()
            data_save = copy(data_all)
            pylab.hist(data_save.field(plot_var),
                       bins=bins,
                       histtype='step',
                       label='ALL')
            #data_save = data_save[data_save.field('BPZ_ODDS') > BPZ_CUT]
            data_save = data_save[phot_clus_mask]
            #pylab.hist(data_save.field(plot_var),bins=bins,histtype='step',label='ODDS > 0.3')
            pylab.hist(data_save.field(plot_var),
                       bins=bins,
                       histtype='step',
                       label='NO CLUSTER GALAXIES')
            data_save = data_save[(
                data_save.field('BPZ_Z_B') >
                cluster_redshift + 0.1)]  #*(data_save.field('BPZ_Z_B') < 1.2)]
            pylab.hist(data_save.field(plot_var),
                       bins=bins,
                       histtype='step',
                       label='Z > Z_CLUSTER + 0.1')
            pylab.hist(
                data_save.field(plot_var)[data_save.field('BPZ_T_B') < 3],
                bins=bins,
                histtype='step',
                label='EARLY T < 3')
            pylab.hist(
                data_save.field(plot_var)[data_save.field('BPZ_T_B') >= 3],
                bins=bins,
                histtype='step',
                label='LATE T >= 3')
            [xmin, xmax, ymin, ymax] = pylab.axis()
            pylab.ylim([ymin, ymax * 2.0])
            pylab.legend()
            pylab.xlabel(plot_var)
            pylab.ylabel('Galaxies')
            pylab.savefig(website + '/' + name)

            mag_page.write('<br><img src=' + name + '></img>\n')

    xcen, ycen = 5000, 5000

    if REMAKE_CLUSTER_MASK:
        print '''opening image + segmentation image'''
        image = pyfits.open(imdir + 'coadd.fits')
        os.system('ln -s ' + imdir + 'coadd.fits ' + photdir +
                  'coadd_link.fits')
        segmentation = pyfits.open(photdir + 'segmentation.fits')[0].data

        weight = pyfits.open(imdir + 'coadd.weight.fits')[0].data

        photoz_mask = (data_all.field('BPZ_Z_B') > 0.3) * (
            data_all.field('BPZ_Z_B') <
            1.2) * (data_all.field('BPZ_Z_B') < cluster_redshift + 0.1) * (
                data_all.field('BPZ_Z_B') > cluster_redshift - 0.1)

        diff = (data_all.field(red_dict['c1']) - data_all.field(red_dict['c2'])
                ) - data_all.field(red_dict['m']) * red_dict['slope']
        ''' mask for redsequence '''
        redseq_mask = (diff > red_dict['lower_intercept']) * (
            diff < red_dict['upper_intercept']
        )  # * (data_all.field(red_dict['m']) < float(red_dict['magcut']) )

        print red_dict['magcut']

        flag_mask = data_all.field('Flag') != 0

        mask = scipy.logical_or(scipy.logical_or(photoz_mask, redseq_mask),
                                flag_mask)

        objects_to_mask = data_all[mask]

        IDS_mask, x_mask, y_mask = objects_to_mask.field(
            'SeqNr'), objects_to_mask.field('Xpos'), objects_to_mask.field(
                'Ypos')

        areas = scipy.ones(segmentation.shape)
        areas[weight == 0] = 0.0

        print 'masking'
        for i in range(len(IDS_mask)):
            ID = IDS_mask[i]
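            # image arrays are indexed [row, col], i.e. [Ypos, Xpos], hence the swap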
            y = x_mask[i]
            x = y_mask[i]

            seg_num = segmentation[x, y]

            #print segmentation.shape
            print max(0,
                      x - 100), min(9999,
                                    x + 100), max(0,
                                                  y - 100), min(9999, y + 100)
            piece = segmentation[max(0, x - 100):min(9999, x + 100),
                                 max(0, y - 100):min(9999, y + 100)]
            #print
            mask = piece == seg_num
            #print mask
            areas[max(0, x - 100):min(9999, x + 100),
                  max(0, y - 100):min(9999, y + 100)][mask] = 0
            print areas[max(0, x - 100):min(9999, x + 100),
                        max(0, y - 100):min(9999, y + 100)], len(IDS_mask)
            print ID

        fitsobj = pyfits.HDUList()
        hdu = pyfits.PrimaryHDU()
        hdu.data = areas
        fitsobj.append(hdu)
        file = photdir + 'cluster_mask.fits'
        os.system('rm ' + file)
        fitsobj.writeto(file)
        print file

        area = areas
    else:
        area = pyfits.open(photdir + 'cluster_mask.fits')[0].data

    print 'making radii'
    x, y = scipy.meshgrid(scipy.arange(area.shape[0]),
                          scipy.arange(area.shape[1]))
    r = ((x - scipy.ones(area.shape) * xcen)**2. +
         (y - scipy.ones(area.shape) * ycen)**2.)**0.5

    bins = scipy.arange(0, 1.2, 0.05)

    dict = {}
    #[data_all.field('BPZ_T_B')<=3,'REDOFRS','green',False]]: #,
    for mask, name, color, photoz_cut in [[
            data_all.field('BPZ_T_B') < 4, 'EARLY', 'red', True
    ], [data_all.field('BPZ_T_B') > 3, 'LATE', 'blue',
            True]]:  #,[data_all.field('BPZ_T_B')>-99,'CONTAM','green',False]]:
        print len(data_all)

        diff = (data_all.field(red_dict['c1']) - data_all.field(red_dict['c2'])
                ) - data_all.field(red_dict['m']) * red_dict['slope']
        redseq_mask = (diff > red_dict['lower_intercept']) * (
            diff < red_dict['upper_intercept']
        )  #* (data_all.field(red_dict['m']) < float(red_dict['magcut']) )

        #mag_mask = (22.5 < data_all.field(prefix(DETECT_FILTER))) *  (25 > data_all.field(prefix(DETECT_FILTER)))
        if photoz_cut:
            #photoz_mask = phot_clus_mask*(data_all.field('BPZ_Z_B') > cluster_redshift + 0.1)*(data_all.field('BPZ_Z_B') < 3)*(mask)*(data_all.field(prefix('W-J-V')) < 25)#*(data_all.field(prefix(DETECT_FILTER)) < 25)

            photoz_mask = phot_clus_mask * (
                data_all.field('BPZ_Z_B') > cluster_redshift + 0.15
            ) * (data_all.field('BPZ_Z_B') < 3) * (
                mask
            )  #*(data_all.field(prefix('W-J-V')) < 25)#*(data_all.field(prefix(DETECT_FILTER)) < 25)
            data = data_all[photoz_mask]  #*(redseq_mask==False)]
        else:
            diff = (data_all.field(red_dict['c1']) -
                    data_all.field(red_dict['c2'])) - data_all.field(
                        red_dict['m']) * red_dict['slope']
            red_of_redseq_mask = scipy.logical_or(
                diff > red_dict['upper_intercept'],
                diff < red_dict['lower_intercept']
            )  #* (data_all.field(red_dict['m']) < float(red_dict['magcut']) )
            data = data_all[(red_of_redseq_mask)]
        #plot_cmd(data,red_dict,website+name.replace(' ','')+'.png',cluster_redshift,title=name,)

        plot_cc(
            data,
            red_dict,
            website + name.replace(' ', '') + '.png',
            cluster_redshift,
            title=name,
        )

        for plot_var, bins, name_plot in [[
                'BPZ_Z_B',
                scipy.arange(0, 1.2, 0.05), 'redshifts.png'
        ], [prefix(DETECT_FILTER),
                scipy.arange(19, 28, 0.2), 'mags.png']]:

            pylab.clf()
            data_save = copy(data)
            pylab.hist(data_save.field(plot_var),
                       bins=bins,
                       histtype='step',
                       label='ALL')
            #data_save = data_save[data_save.field('BPZ_ODDS') > BPZ_CUT]
            #data_save = data_save[phot_clus_mask]
            #pylab.hist(data_save.field(plot_var),bins=bins,histtype='step',label='ODDS > 0.3')
            #pylab.hist(data_save.field(plot_var),bins=bins,histtype='step',label='NO CLUSTER GALAXIES')
            #data_save = data_save[(data_save.field('BPZ_Z_B') > cluster_redshift + 0.1)*(data_save.field('BPZ_Z_B') < 1.2)]
            #pylab.hist(data_save.field(plot_var),bins=bins,histtype='step',label='Z > Z_CLUSTER + 0.1')
            #pylab.hist(data_save.field(plot_var)[data_save.field('BPZ_T_B')<3],bins=bins,histtype='step',label='EARLY T < 3')
            #pylab.hist(data_save.field(plot_var)[data_save.field('BPZ_T_B')>=3],bins=bins,histtype='step',label='LATE T >= 3')
            [xmin, xmax, ymin, ymax] = pylab.axis()
            pylab.ylim([ymin, ymax * 2.0])
            pylab.legend()
            pylab.title(name)
            pylab.xlabel(plot_var)
            pylab.ylabel('Galaxies')
            pylab.savefig(website + '/' + name + name_plot)

            mag_page.write('<br><img src=' + name + name_plot + '></img>\n')

        mag_page.write('<img src=' + name.replace(' ', '') +
                       '.png></img><br>\n')
        radius = ((data.field('Xpos') - xcen)**2. +
                  (data.field('Ypos') - ycen)**2.)**0.5

        densities = []
        densities_error = []
        radii = []
        densities_nosub = []
        objects = []
        areas_list = []

        annuli = zip(scipy.arange(0, 1950, 150), scipy.arange(150, 2100, 150))
        mask = (radius < annuli[-1][1])
        data_inside = data[mask]
        radius_inside = radius[mask]

        for low, high in annuli:  #[[0,150],[150,300],[300,600],[600,1200],[1200,1600],[1600,3000],[3000,4000]]:
            print low, high
            mask_r = (r > low) * (r < high)
            #print mask_r.shape
            #print area
            #print area.shape
            subarea = area[mask_r]
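            # convert pixel counts to arcmin^2 (0.2 arcsec/pixel scale, hence the 0.2/60 factor)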
            a = scipy.sum(subarea) * (0.2 / 60)**2.
            area_nosub = math.pi * (high**2. - low**2.) * (0.2 / 60.)**2.
            areas_list.append(area_nosub)
            mask = (radius_inside > low) * (radius_inside < high)
            subset = data_inside[mask]
            print len(subset)
            density = float(len(subset)) / a
            densities.append(density)
            densities_nosub.append(len(subset) / area_nosub)
            densities_error.append(math.sqrt(len(subset)) / a)
            radii.append(scipy.average(radius_inside[mask]) * 0.2 / 60.)
            objects.append(len(subset))

            print radii, densities, len(subset), 'objects'

        plot_regions(data_inside, website)
        dict[color] = {
            'densities': densities,
            'areas': areas_list,
            'objects': objects,
            'densities_nosub': densities_nosub,
            'densities_error': densities_error,
            'radii': radii,
            'name': name
        }

    pylab.clf()
    for key in dict:
        #pylab.errorbar(dict[key]['radii'],dict[key]['objects'],yerr=(dict[key]['objects'])**0.5,fmt=None,ecolor=key)
        pylab.scatter(dict[key]['radii'],
                      dict[key]['objects'],
                      color=key,
                      label=dict[key]['name'])

    pylab.title('Number of Objects')
    pylab.xlabel('Radius (Arcmin)')
    pylab.ylabel('Objects')
    x1, x2, y1, y2 = pylab.axis()
    pylab.ylim([0, y2])
    pylab.legend()
    pylab.savefig(website + '/area.png')

    mag_page.write('<br><img src=area.png></img>\n')

    pylab.clf()
    for key in dict:
        #pylab.errorbar(dict[key]['radii'],dict[key]['objects'],yerr=(dict[key]['objects'])**0.5,fmt=None,ecolor=key)
        pylab.scatter(dict[key]['radii'],
                      dict[key]['areas'],
                      color=key,
                      label=dict[key]['name'])

    pylab.title('Annulus Area')
    pylab.xlabel('Radius (Arcmin)')
    pylab.ylabel('Area (Arcmin^2)')
    x1, x2, y1, y2 = pylab.axis()
    pylab.ylim([0, y2])
    pylab.legend()
    pylab.savefig(website + '/objects.png')

    mag_page.write('<br><img src=objects.png></img>\n')

    pylab.clf()
    for key in dict:
        pylab.errorbar(dict[key]['radii'],
                       dict[key]['densities'],
                       yerr=dict[key]['densities_error'],
                       fmt=None,
                       ecolor=key)
        pylab.scatter(dict[key]['radii'],
                      dict[key]['densities'],
                      color=key,
                      label=dict[key]['name'])

    pylab.title('Objects Subtracted')
    pylab.xlabel('Radius (Arcmin)')
    pylab.ylabel('Object Density (Objects/Arcmin^2)')
    x1, x2, y1, y2 = pylab.axis()
    pylab.ylim([0, y2])
    pylab.legend()
    pylab.savefig(website + '/sub.png')

    mag_page.write('<br><img src=sub.png></img>\n')

    pylab.clf()
    for key in dict:
        pylab.errorbar(dict[key]['radii'],
                       dict[key]['densities_nosub'],
                       yerr=dict[key]['densities_error'],
                       fmt=None,
                       ecolor=key)
        pylab.scatter(dict[key]['radii'],
                      dict[key]['densities_nosub'],
                      color=key,
                      label=dict[key]['name'])
    pylab.title('Full Annuli')
    pylab.xlabel('Radius (Arcmin)')
    pylab.ylabel('Object Density (Objects/Arcmin^2)')
    x1, x2, y1, y2 = pylab.axis()
    pylab.ylim([0, y2])
    pylab.legend()
    pylab.savefig(website + 'full.png')

    mag_page.write('<br><img src=full.png></img>\n')

    reg = open(imdir + 'all.reg', 'w')
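    # one region per selected object: a circle labeled with its photo-z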
    reg.write(
        'global color=green font="helvetica 10 normal" select=1 highlite=1 edit=1 move=1 delete=1 include=1 fixed=0 source\nphysical\n'
    )
    for i in range(len(data.field('Xpos'))):
        reg.write('circle(' + str(data.field('Xpos')[i]) + ',' +
                  str(data.field('Ypos')[i]) + ',' + str(5) +
                  ') # color=red width=2 text={' +
                  str(data.field('BPZ_Z_B')[i]) + '}\n')
    reg.close()
Example #41
File: cf.py Project: philippedavid/picca
def fill_dmat(l1, l2, r1, r2, z1, z2, w1, w2, ang, wdm, dm, rpeff, rteff, zeff,
              weff, same_half_plate, order1, order2):
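    # rp and rt are the line-of-sight and transverse pair separations; the
    # globals np, nt, npm, ntm are bin counts (`np` is not numpy here), and the
    # eta terms accumulate the continuum-fit distortion folded into dm below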

    rp = (r1[:, None] - r2) * sp.cos(ang / 2)
    if not x_correlation:
        rp = abs(rp)
    rt = (r1[:, None] + r2) * sp.sin(ang / 2)
    z = (z1[:, None] + z2) / 2.

    w = (rp < rp_max) & (rt < rt_max) & (rp >= rp_min)

    bp = sp.floor((rp - rp_min) / (rp_max - rp_min) * np).astype(int)
    bt = (rt / rt_max * nt).astype(int)
    bins = bt + nt * bp
    bins = bins[w]

    m_bp = sp.floor((rp - rp_min) / (rp_max - rp_min) * npm).astype(int)
    m_bt = (rt / rt_max * ntm).astype(int)
    m_bins = m_bt + ntm * m_bp
    m_bins = m_bins[w]

    sw1 = w1.sum()
    sw2 = w2.sum()

    ml1 = sp.average(l1, weights=w1)
    ml2 = sp.average(l2, weights=w2)

    dl1 = l1 - ml1
    dl2 = l2 - ml2

    slw1 = (w1 * dl1**2).sum()
    slw2 = (w2 * dl2**2).sum()

    n1 = len(l1)
    n2 = len(l2)
    ij = sp.arange(n1)[:, None] + n1 * sp.arange(n2)
    ij = ij[w]

    we = w1[:, None] * w2
    we = we[w]

    if remove_same_half_plate_close_pairs and same_half_plate:
        wsame = abs(rp[w]) < (rp_max - rp_min) / np
        we[wsame] = 0.

    c = sp.bincount(m_bins, weights=we * rp[w])
    rpeff[:c.size] += c
    c = sp.bincount(m_bins, weights=we * rt[w])
    rteff[:c.size] += c
    c = sp.bincount(m_bins, weights=we * z[w])
    zeff[:c.size] += c
    c = sp.bincount(m_bins, weights=we)
    weff[:c.size] += c

    c = sp.bincount(bins, weights=we)
    wdm[:len(c)] += c
    eta1 = sp.zeros(npm * ntm * n1)
    eta2 = sp.zeros(npm * ntm * n2)
    eta3 = sp.zeros(npm * ntm * n1)
    eta4 = sp.zeros(npm * ntm * n2)
    eta5 = sp.zeros(npm * ntm)
    eta6 = sp.zeros(npm * ntm)
    eta7 = sp.zeros(npm * ntm)
    eta8 = sp.zeros(npm * ntm)

    c = sp.bincount(ij % n1 + n1 * m_bins,
                    weights=(sp.ones(n1)[:, None] * w2)[w] / sw2)
    eta1[:len(c)] += c
    c = sp.bincount((ij - ij % n1) // n1 + n2 * m_bins,
                    weights=(w1[:, None] * sp.ones(n2))[w] / sw1)
    eta2[:len(c)] += c
    c = sp.bincount(m_bins, weights=(w1[:, None] * w2)[w] / sw1 / sw2)
    eta5[:len(c)] += c

    if order2 == 1:
        c = sp.bincount(ij % n1 + n1 * m_bins,
                        weights=(sp.ones(n1)[:, None] * w2 * dl2)[w] / slw2)
        eta3[:len(c)] += c
        c = sp.bincount(m_bins,
                        weights=(w1[:, None] * (w2 * dl2))[w] / sw1 / slw2)
        eta6[:len(c)] += c
    if order1 == 1:
        c = sp.bincount((ij - ij % n1) // n1 + n2 * m_bins,
                        weights=((w1 * dl1)[:, None] * sp.ones(n2))[w] / slw1)
        eta4[:len(c)] += c
        c = sp.bincount(m_bins,
                        weights=((w1 * dl1)[:, None] * w2)[w] / slw1 / sw2)
        eta7[:len(c)] += c
        if order2 == 1:
            c = sp.bincount(m_bins,
                            weights=((w1 * dl1)[:, None] * (w2 * dl2))[w] /
                            slw1 / slw2)
            eta8[:len(c)] += c

    ubb = sp.unique(m_bins)
    for k, (ba, m_ba) in enumerate(zip(bins, m_bins)):
        dm[m_ba + npm * ntm * ba] += we[k]
        i = ij[k] % n1
        j = (ij[k] - i) // n1
        for bb in ubb:
            dm[bb + npm * ntm * ba] += we[k] * (
                eta5[bb] + eta6[bb] * dl2[j]
                + eta7[bb] * dl1[i] + eta8[bb] * dl1[i] * dl2[j]
            ) - we[k] * (
                eta1[i + n1 * bb] + eta3[i + n1 * bb] * dl2[j]
                + eta2[j + n2 * bb] + eta4[j + n2 * bb] * dl1[i]
            )

import scipy

def parabola_guess(xs, ys):
    return {"a": 1, "B": scipy.average(ys), "x0": (xs.max() + xs.min()) / 2.0}
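
# A minimal usage sketch (not from the source project), assuming the dictionary
# seeds a parabola model y = a*(x - x0)**2 + B for scipy.optimize.curve_fit:
from scipy.optimize import curve_fit

def parabola(x, a, B, x0):
    return a * (x - x0)**2 + B

xs = scipy.linspace(-2.0, 2.0, 50)
ys = 3.0 * (xs - 0.5)**2 + 1.0
guess = parabola_guess(xs, ys)
popt, pcov = curve_fit(parabola, xs, ys,
                       p0=[guess["a"], guess["B"], guess["x0"]])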
Example #43
 def calc_avg(self):
     f_start, f_stop = self.Main.Options.preprocessing['avg_frames']
     # average over the selected frame window along the frame axis
     return sp.average(self.Main.Data.raw[:, :, f_start:f_stop, :], axis=2)
Example #44
File: tune_bpr.py Project: blanarik/NIC
    perf_ndcg_at_100 = []
    rec_list = []

    print(' >> took ', str(timedelta(seconds=time.time() - aux_time)))
    print(str(timedelta(seconds=time.time() - start)), ' -- config #',
          len(performance_list) + 1, ' >> evaluation starting...')
    aux_time = time.time()

    with Pool(NUM_THREADS) as p:
        perf_ndcg_at_100 = p.map(
            paralelize_ndcg,
            set(train[train.user_eval_set == 'val'].user_id.values))

    performance_list.append({
        'performance': scipy.average(perf_ndcg_at_100),
        'factors': factors,
        'learning_rate': learning_rate,
        'regularization': regularization,
        'iterations': iterations
    })
    performance_val_users = pd.DataFrame(performance_list)
    performance_val_users.to_csv('../data/fstore/base_tuning/bpr.csv',
                                 index=False)

    print(' >> took ', str(timedelta(seconds=time.time() - aux_time)))
    print(str(timedelta(seconds=time.time() - start)), ' -- config #',
          len(performance_list) + 1, ' >> results saved...')

    # generate recommendations for test users if this model is best so far
    # storing models led to Memory Error
Example #45
from scipy import average

def to_grayscale(arr):
    "If arr is a color image (3D array), convert it to grayscale (2D array)."
    if len(arr.shape) == 3:
        return average(arr, -1)  # average over the last axis (color channels)
    else:
        return arr
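
# A minimal usage sketch with a synthetic RGB array (not from the source):
import numpy as np
rgb = np.random.rand(4, 4, 3)
assert to_grayscale(rgb).shape == (4, 4)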
Example #46
def plot_the_two_sample_t_test():
    # First, delete any existing normal-curve and mean plots from the figure
    global handle_of_group_A_conf_int
    global handle_of_group_B_conf_int
    global handle_of_group_A_mean
    global handle_of_group_B_mean
    pylab.setp(handle_of_group_A_conf_int, visible=False)
    pylab.setp(handle_of_group_B_conf_int, visible=False)
    pylab.setp(handle_of_group_A_mean, visible=False)
    pylab.setp(handle_of_group_B_mean, visible=False)
    #### Next, calculate and plot the stats.
    #### We do this separately for Group A and Group B.
    #### The groups are defined by whether their x-coord is < 0 or >= 0
    group_A_points = scipy.nonzero(coords_array[:, 0] < 0)
    group_B_points = scipy.nonzero(coords_array[:, 0] >= 0)
    number_of_points_A = scipy.shape(group_A_points)[1]
    y_coords_A = coords_array[
        group_A_points,
        1]  # y-coords are the second column, which is 1 in Python
    if number_of_points_A > 0:
        y_mean_A = scipy.average(y_coords_A)
        y_std_A = scipy.std(y_coords_A)
        y_ste_A = y_std_A / scipy.sqrt(
            number_of_points_A)  # ste stands for Standard Error of the Mean
    if number_of_points_A > 1:
        df_A = number_of_points_A - 1  # df stands for "degrees of freedom"
        t_crit_A = scipy.stats.t.ppf(
            0.975, df_A)  # 95% critical value of t, two-tailed
        confidence_interval_A = y_mean_A + t_crit_A * y_ste_A * scipy.array(
            [-1, 1])
        ### Now plot the mean and confidence interval for this group
        ### For more explanation of what the confidence interval means,
        ### see the accompanying script interactive_one_sample_t_test.py
        handle_of_group_A_mean = pylab.plot(-axis_x_range * 0.5,
                                            y_mean_A,
                                            'co',
                                            markersize=10)
        handle_of_group_A_conf_int = pylab.plot(
            [-axis_x_range * 0.5, -axis_x_range * 0.5],
            confidence_interval_A,
            'r-',
            linewidth=3)
    #### Now do the same for Group B
    number_of_points_B = scipy.shape(group_B_points)[1]
    y_coords_B = coords_array[
        group_B_points,
        1]  # y-coords are the second column, which is 1 in Python
    if number_of_points_B > 0:
        y_mean_B = scipy.average(y_coords_B)
        y_std_B = scipy.std(y_coords_B)
        y_ste_B = y_std_B / scipy.sqrt(
            number_of_points_B)  # ste stands for Standard Error of the Mean
    if number_of_points_B > 1:
        df_B = number_of_points_B - 1  # df stands for "degrees of freedom"
        t_crit_B = scipy.stats.t.ppf(
            0.975, df_B)  # 95% critical value of t, two-tailed
        confidence_interval_B = y_mean_B + t_crit_B * y_ste_B * scipy.array(
            [-1, 1])
        ### Now plot the mean and confidence interval for this group
        handle_of_group_B_mean = pylab.plot(axis_x_range * 0.5,
                                            y_mean_B,
                                            'ko',
                                            markersize=10)
        handle_of_group_B_conf_int = pylab.plot(
            [axis_x_range * 0.5, axis_x_range * 0.5],
            confidence_interval_B,
            'r-',
            linewidth=3)
    #### Next: if at least one of the classes has two or more points,
    #### and neither of the classes is empty, then we will perform
    #### the two-sample t-test and display the results.
    if ((scipy.maximum(number_of_points_A, number_of_points_B) > 1) &
        (scipy.minimum(number_of_points_A, number_of_points_B) > 0)):
        #### SciPy refers to its two-sample t-test as ttest_ind,
        #### meaning that this is the test to use for two independent groups.
        #### It requires column-vector inputs, so we reshape them first
        y_coords_A_column_vec = y_coords_A.reshape(-1, 1)
        y_coords_B_column_vec = y_coords_B.reshape(-1, 1)
        [t_value_AvsB,
         p_value_AvsB] = scipy.stats.ttest_ind(y_coords_A_column_vec,
                                               y_coords_B_column_vec)
        #### In order to make the p-values format nicely
        #### even when they have a bunch of zeros at the start, we do this:
        p_value_string = "%1.2g" % p_value_AvsB
        pylab.xlabel(
            'Mean-A=' +
            str(round(y_mean_A, 2)) +  # The ',2' means show 2 decimal places
            ', Mean-B=' + str(round(y_mean_B, 2)) + ', t=' +
            str(round(t_value_AvsB, 2)) + ', p= ' + p_value_string)
        pylab.ylabel('n(A)=' + str(number_of_points_A) + ', n(B)=' +
                     str(number_of_points_B) + ', df=' +
                     str(number_of_points_A + number_of_points_B - 2))
        # Note: this df=n(A)+n(B)-2 formula is for the simple case
        # where the two-sample t-test assumes that the two groups
        # have equal variance, which is what we are doing here.
    # Set the axis back to its original value, in case Python has changed it during plotting
    pylab.axis(
        [-axis_x_range, axis_x_range, axis_y_lower_lim, axis_y_upper_lim])
Example #47
 def _forwardImplementation(self, inbuf, outbuf):
     outbuf += average(inbuf) * self.params
Example #48
    def update(self):
        """Former `set image data` method.

        Has to check: is average? is dFF? flag to show? only one?

        Average behaviour: take all that are active, average and overlay.

        dFF behaviour: if multiple channels are active, the dFFs are
        overlayed and colored according to their channel.

        If only one channel is active:
        raw is in grayscale, dFF is in glow color map.
        """
        ### for implementation of global lut mod
        #        current_lut = self.LUTwidgets.currentIndex()

        # work only on those that are active
        for n in range(self.data.nFiles):
            if not self.Options.view['show_flags'][n]:  # hide inactive
                self.ImageItems[n].hide()
                self.ImageItems_dFF[n].hide()

            if self.Options.view['show_flags'][n]:  # work only on those that are active

                if self.Options.view['show_dFF']:  # when showing dFF

                    if self.Options.view[
                            'show_monochrome']:  # when in mono glow mode
                        self.ImageItems[n].show()
                    else:
                        self.ImageItems[n].hide()

                    if self.Options.view['show_avg']:  # when showing avg
                        self.ImageItems_dFF[n].setImage(
                            sp.average(self.data.dFF[:, :, :, n], axis=2))
                        self.ImageItems[n].setImage(
                            sp.average(self.data.raw[:, :, :, n], axis=2))

                    else:
                        self.ImageItems_dFF[n].setImage(
                            self.data.dFF[:, :, self.frame, n])
                        self.ImageItems[n].setImage(self.data.raw[:, :,
                                                                  self.frame,
                                                                  n])

                    self.ImageItems_dFF[n].show()

                else:  # when showing raw
                    self.ImageItems_dFF[n].hide()  # no dFF
                    if self.Options.view['show_avg']:
                        self.ImageItems[n].setImage(
                            sp.average(self.data.raw[:, :, :, n], axis=2))
                    else:
                        self.ImageItems[n].setImage(self.data.raw[:, :,
                                                                  self.frame,
                                                                  n])

                    self.ImageItems[n].show()

                self.ImageItems[n].setLevels(
                    self.Data_Display.LUT_Controlers.raw_levels[n])
                self.ImageItems_dFF[n].setLevels(
                    self.Data_Display.LUT_Controlers.dFF_levels[n])
Example #49
File: cnn.py Project: hieple9/volcano
                final_predict = np.array(final_predict).reshape(
                    len(final_predict), 1)
                print("0.5 threshold")
                classified = [utils.get_class(x, 0.5) for x in final_predict]
                f, _ = utils.get_score_and_confusion_matrix(labels, classified)
                print("Best threshold")
                best_threshold = utils.find_best_threshold(
                    labels, final_predict)
                best_classified = [
                    utils.get_class(x, best_threshold) for x in final_predict
                ]
                best_f, _ = utils.get_score_and_confusion_matrix(
                    labels, best_classified)

                if which_data == "training":
                    train_score.append(round(sp.average(best_f) * 100, 2))
                else:
                    validation_score.append(round(sp.average(best_f) * 100, 2))

                    # classified = [utils.get_class(x, best_threshold) for x in final_predict]
                    # # Check in all the cases we correctly predict label is explosive 1, how many have the current labels 1
                    # current_check = []
                    # current_labels = reader.current_labels.values
                    # for index, value in enumerate(classified):
                    #     if value + labels[index] == 2:  # correctly recognition 1
                    #         current = current_labels[index]
                    #         if current >= 1:
                    #             current_check.append(1)
                    #         else:
                    #             current_check.append(0)
                    # print("current_check", len(current_check))
Example #50
    def fit_init(self, x, y):
        """This function computes the empirical estimators of the mean
        vector, the covariance matrix and the proportion of each class.

        :param x: The sample matrix, of size n \times d, where n is the number of samples and d is the number of variables
        :param y: The vector of corresponding labels, of size n \times 1 in the supervised case and n \times C in the unsupervised case
        :type x: float
        :type y: int
        """
        ## Get information from the data
        n, d = x.shape  # Number of samples and number of variables
        if y.ndim == 1:  # Number of classes
            C = int(y.max(0))
        else:
            C = y.shape[1]

        if n != y.shape[0]:
            raise ValueError("size of x and y should match")

        ## Compute constant
        self.cst = d * sp.log(2 * sp.pi)

        ## Compute the whole covariance matrix
        if self.model in ('M2', 'M4', 'M6', 'M8'):
            X = (x - sp.mean(x, axis=0))
            if n >= d:  # Here use dsyrk to take benefit of the product symmetric matrices X^{t}X or XX^{t}
                self.W = dsyrk(
                    1.0 / n, X.T,
                    trans=False)  # Transpose to put in fortran order
            else:
                self.W = dsyrk(1.0 / n, X.T,
                               trans=True)  # Transpose to put in fortran order
            X = None

        ## Learn the empirical of the model for each class
        for c in xrange(C):
            if y.ndim == 1:  # Supervised case
                j = sp.where(y == (c + 1))[0]
                self.ni.append(j.size)
                self.prop.append(float(self.ni[c]) / n)
                self.mean.append(sp.mean(x[j, :], axis=0))
                X = (x[j, :] - self.mean[c])

            else:  # Unsupervised case
                self.ni.append(y[:, c].sum())
                self.prop.append(float(self.ni[c]) / n)
                self.mean.append(sp.average(x, weights=y[:, c], axis=0))
                X = (x - self.mean[c]) * sp.sqrt(y[:, c]).reshape(n, 1)

            if n >= d:  # Here use dsyrk to take benefit of the product of symmetric matrices X^{t}X or XX^{t}
                cov = dsyrk(1.0 / (self.ni[c] - 1), X.T,
                            trans=False)  # Transpose to put in fortran order
            else:
                cov = dsyrk(1.0 / (self.ni[c] - 1), X.T,
                            trans=True)  # Transpose to put in fortran order
                self.X.append(X)

            X = None
            L, Q = linalg.eigh(
                cov, lower=False
            )  # Only the upper part of cov is initialized -> dsyrk
            idx = L.argsort()[::-1]
            L, Q = L[idx], Q[:, idx]
            L[L < EPS] = EPS  # Check for numerical errors
            self.L.append(L)
            self.Q.append(Q)
            self.trace.append(cov.trace())
Example #51
    def _run(self):

        prev_peak_hit = time() * 1000
        is_boi_active = False
        # Run continuously
        while self.running:
            config = self.config
            ## Part 1: Sample Audio ##

            # cur_time = time.time()
            # Get audio sample
            buf = self.stream.read(config.BUFFER_SIZE)
            data = array(struct.unpack("%dh" % (config.BUFFER_SIZE), buf))

            ## Part 2: Perform FFT and Filtering ##

            # Filter incoming data
            # data = signal.lfilter(b,a,data)

            # Generate FFT
            freqs, y = get_fft(data, config.BUFFER_SIZE, config.SAMPLE_RATE)

            # Average the samples
            # y = smoothMemory(y, 5)

            # Normalize by an empirical scale factor
            y = y / 5

            DIVIDE_BY = config.MAX_CHANNEL_NO // config.CHANNEL_RANGE
            # Average into chunks of N
            yy = [
                average(y[n:int(n + DIVIDE_BY)])
                for n in range(0, len(y), DIVIDE_BY)
            ]
            # Discard half of the samples, as they are mirrored
            yy = yy[:len(yy) // 2]

            # Loudness detection
            channels_sum = sum(
                yy[config.CHANNEL_RANGE_START:config.CHANNEL_RANGE_END]) / (
                    config.CHANNEL_RANGE_END - config.CHANNEL_RANGE_START)
            loudness = thresh(channels_sum * config.GAIN, config.THRESHOLD)
            loudness = limit(loudness, 0.0, 1.0)

            # Brightness modulation
            brightness = config.MIN_BRIGHTNESS + \
                (1. - config.MIN_BRIGHTNESS) * loudness
            brightness = limit(brightness, 0.0, 1.0)

            current_time = time() * 1000
            can_hit = current_time - prev_peak_hit >= config.PEAK_TIME_MARGIN

            # Noisiness meter
            if self.falling:
                if config.AUTO_MODULATE:
                    self.noisiness -= config.DECAY
                elif loudness > config.JUMP_THRESHOLD and can_hit:
                    # self.noisiness -= loudness * config.DECAY
                    self.noisiness -= config.HUE_OFFSET + \
                        (random() * config.HUE_OFFSET)
            else:
                if config.AUTO_MODULATE:
                    self.noisiness += config.ATTACK
                elif loudness > config.JUMP_THRESHOLD and can_hit:
                    # self.noisiness += loudness * config.ATTACK
                    self.noisiness += config.HUE_OFFSET + \
                        (random() * config.HUE_OFFSET)

            if not is_boi_active:
                config.is_boi_active = loudness > config.BOI_THRESHOLD
                is_boi_active = config.is_boi_active

            if can_hit:
                prev_peak_hit = current_time
                is_boi_active = False

            self.noisiness = limit(self.noisiness, 0.0, 1.0)

            # Hue modulation (power relationship)
            # mapping = (10 ** limit(noisiness, 0.0, 1.0)) / 10.0
            # mapping = mapping * 1.1 - 0.11

            # Linear mapping
            # mapping = limit(self.noisiness, 0.0, 1.0)

            hue = mapval(self.noisiness, 0.0, 1.0, config.MIN_HUE,
                         config.MAX_HUE)

            if self.noisiness > 0.99:
                self.falling = True
            elif self.noisiness < 0.01:
                self.falling = False

            # Display colour
            red, green, blue = hsv2rgb(hue, 1.0, brightness)

            if config.DISPLAY_BARS:
                # Debug information
                labels = list(yy)
                bars = list(yy)
                labels.extend([
                    '-', 'loud', 'brght', 'noise', '-', 'hue', 'red', 'grn',
                    'blue'
                ])
                bars.extend([
                    0, loudness, brightness, self.noisiness, 0, hue / 360.0,
                    red / 255.0, green / 255.0, blue / 255.0
                ])

                update_bars(labels, bars)

            config.current_led_color = (red, green, blue)

            colors = {
                "boi": str(config.is_boi_active),
                "red": red,
                "green": green,
                "blue": blue
            }

            self.transmitter.send(colors, config.CLIENT)
Example #52
 def toGrayscale(self, img):
     # if image array in 3D change to 2D
     if len(img.shape) == 3:
         return average(img, -1)
     else:
         return img
Example #53
from scipy import average

def to_grayscale_array(arr):
    """Convert an image to grayscale for comparison."""
    if len(arr.shape) == 3:
        return average(arr, -1)  # average over the last axis (color channel)
    return arr
Example #54
# select the smallest rank whose model error is within 1 SD of the full linear model's error

# getting the error distribution (Rss)
k = 5
ranks = list(range(2,10))
Rsss_lm, Rsss_rrr = xval_rank(Y, X, lam, ranks, k)

#r select
ix = np.argmax(np.average(Rsss_rrr, axis=0) < np.average(Rsss_lm) + np.std(Rsss_lm))
r = ranks[ix]

# %% inspect ranks - this will be an important inspection step
fig, axes = plt.subplots(figsize=[3,3])
axes.plot(ranks, np.average(Rsss_rrr, axis=0), '.', color='k')
for i, rk in enumerate(ranks):  # avoid clobbering the selected rank r above
    upper = sp.average(Rsss_rrr[:, i]) + sp.std(Rsss_rrr[:, i])
    lower = sp.average(Rsss_rrr[:, i]) - sp.std(Rsss_rrr[:, i])
    axes.plot([rk, rk], [lower, upper], alpha=0.5, color='k')

axes.axhline(np.average(Rsss_lm),linestyle='--',color='k')
axes.axhline(np.average(Rsss_lm) + np.std(Rsss_lm),linestyle=':',color='k')
# axes.axhline(np.average(Rsss_lm) - np.std(Rsss_lm),linestyle=':',color='k')
axes.set_xticks(ranks)
axes.set_xlabel('rank')
axes.set_ylabel('model error Rss')
axes.set_title('rank estimation')
fig.tight_layout()


# %% cheat
nLags = 200
Example #55
from scipy import average

def to_grayscale(arr):
    "If arr is a color image (3D array), convert it to grayscale (2D array)."
    if len(arr.shape) == 3:
        return average(arr, -1)  # average over the last axis (color channels)
    else:
        return arr
Example #56
import numpy as np
import scipy as sp

def bayes_single(data, v):
    sigma = sp.var(data)  # the variance, not the standard deviation, despite the name
    mu = sp.average(data)
    return sp.e**(-1 * (v - mu)**2 / (2 * sigma)) / np.sqrt(2 * sp.pi * sigma)
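
# A minimal check (synthetic data, not from the source): at v = mu the density
# reduces to 1/sqrt(2*pi*sigma), the peak of the normal pdf.
data = np.array([1.0, 2.0, 3.0, 4.0])
print bayes_single(data, data.mean())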
Example #57
import scipy

ffts = []  # module-level rolling buffer of recent frames (assumed to start empty)

def smoothMemory(ffty, degree=3):
    global ffts
    ffts = ffts + [ffty]
    if len(ffts) <= degree: return ffty
    ffts = ffts[1:]
    return scipy.average(scipy.array(ffts), 0)
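
# A minimal usage sketch (synthetic frames, not from the source): the first
# `degree` calls return the input unchanged; later calls return the average of
# the last `degree` frames.
for i in range(5):
    smoothed = smoothMemory(scipy.ones(8) * i, degree=3)
print smoothed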
Example #58
import scipy

def get_r2(x, y, ycalc):
    # note: x is accepted but not used in the computation
    ymean = scipy.average(y)
    dymean2 = (y - ymean)**2
    dycalc2 = (y - ycalc)**2
    r2 = 1 - sum(dycalc2) / sum(dymean2)
    return r2
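
# A minimal check (synthetic data, not from the source): a near-perfect model
# should give R^2 close to 1.
xs = scipy.arange(10.0)
ys = 2.0 * xs + 1.0
print get_r2(xs, ys, ys + 0.1)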
def linear_guess(xs, ys):
    return {"m": (ys.max() - ys.min()) / (xs.max() - xs.min()),
            "c": scipy.average(ys)}
Example #60
def get_filters(flist=[['USDSS', 'SDSS-u.res'], ['GSDSS', 'SDSS-g.res'],
                       ['RSDSS', 'SDSS-r.res'], ['ISDSS', 'SDSS-i.res'],
                       ['ZSDSS', 'SDSS-z.res']]):

    filt_dir = os.environ['BIGMACS'] + '/FILTERS/'

    #flist = [{'mag':'USDSS','filter':'SDSS-u.res'},{'mag':'GSDSS','filter':'SDSS-g.res'},{'mag':'RSDSS','filter':'SDSS-r.res'},{'mag':'ISDSS','filter':'SDSS-i.res'},{'mag':'ZSDSS','filter':'SDSS-z.res'}]

    filters = []
    for filt_name, filt_file in flist:
        file = filt_dir + filt_file
        filt = scipy.loadtxt(file)
        step = filt[1,0] - filt[0,0]
        if filt[0, 0] > filt[-1, 0]:
            filt = filt[::-1]  # ensure wavelengths are ascending

        filterSpline = scipy.interpolate.interp1d(filt[:,0], filt[:,1], 
                                       bounds_error = False, 
                                       fill_value = 0.)
        filters.append({'wavelength': filt[:, 0],
                        'response': filt[:, 1],
                        'spline': copy(filterSpline),
                        'step': copy(step),
                        'name': copy(filt_name),
                        'center wavelength': scipy.average(filt[:, 0], weights=filt[:, 1])})

    return filters
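
# A minimal usage sketch (not from the source): assumes the BIGMACS environment
# variable points at a directory containing the FILTERS/SDSS-*.res curves.
filters = get_filters()
for f in filters:
    print f['name'], f['center wavelength']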