Example #1
def test():

    filepath = '../pygview_test_data/se_ne5np4_test.cam.h0.0000-01-01-00000.nc'

    dslice = dataslice( filepath, 'T', time_ndx=0, level_ndx=8 )

    print('         filepath : ' + filepath)
    print('dslice.data.shape : ', dslice.data.shape)
    print('dslice.structured : ', dslice.structured)
    print('     dslice.units : ', dslice.units)
    print('  dslice.lon.size : ', dslice.lon.size)
    print('  dslice.lat.size : ', dslice.lat.size)
    print('  dslice.lon : ', dslice.lon)
    print('  dslice.lat : ', dslice.lat)

    print('     dslice.lon.min, dslice.lon.max   : ', dslice.lon.min(), dslice.lon.max())

    print('     dslice.lon.min, dslice.lon.max   : ', numpy.amin(dslice.lon), numpy.amax(dslice.lon))
    print('     dslice.lat.min, dslice.lat.max   : ', numpy.amin(dslice.lat), numpy.amax(dslice.lat))
    print('     dslice.data.min,dslice.data.max  : ', numpy.amin(dslice.data), numpy.amax(dslice.data))

    filepath = '../pygview_test_data/fv_10x15_test.cam.h0.0000-01-01-00000.nc'

    dslice = dataslice( filepath, 'V', time_ndx=0, level_ndx=18 )
    
    print('         filepath : ' + filepath)
    print('dslice.data.shape : ', dslice.data.shape)
    print('dslice.structured : ', dslice.structured)
    print('     dslice.units : ', dslice.units)
    print('     dslice.lon   : ', dslice.lon)
    print('     dslice.lat   : ', dslice.lat)
    print('     dslice.data  : ', dslice.data.min(), dslice.data.max())
Example #2
def predictSoftmax(theta,data,label, numClasses,inputSize):

    theta = theta.reshape(numClasses,inputSize+1)
    y = np.zeros((data.shape[0], numClasses))

    for i in range(len(label)):
        k = np.zeros(numClasses)
        k[label[i,0]] = 1
        y[i] = (y[i] + k).astype(int)

    theta_1 =  np.dot(data, theta.T)
    theta_1 = theta_1 - np.amax(theta_1, axis = 1).reshape(data.shape[0],1)
    prob = np.exp(theta_1) # 10000*724 * 724*10 = 10000*10
    sum_prob = np.sum(prob, axis = 1).reshape(data.shape[0],1) # 10000*1
    prob = prob/sum_prob  #10000*10

    predict = prob/np.amax(prob, axis = 1).reshape(data.shape[0],1)

    predict = (predict == 1.0 ).astype(float)

    k = 0

    for i in range(len(label)):
        if np.array_equal(predict[i,:],y[i,:]):
            k = k+1

    correctness = k / len(label)  # true division; under Python 2 this needed float()
    return correctness
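
# A minimal standalone sketch (not from the original repo) of the
# max-subtraction trick predictSoftmax uses above: subtracting the row-wise
# np.amax from the logits before np.exp keeps the softmax numerically stable
# without changing its value.
import numpy as np

logits = np.array([[1000.0, 1001.0], [3.0, 1.0]])
shifted = logits - np.amax(logits, axis=1).reshape(-1, 1)
prob = np.exp(shifted) / np.sum(np.exp(shifted), axis=1).reshape(-1, 1)
print(prob)  # rows sum to 1; no overflow even for logits around 1000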
def test_more_known_parametrization_together():
    R = 1
    P = 1
    toll = 7.e-3
    intervals = 5
    vs_order = 2
    n = intervals * vs_order  # the original wrote this as intervals*(vs_order)+1-1

    #n = 18
    ii = np.linspace(0,1,n+1)
    n_1 = 2
    n_2 = 4
    control_points_3d = np.asarray(np.zeros([n+1,n_1,n_2,3]))#[np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
    for k in range(n_1):
        for j in range(n_2):
            control_points_3d[:,k,j,0] = np.array([R*np.cos(5*i * np.pi / (n + 1))for i in ii])
            control_points_3d[:,k,j,1] = np.array([R*np.sin(5*i * np.pi / (n + 1))for i in ii])
            control_points_3d[:,k,j,2] = np.array([(k+j+1)*P*i for i in range(n+1)])
    #vsl = IteratedVectorSpace(UniformLagrangeVectorSpace(vs_order+1), np.linspace(0,1,intervals+1))
    vsl = AffineVectorSpace(UniformLagrangeVectorSpace(n+1),0,1)
    arky = ArcLengthParametrizer(vsl, control_points_3d)
    new_control_points_3d = arky.reparametrize()

    #print control_points_3d.shape, new_control_points_3d.shape
    tt = np.linspace(0,1,128)
    for k in range(n_1):
        for j in range(n_2):
            vals = vsl.element(control_points_3d)(tt)
            new_vals = vsl.element(new_control_points_3d)(tt)
            print (np.amax(np.abs(vals-new_vals))/(k+j+1)/P, (k+j+1))
            assert np.amax(np.abs(vals-new_vals))/(k+j+1)/P < toll
def get_peaks_cf(data, win_size):
    """
    data: audio as numpy array to be analyzed
    win_size: value in samples to create the blocks for analysis
    
    Used in calc_crest_factor, this function returns an array of peak levels
    for each window.

    return: array of peak audio levels
    """
    if len(data) == 2:
        # Separate left and right channels
        data_l = data[0,:]               
        data_r = data[1,:]

        # Buffer up the data
        data_matrix_l = librosa.util.frame(data_l, frame_length=win_size, hop_length=win_size)
        data_matrix_r = librosa.util.frame(data_r, frame_length=win_size, hop_length=win_size)

        # Get peaks for left and right channels
        peaks_l = np.amax(np.absolute(data_matrix_l), axis=0)
        peaks_r = np.amax(np.absolute(data_matrix_r), axis=0)
        return np.maximum(peaks_l, peaks_r)

    else:
        data_matrix = librosa.util.frame(data, frame_length=win_size, hop_length=win_size)
        return np.amax(np.absolute(data_matrix), axis=0)
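
# A small usage sketch (assumption: librosa is installed; the tone below is
# synthetic, not from the original project). Each entry of the result is the
# peak absolute level in one 1024-sample window of a mono signal:
import numpy as np
import librosa

sr = 22050
t = np.linspace(0, 1, sr, endpoint=False)
sig = 0.5 * np.sin(2 * np.pi * 440 * t)
print(get_peaks_cf(sig, 1024))  # roughly 0.5 in every window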
Example #5
def Seuil_var(img):
    """
    This fonction compute threshold value. In first the image's histogram is calculated. The threshold value is set to the first indexe of histogram wich respect the following criterion : DH > 0, DH(i)/H(i) > 0.1 , H(i) < 0.01 % of the Norm. 

    In : img : ipl Image : image to treated
    Out: seuil : Int : Value of the threshold 
    """
    dim=255
    MaxValue=np.amax(np.asarray(img[:]))
    Norm = np.asarray(img[:]).shape[0]*np.asarray(img[:]).shape[1]
    scale=MaxValue/dim
    Wdim=dim*scale
    MaxValue=np.amax(np.asarray(img[:]))
    bins= [float(x) for x in range(dim)]
    hist,bin_edges = np.histogram(np.asarray(img[:]), bins)
    Norm = Norm -hist[0]
    median=np.median(hist)
    mean=0
    var=0
    i=1
    som = 0
    while (som < 0.8*Norm and i <len(hist)-1):
      som = som + hist[i]
      i=i+1
    while ((hist[i]-hist[i-1] < 0 or (hist[i]-hist[i-1])/hist[i-1] > 0.1 or hist[i] > 0.01*Norm) and i < len(hist)-1):
      i = i+1
    if i == len(hist)-1:
      seuil = 0  # no index satisfied the criteria
    else:
      seuil = i
    return seuil
Example #6
def set_data(self, zname, zdata, zcolor):
     if zdata is not None:
         if self.overall_plot_type=="polygon":
            if zname not in self.clts: #plottables['plotted']:#self.pd.list_data():
                clt=PolyCollection(zdata, alpha=0.5, antialiased=True)#, rasterized=False, antialiased=False)
                clt.set_color(colorConverter.to_rgba(zcolor))                
                self.clts[zname]=clt
                self.axe.add_collection(self.clts[zname], autolim=True)
            else:                
                self.clts[zname].set_verts(zdata)
         if self.overall_plot_type=="XY":
             if zname not in self.clts:
                 clt = LineCollection(zdata)#, offsets=offs)
                 clt.set_color(colors)  # 'colors' must be defined in the enclosing scope
                 #print dir(clt)
                 self.clts[zname]=clt
                 self.axe.add_collection(self.clts[zname], autolim=True)
                 self.axe.autoscale_view()
             else:
                 self.clts[zname].set_segments(zdata)
         if self.overall_plot_type=="img":
             if zname not in self.clts:
                 # Magvec, yoko and freq must come from the enclosing scope
                 axeimg=self.axe.imshow( Magvec, 
                                        vmin=amin(Magvec),
                                        vmax=0.001, #amax(Magvec), 
                                        aspect="auto", origin="lower",
                                 extent=[amin(yoko),amax(yoko), amin(freq),amax(freq)],
                                 #cmap='RdBu'
                                 )
                 self.fig.colorbar(axeimg)
def test_known_parametrization():
    R = 1
    P = 1
    toll = 2.e-3

    n = 10
    ii = np.linspace(0,1,n+1)
    control_points_3d = np.asarray(np.zeros([n+1,3]))#[np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
    print (control_points_3d.shape)
    control_points_3d[:,0] = np.array([R*np.cos(5*i * np.pi / (n + 1))for i in ii])
    control_points_3d[:,1] = np.array([R*np.sin(5*i * np.pi / (n + 1))for i in ii])
    control_points_3d[:,2] = np.array([P*i for i in range(n+1)])
    vsl = AffineVectorSpace(UniformLagrangeVectorSpace(n+1),0,1)
    arky = ArcLengthParametrizer(vsl, control_points_3d)
    new_control_points_3d = arky.reparametrize()

    #new_arky = ArcLengthParametrizer(vsl, new_control_points_3d)
    #new_new_control_points_3d = arky.reparametrize()
    tt = np.linspace(0, 1, 128)

    vals = vsl.element(control_points_3d)(tt)
    #print vals
    new_vals = vsl.element(new_control_points_3d)(tt)
    #print vals.shape, new_vals.shape
    print (np.amax((np.abs(vals-new_vals))))
    assert np.amax(np.abs(control_points_3d-new_control_points_3d))/P < toll
Example #8
def xyzrgb_segment_kmeans(pc,colors,histograms,hist_bin_size=4):
    """Input:
    - pc is a numpy array of points of size N * 3
    - colors is an array of colors of size N * 3
    - histograms is a list of uv histograms
    Output (labels,M,cost):
    - labels: an N*1 vector of integers 0,...,M
    - M: the number of histograms
    - cost: a number denoting cost of the histogram.
      better values are lower.
    """
    assert pc.shape[1]==3
    xscale = 2.0
    yscale = 2.0
    zscale = 1.0
    hscale = np.amax(np.amax(pc,0)-np.amin(pc,0))
    assert colors.shape[1]==3
    features = np.zeros((pc.shape[0],3+len(histograms)))
    features[:,0] = pc[:,0]*xscale
    features[:,1] = pc[:,1]*yscale
    features[:,2] = pc[:,2]*zscale
    h = []
    for i in range(pc.shape[0]):
        uv = rgb_to_yuv(*colors[i,:])[1:3]
        hi = [eval_uv_hist(hj,uv,hist_bin_size)*hscale for hj in histograms]
        h.append(hi)
    features[:,3:] = np.array(h)
    naive_labeling = []
    for i in range(pc.shape[0]):
        hi,index = max((v,j) for (j,v) in enumerate(h[i]))
        naive_labeling.append(index)
    labels,quality = kmeans(features,len(histograms),initial=naive_labeling)
    return (labels,len(histograms),quality)
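
# A minimal sketch (synthetic data, not from the original project) of the
# scale factor computed above: np.amax(np.amax(pc, 0) - np.amin(pc, 0)) is
# the longest side of the point cloud's axis-aligned bounding box.
import numpy as np

pc = np.random.rand(100, 3) * [2.0, 5.0, 1.0]
hscale = np.amax(np.amax(pc, 0) - np.amin(pc, 0))
print(hscale)  # close to 5.0: the y extent dominates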
Example #9
def CAOSpy_run(tstart,tstop,mc,pdyn,particles,leftover,drained):
    timenow=tstart
    #loop through time
    while timenow < tstop:
        print('time:', timenow)
        [thS,npart]=pdyn.gridupdate_thS(particles.lat,particles.z,mc)
        #define dt as Curant criterion
        dt_D=(mc.mgrid.vertfac.values[0])**2 / (2*np.amax(mc.D[np.amax(thS),:]))
        dt_ku=-mc.mgrid.vertfac.values[0]/np.amax(mc.ku[np.amax(thS),:])
        dt=np.amin([dt_D,dt_ku])
        #INFILT
        p_inf=cinf.pmx_infilt(timenow,precTS,mc,dt,leftover)  # precTS comes from the enclosing scope
        #print timenow
        #print p_inf
        particlesnow=pd.concat([particles,p_inf])
        #p_backup=particlesnow.copy()
        #DIFFUSION
        [particlesnow,thS,npart,phi_mx]=pdyn.part_diffusion_split(particlesnow,npart,thS,mc,dt,True,10)
        #ADVECTION
        particlesnow=pdyn.mac_advection(particlesnow,mc,thS,dt)
        #drained particles
        drained=drained.append(particlesnow[particlesnow.flag==len(mc.maccols)+1])
        particlesnow=particlesnow[particlesnow.flag!=len(mc.maccols)+1]
        #MX-MAC-INTERACTION
        pdyn.mx_mp_interact(particlesnow,npart,thS,mc,dt)
        pondparts=(particlesnow.z<0.)
        leftover=np.count_nonzero(~pondparts)  # '~' not '-': unary minus on boolean arrays is an error in modern NumPy
        particles=particlesnow[pondparts]
        timenow=timenow+dt

    return(particles,npart,thS,leftover,drained,timenow)
def plot_Nhden(elem,N,hcol,hden,bounds=False):
    for i in to_plot[elem]:
        plt.clf()
        x = np.array(hden, dtype=float)  # np.float was removed in NumPy 1.24
        y = np.array(N[i])
        #x,y,hcol = trim(x,y,hcol)
        y = hcol[0] - y
        xlims=[0.75*np.amin(x), 1.25*np.amax(x)]
        ylims=[0.75*np.amin(y), 1.25*np.amax(y)]
        try:
            if bounds: 
                l = minNHI - observed[elem][i]["column"][2] 
                if observed[elem][i]["column"][0]==-30.:
                    u=maxNHI
                else:
                    u = maxNHI - observed[elem][i]["column"][0]
                plt.fill([-30.,30., 30., -30.], [l,l,u,u], '0.50', alpha=0.2, edgecolor='b')

                #plt.fill_between(np.arange(xlims[0],xlims[1]),lower,upper,color='0.50')
        except KeyError:
            pass
        plt.plot(x, y, color_map[i],label=ion_state(i,elem))
        plt.ylabel(r"log $N_{HI}/N_{%s}$"%(str(elem)+str(roman[i])))
        plt.xlabel("log $n_{H}$")
        plt.minorticks_on()

        makedir('hden')

        f=os.path.join(paths["plot_path"],"hden", elem+roman[i]+"N_Nhden.png")

        plt.xlim([-3.,0.])
        #plt.ylim(ylims)
        plt.savefig(f)
        plt.show()
        plt.close()
Example #11
def run_sim(R_star, transit_duration, bodies):
    """Run 3-body sim and convert results to TTV + TDV values in [minutes]"""

    # Run 3-body sim for one full orbit of the outermost moon
    loop(bodies, orbit_duration)
    

    # Move resulting data from lists to numpy arrays
    ttv_array = numpy.array(ttv_list)
    tdv_array = numpy.array(tdv_list)

    # Zeropoint correction
    middle_point =  numpy.amin(ttv_array) + numpy.amax(ttv_array)
    ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
    ttv_array = numpy.divide(ttv_array, 1000)  # km/s

    # Compensate for barycenter offset of planet at start of simulation:
    planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
    stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
    ttv_array = numpy.divide(ttv_array, stretch_factor)

    # Convert to time units, TTV
    ttv_array = numpy.divide(ttv_array, R_star)
    ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24)  # minutes

    # Convert to time units, TDV
    oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60  # m/sec
    newspeed = oldspeed - numpy.amax(tdv_array)
    difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
    conversion_factor = difference / numpy.amax(tdv_array)
    tdv_array = numpy.multiply(tdv_array, conversion_factor)

    return ttv_array, tdv_array
Example #12
def fit_min_max(args,p,max_iter,proj_list,proj_axis):
	mins = numpy.array([])
	maxs = numpy.array([])
	
	kind = [item for item in args.kind.split(' ')]
	
	for i in range(int(args.fmin)+int(args.step),max_iter+1,int(args.step)):
		args.proj = proj_list[p]
		axis = proj_axis[args.proj]
		dat = load_map(args,p,i)
		unit_l, unit_d, unit_t, unit_m = load_units(i, args)

		if kind[p] == 'dens':
			dat *= unit_d	# in g/cc
		if kind[p] in ['vx','vy','vz']:
			dat *= (unit_l/unit_t)/1e5 # in km/s
		if kind[p] in ['stars','dm']:
			dat += 1e-12
		
		if args.logscale:
			mins = numpy.append(mins,numpy.log10(numpy.amin(dat)))
			maxs = numpy.append(maxs,numpy.log10(numpy.amax(dat)))
		else:
			mins = numpy.append(mins,numpy.amin(dat))
			maxs = numpy.append(maxs,numpy.amax(dat))
		
	ii = range(int(args.fmin)+int(args.step),max_iter+1,int(args.step))
	cmin = polyfit(ii,mins,args.poly)	
	cmax = polyfit(ii,maxs,args.poly)

	return p, cmin, cmax
Example #13
def iff_filter(sig, scale, plot_show = 0):
    
    order = max(int(sig.size*scale), 90)  # window length must be an integer
    #order = 80
    # Extend signal on both sides for removing boundary effect in convolution
    sig_extend = np.ones(sig.size+int(order/2)*2)
    sig_extend[int(order/2):(sig.size+int(order/2))] = sig
    sig_extend[0:int(order/2)] = sig[(sig.size-int(order/2)):sig.size]
    sig_extend[(sig.size+int(order/2)):sig_extend.size] = sig[0:int(order/2)]
    
    # convolve with hamming window and normalize
    smooth_sig = np.convolve(sig_extend,np.hamming(order),'same')
    smooth_sig = smooth_sig[int(order/2):(sig.size+int(order/2))]
    smooth_sig = np.amax(sig)/np.amax(smooth_sig)*smooth_sig

    # Plot signal for debug
    if plot_show == 1:
        fig, ax = plt.subplots(ncols=2)
        ax[0].plot(sig)
        ax[0].plot(smooth_sig, '-r')
        ax[1].loglog(np.abs(rfft(sig)))
        ax[1].loglog(np.abs(rfft(smooth_sig)), '-r')
        # med_sig is not defined in this snippet, so the original overlay is omitted
        plt.show()
        
    return smooth_sig
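
# A small usage sketch (synthetic signal, not from the original project;
# assumes numpy as np is imported as the function requires). The np.amax
# ratio at the end of iff_filter rescales the smoothed signal so its peak
# matches the original's:
import numpy as np

t = np.linspace(0, 1, 1000, endpoint=False)
noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(1000)
smooth = iff_filter(noisy, 0.1)
print(np.amax(noisy), np.amax(smooth))  # equal peaks by construction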
Example #14
def produce_video(self, interval=200, repeat_delay=2000, filename='video_output.gif', override_min_max=None):
     """
     Finalize and save the video of the data.
     
     interval and repeat_delay are the interval between frames and the repeat
         delay before restart, both in milliseconds.
     filename is the name of the file to save in the present working
         directory. At present, only .gif output works reliably without
         tweaking Python's paths.
     override_min_max allows the user to set their own maximum and minimum
         for the scale on the plot. Use a len-2 tuple, (min, max).
     """
     #find the limits for the plot:
     if not override_min_max:
         self.min_limit = np.amin(self.data_list[0])
         self.max_limit = np.amax(self.data_list[0])
         assert len(self.data_list) > 1, 'You must include at least two frames to make an animation!'
         for i in self.data_list[1:]: #assumes there is more than one frame in the loop
             self.min_limit = min((self.min_limit, np.amin(i)))
             self.max_limit = max((self.max_limit, np.amax(i)))
     else:
         self.min_limit=override_min_max[0]
         self.max_limit=override_min_max[1]
         
     self.fig.colorbar(self.plotfunc(self.grid, self.data_list[0],limits=(self.min_limit,self.max_limit),allow_colorbar=False, **self.kwds))
     ani = animation.FuncAnimation(self.fig, _make_image, frames=self._yield_image, interval=interval, blit=True, repeat_delay=repeat_delay)
     ani.save(filename, fps=1000./interval)
def create_histogram (mu, sigma, weights, bin_size, low_spec, high_spec, cu1_accepted, t1_failure_pos):
  p1 = figure(title="Normal Distribution",tools = "pan,box_select,box_zoom,xwheel_zoom,reset,save,resize", background_fill="#E8DDCB")

  measured = np.random.normal(mu, sigma, 1000)
  hist, edges = np.histogram(weights, density=True, bins=bin_size)

  x = np.linspace(np.amin(weights), np.amax(weights), 1000)
  pdf = 1/(sigma * np.sqrt(2*np.pi)) * np.exp(-(x-mu)**2 / (2*sigma**2))
  cdf = (1+scipy.special.erf((x-mu)/np.sqrt(2*sigma**2)))/2

  p1.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
       fill_color="#036564", line_color="#033649",\
  )

  sort_weights = sorted(weights)

  cu1_yield = round(float(len(cu1_accepted))/(float(len(cu1_accepted)) + float(len(t1_failure_pos))),2)

  p1.line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
  p1.line(low_spec, y=[0, np.amax(hist)], line_dash=[4, 4], line_color="orange", line_width=3, alpha=.5)
  p1.line(high_spec, y=[0, np.amax(hist)], line_dash=[4, 4], line_color="orange", line_width=3, alpha=.5)
  p1.line(weights[0], 0, line_width=1, legend='Mean = ' + str(round(mu, 3))) #daily rejected
  p1.line(weights[0], 0, line_width=1, legend='2*Std (Std = ' + str(round(sigma, 3)) + ")") #daily accepted
  p1.line(weights[0], 0, line_width=1, legend='Yield: ' + str(cu1_yield)) #daily rejected
  p1.line(weights[0], 0, line_width=1, legend='Accepted: ' + str(len(cu1_accepted))) #daily accepted
  p1.line(weights[0], 0, line_width=1, legend='Rejected: ' + str(len(t1_failure_pos))) #daily rejected

  p1.xaxis.bounds = (np.amin(weights), np.amax(weights))

  p1.legend.orientation = "top_left"
  p1.xaxis.axis_label = 'Weight (g)'
  p1.yaxis.axis_label = 'Pr(x)'
  return p1
Example #16
def c7_eval(self, dis1, dis2):
     """
     ********************* Peak Displacement *********************
     """
     pgd1 = np.amax(dis1)
     pgd2 = np.amax(dis2)
     return self.eval_func(pgd1, pgd2)
Example #17
def testRotMatOfExpMap(numPts):
    """Test rotation matrix from axial vector"""

    print('* checking case of 1D vector input')
    map = numpy.zeros(3)
    rmat_1 = rotMatOfExpMap_orig(map)
    rmat_2 = rotMatOfExpMap_opt(map)
    print('resulting shapes:  ', rmat_1.shape, rmat_2.shape)
    #
    #
    map = numpy.zeros([3, numPts])
    map[0, :] = numpy.linspace(0, numpy.pi, numPts)
    #
    print('* testing rotMatOfExpMap with %d points' % numPts)
    #
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
    rmat_1 = rotMatOfExpMap_orig(map)
    et1 = time.perf_counter() - t0
    #
    t0 = time.perf_counter()
    rmat_2 = rotMatOfExpMap_opt(map)
    et2 = time.perf_counter() - t0
    #
    print('   timings:\n   ... original ', et1)
    print('   ... optimized', et2)
    #
    drmat = numpy.absolute(rmat_2 - rmat_1)
    print('maximum difference between results')
    print(numpy.amax(drmat, 0))

    return
Example #18
def c5_eval(self, acc1, acc2):
     """
     ********************* Peak Acceleration *********************
     """
     pga1 = np.amax(acc1)
     pga2 = np.amax(acc2)
     return self.eval_func(pga1, pga2)
Example #19
def c6_eval(self, vel1, vel2):
     """
     ********************* Peak Velocity *********************
     """
     pgv1 = np.amax(vel1)
     pgv2 = np.amax(vel2)
     return self.eval_func(pgv1, pgv2)
def backward_sparse(sparse_transition,reward_f,conv=5,discount = 1.,length = 15,z_states = None):
    num_states = sparse_transition.shape[1]; num_actions = sparse_transition.shape[0]//num_states  # integer division for Python 3
    #if reward_f.shape[0] ==num_actions:
    #  state_action = True
    #else: state_action =False
    z_actions = np.zeros(num_actions*num_states)
    if z_states is None:
      z_states = np.zeros(num_states)
    #Backward - - - - - - - - - - - - - - - - - - - - - - - - - -
    #print "Caus Ent Backward"
    count = 0
    delta = 0
    reward_temp = reward_f.reshape(num_actions*num_states,order="F")
    #while True:
    alpha = (1/(num_states*100))*0
    for i in range(length):
      prev = np.zeros(z_states.shape)
      prev += z_states
      #print gamma*sparse_transition.dot(z_states)
      z_actions = discount*((1-alpha)*sparse_transition.dot(z_states) + alpha*np.sum(z_states) ) +reward_temp
      m = np.amax(z_actions.reshape(num_actions,num_states,order="F"),axis = 0)
      z_states = m + np.log(np.sum(np.exp(z_actions.reshape(num_actions,num_states,order="F")-m),axis = 0))
      count+=1
      #Action Probability Computation - - - - - - - - - - - - - - - -
      delta = np.amax(np.absolute(prev-z_states))
      #print delta
      #if count>2 and delta<conv:
        #print "Count and delta", count,delta
      policy= np.exp(z_actions.reshape(num_actions,num_states,order="F")-z_states)
        #print "Policyyy",policy
        #break
    return policy,np.log(policy),z_states
def caus_ent_backward(transition,reward_f,conv=5,discount = 0.9,z_states = None):
    num_actions = transition.tot_actions;num_states = transition.tot_states
    if reward_f.shape[0] ==num_actions:
      state_action = True
    else: state_action =False
    z_actions = np.zeros([num_actions,num_states])
    if z_states is None:
      z_states = np.zeros(num_states)
    #Backward - - - - - - - - - - - - - - - - - - - - - - - - - -
    #print "Caus Ent Backward"
    count = 0
    delta = 0
    while True:
      prev = np.zeros(z_states.shape)
      prev += z_states
      for i in range(num_states):
        tr = transition.dense_backward[i]
        ch = transition.chunks_backward[i]
        out = discount*np.array(sum_chunks(tr[2,:]*z_states[tr[1,:].astype(int)],ch))  # a map object cannot index in Python 3
        z_actions[:,i] = out +reward_f[:,i]
      m = np.amax(z_actions,axis = 0)
      z_states = m + np.log(np.sum(np.exp(z_actions-m),axis = 0))
      count+=1
      #Action Probability Computation - - - - - - - - - - - - - - - -
      delta = np.amax(np.absolute(prev-z_states))
      if count == 50:
        #print "Count and delta", count,delta
        z_actions = z_actions
        m = np.amax(z_actions,axis = 0)
        z_states = m + np.log(np.sum(np.exp(z_actions-m),axis = 0))
        policy= np.exp(z_actions-z_states)
        break
    return policy,np.log(policy),z_states
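
# A minimal standalone sketch (not from the original repo) of the np.amax
# log-sum-exp stabilization both backward passes above rely on:
# log(sum(exp(z))) == m + log(sum(exp(z - m))) with m = np.amax(z).
import numpy as np

z = np.array([1000.0, 1001.0, 999.0])
m = np.amax(z)
print(m + np.log(np.sum(np.exp(z - m))))  # ~1001.41; the naive form overflows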
Example #22
def CostVariancePlot(funct,args):
	pl=args[0]
	x=np.array([])
	y=np.array([])
	f=np.array([])
	z=np.array([])
	x=np.append(x,funct.rmsSet[:,0])
	y=np.append(y,funct.rmsSet[:,3])
	f=np.append(f,funct.rmsSet[:,5])
	z=np.append(z,funct.rmsSet[:,4])
	v=np.array([])
	v=np.append(v,[0])
	i=0
	while i<len(x):
		v=np.append(v,(z[i]/f[i])-(y[i]/f[i])*(y[i]/f[i]))
		i+=1
	v=np.delete(v,0)
	if centroidP(x,v):
		pl.set_yscale('log')
		pl.set_xscale('log') 
	else:
		pl.ticklabel_format(axis='both', style='sci', scilimits=(-2,5),pad=5,direction="bottom")
	pl.axis([0, np.amax(x)+(10*np.amax(x)/100), 0, np.amax(v)+(10*np.amax(v)/100)])
	pl.set_xlabel("read memory size",fontsize=8)
	pl.set_ylabel("cost",fontsize=8)
	pl.set_title("Variance Cost",fontsize=14)
	pl.grid(True)
	pl.tick_params(axis='x', labelsize=7)
	pl.tick_params(axis='y', labelsize=7)
	sc=pl.scatter(x,v,c=f,s=6,marker = 'o',lw=0.0,cmap=cmap,norm=norm)
	pylab.close()		
Example #23
def main(X, Xtest, time):
	global cut
	global count
	cut = 0
	count = 0
	root = node()
	root.trainData = X
	root.testData = Xtest
	print("shape of xtest in main: ",np.shape(Xtest))
	x1 = min(np.amin(X[:,[0]]), np.amin(Xtest[:,[0]]))-.05
	x2 = max(np.amax(X[:,[0]]), np.amax(Xtest[:,[0]]))+.1
	y1 = min(np.amin(X[:,[1]]), np.amin(Xtest[:,[1]]))-.05
	y2 = max(np.amax(X[:,[1]]), np.amax(Xtest[:,[1]]))+.1
	plt.figure()
	plt.axis([x1,x2,y1,y2])
	print("x1 x2 y1 y2: ",x1,x2,y1,y2)
	root.coordinates.append([x1,x2])
	root.coordinates.append([y1,y2])
	leaves = []
	MP(root,time,leaves)
	point_index = {}
	train_key = list(map(tuple,X))
	test_key = list(map(tuple,Xtest))
	x_shape = np.shape(X)
	for i in range(x_shape[0]):
		point_index[train_key[i]] = i
	Xtest_shape = np.shape(Xtest)
	for i in range(0,Xtest_shape[0]):
		point_index[test_key[i]] = i
	# plt.show()
	plt.close()
	return feature(leaves, point_index)
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format. 

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.
    """
    
    cluster_run = np.asanyarray(cluster_run)

    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):  # requires 'from functools import reduce' in Python 3
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")

            cluster_run -= min_label

            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")

        x = one_to_max(cluster_run) 
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")

            cluster_run = x

            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))

    return cluster_run
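
# A short sketch (assumption: this mimics the densification that one_to_max
# performs, which is not shown in this snippet): numpy can remap arbitrary
# labels so they run from 0 upward without gaps.
import numpy as np

labels = np.array([3, 5, 5, 3, 9])
_, dense = np.unique(labels, return_inverse=True)
print(dense)  # [0 1 1 0 2]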
Example #25
def test_make_tone_regular_at_caldb():
    fq = 15000
    db = 100
    fs = 100000
    dur = 1
    risefall = 0.002
    calv = 0.1
    caldb = 100
    npts = fs*dur

    tone, timevals = tools.make_tone(fq, db, dur, risefall, fs, caldb, calv)

    assert len(tone) == npts
    assert len(timevals) == npts

    spectrum = np.fft.rfft(tone)
    peak_idx = np.argmax(abs(spectrum))  # Python 3 cannot order complex values, so locate the peak via the magnitude
    freq_idx = np.around(fq*(float(npts)/fs))
    assert peak_idx == freq_idx

    if tools.USE_RMS is True:
        print('tone max', np.around(np.amax(tone), 5), calv*np.sqrt(2))
        assert np.around(np.amax(tone), 5) == np.around(calv*np.sqrt(2),5)
    else:
        assert np.around(np.amax(tone), 5) == calv

    assert timevals[-1] == dur - (1./fs)
Example #26
def basemap_raster_mercator(lon, lat, grid, cmap = None):
  """
  Render a raster in mercator projection.  Locations with no values are
  rendered transparent.
  """
  # longitude/latitude extent
  lons = (np.amin(lon), np.amax(lon))
  lats = (np.amin(lat), np.amax(lat))

  if cmap is None:
    cmap = mpl.cm.jet
    cmap.set_bad('w', 1.0)

  # construct spherical mercator projection for region of interest
  m = Basemap(projection='merc',llcrnrlat=lats[0], urcrnrlat=lats[1],
              llcrnrlon=lons[0],urcrnrlon=lons[1])

  vmin,vmax = np.nanmin(grid),np.nanmax(grid)
  masked_grid = np.ma.array(grid,mask=np.isnan(grid))
  fig = plt.figure(frameon=False)
  plt.axis('off')
  m.pcolormesh(lon,lat,masked_grid,latlon=True,cmap=cmap,vmin=vmin,vmax=vmax)

  str_io = io.BytesIO()  # PNG output is binary; Python 2's StringIO.StringIO does not work here under Python 3
  plt.savefig(str_io,bbox_inches='tight',format='png',pad_inches=0,transparent=True)
  bounds = [ (lons[0],lats[0]),(lons[1],lats[0]),(lons[1],lats[1]),(lons[0],lats[1]) ]

  return str_io.getvalue(), bounds
def _write_data(lock, im, index, outfile, outshape, outtype, rescale_factor, logfilename, cputime, itime):    	      

	lock.acquire()
	try:        
		t0 = time() 			
		f_out = getHDF5(outfile, 'a')					 
		f_out_dset = f_out.require_dataset('exchange/data', outshape, outtype, chunks=tdf.get_dset_chunks(outshape[0])) 
		im = im * rescale_factor
		tdf.write_tomo(f_out_dset,index,im.astype(outtype))
					
		# Set minimum and maximum:
		if (amin(im[:]) < float(f_out_dset.attrs['min'])):
			f_out_dset.attrs['min'] = str(amin(im[:]))
		if (amax(im[:]) > float(f_out_dset.attrs['max'])):
			f_out_dset.attrs['max'] = str(amax(im[:]))		
		f_out.close()			
		t1 = time() 

		# Print out execution time:
		log = open(logfilename,"a")
		log.write(linesep + "\ttomo_%s processed (CPU: %0.3f sec - I/O: %0.3f sec)." % (str(index).zfill(4), cputime, t1 - t0 + itime))
		log.close()	

	finally:
		lock.release()	
Example #28
def mamPlot(funct,args):
	pl=args[0]
	x=np.array([])
	ymin=np.array([])
	yavg=np.array([])
	ymax=np.array([])
	f=np.array([])
	x=np.append(x,funct.rmsSet[:,0])
	ymin=np.append(ymin,funct.rmsSet[:,1])
	ymax=np.append(ymax,funct.rmsSet[:,2])
	t1=funct.rmsSet[:,3]
	t2=funct.rmsSet[:,5]
	yavg=np.append(yavg,t1/t2)
	f=np.append(f,funct.rmsSet[:,5])
	if centroidP(x,yavg):
		pl.set_yscale('log')
		pl.set_xscale('log')
	else:
		pl.ticklabel_format(axis='both', style='sci', scilimits=(-2,5),pad=5,direction="bottom")
	pl.axis([0, np.amax(x)+(2*np.amax(x)/100), 0, np.amax(ymax)+(2*np.amax(ymax)/100)])
	pl.set_xlabel('read memory size',fontsize=8)
	pl.set_ylabel("cost",fontsize=8)
	pl.grid(True)
	pl.set_title("Min/Avg/Max Cost",fontsize=14)
	pl.tick_params(axis='x', labelsize=7)
	pl.tick_params(axis='y', labelsize=7)
	sc=pl.scatter(x,ymax,s=7,c='r', marker = 'o',lw=0.0)
	sc1=pl.scatter(x,yavg,s=5.5,c='g', marker = 'o',lw=0.0)	
	sc2=pl.scatter(x,ymin,s=4,c='b', marker = 'o',lw=0.0)	
	pl.legend((sc2,sc1,sc),("Min","Avg","Max"),scatterpoints=1,ncol=3,bbox_to_anchor=[0.5, mamAdjust],loc="lower center",fontsize=8)
	pylab.close()
Example #29
def statprint(host_per_pg, pg_per_host):
    val = list(pg_per_host.values())  # a list of the values in pg_per_host; numpy cannot reduce a dict view directly
    mean = numpy.mean(val)
    maxvalue = numpy.amax(val)
    minvalue = numpy.amin(val)
    std = numpy.std(val)
    median = numpy.median(val)
    variance = numpy.var(val)
    print("for placement groups on hosts: ")
    print( "the mean is: ", mean)
    print( "the max value is: ", maxvalue)
    print( "the min value is: ", minvalue)
    print( "the standard deviation is: ", std)
    print( "the median is: ", median)
    print( "the variance is: ", variance)
    # prints statements for stats
    host_mean = numpy.mean(host_per_pg)
    host_max = numpy.amax(host_per_pg)
    host_min = numpy.amin(host_per_pg)
    host_std = numpy.std(host_per_pg)
    host_median = numpy.median(host_per_pg)
    host_variance = numpy.var(host_per_pg)
    # these are the variables for hosts/pgs
    print("hosts per placement group: ")
    print("the mean is: ", host_mean)
    print("the max value is: ", host_max)
    print("the min value is: ", host_min)
    print("the standard deviation is: ", host_std)
    print("the median is: ", host_median)
    print("the variance is: ", host_variance)
def caus_ent_backward_nodisount(transition,reward_f,steps):
    num_actions = transition.tot_actions;num_states = transition.tot_states
    if reward_f.shape[0] ==num_actions:
      state_action = True
    else: state_action =False
    gamma = 1.0  # no discounting, per the function name; the original referenced an undefined 'discount'
    z_actions = np.zeros([num_actions,num_states])
    z_states = np.zeros(num_states)
    #Backward - - - - - - - - - - - - - - - - - - - - - - - - - -
    print "Caus Ent Backward"
    count = 0
    delta = 0
    for j in range(steps):
      prev = np.zeros(z_states.shape)
      prev += z_states
      for i in range(num_states):
        tr = transition.dense_backward[i]
        ch = transition.chunks_backward[i]
        out = gamma*np.array(sum_chunks(tr[2,:]*z_states[tr[1,:].astype(int)],ch))  # a map object cannot index in Python 3
        z_actions[:,i] = out +reward_f[:,i]
      m = np.amax(z_actions)
      z_states = m + np.log(np.sum(np.exp(z_actions-m),axis = 0))
      count+=1
      #Action Probability Computation - - - - - - - - - - - - - - - -
      delta = np.sum(np.sum(np.absolute(prev-z_states)))
      #delta +=1 
      #print "DElta cause",delta,delta2
      if j==steps-1:
        z_actions = z_actions
        m = np.amax(z_actions)
        z_states = m + np.log(np.sum(np.exp(z_actions-m),axis = 0))
        policy= np.exp(z_actions-z_states)
    return policy,np.log(policy),z_states
    atm[-1] -= center_about
'''
File Output
'''
f = open('FEASST_' + TRAPPE_fname.split('_')[-1].split('.')[0] + '.in', "w")

#Heading
f.write('# FEASST data file \n# %s \n# %s \n# %s \n' %
        (TRAPPE_name[0], TRAPPE_fname, SMILES))

#Preface
f.write('\n%s atoms \n%s bonds \n\n%s atom types \n%s bond types \n' %
        (len(TRAPPE_atoms[:, 0]), len(TRAPPE_bonds[:, 0]),
         len(TRAPPE_atoms[:, 1]), len(TRAPPE_bonds[:, 2])))
f.write('\n%s %s xlo xhi \n%s %s ylo yhi \n%s %s zlo zhi \n' % (
    np.amin(psuedo_atoms.positions[:, 0]), np.amax(
        psuedo_atoms.positions[:, 0]), np.amin(psuedo_atoms.positions[:, 1]),
    np.amax(psuedo_atoms.positions[:, 1]), np.amin(
        psuedo_atoms.positions[:, 2]), np.amax(psuedo_atoms.positions[:, 2])))

#Masses
f.write('\nMasses\n\n')
for i, atm in enumerate(organized_ASE_atoms):
    f.write('%s %s \n' % (atm[0], atm[1]))

#Pair Coeffs
f.write('\nPair Coeffs\n\n')
for i, line in enumerate(TRAPPE_atoms):
    f.write('%s %s %s \n' % (TRAPPE_atoms[i, 0], float(TRAPPE_atoms[i, 3]) *
                             kB[0] * NA[0] / 1000, TRAPPE_atoms[i, 4]))

#Bond Coeffs
Example #32
def run_MLP_pipeline(args, train_adata, test_adata, result_dir, prefix=""):
    ''' Run MLP pipeline for left out cell types

    @arg: argparse object
        - args.leaveout: indicate up to how many cell types will be randomly left out
        - args.threshold: use threshold to decide unassigned cells
    '''
    batch_size = 128
    celltype_cols = "cell.type"
    ## Hyperparameters for network
    if train_adata.shape[0] >= 5000:
        ## === parameters for mousebrain (high cell number)
        dims = [128, 32]
        MLP_dims = [128, 64, 32, 16, 8]
    else:
        ## === parameters for PBMC datasets (low cell number)
        dims = [16]
        MLP_dims = [64, 16]

    ## OneHotEncoding the celltypes
    enc = OneHotEncoder(handle_unknown='ignore')
    if scipy.sparse.issparse(train_adata.X):
        x_train = train_adata.X.toarray()
    else:
        x_train = train_adata.X
    y_train = enc.fit_transform(train_adata.obs[[celltype_cols]]).toarray()
    if scipy.sparse.issparse(test_adata.X):
        x_test = test_adata.X.toarray()
    else:
        x_test = test_adata.X

    ### --- run MLP
    print("\n\n=== MLP\n")
    start = time.time()
    y_pred = method_utils.run_MLP(x_train, y_train, x_test, 
            dims=MLP_dims, batch_size=batch_size, seed=RANDOM_SEED) ## run MLP
    end = time.time()
    print("\n\n=== Run time:", end-start)

    ### draw unassigned cells lower than a certain threshold
    thres = args.threshold
    test_adata.obs['pred_celltypes'] = 'unassigned'
    assigned_idx = np.where(np.amax(y_pred, 1) >= thres)[0]
    unassigned_idx = np.where(np.amax(y_pred, 1) < thres)[0]
    ## print out unassigned cells number
    print(test_adata.obs[celltype_cols].value_counts())
    print("Unassigned cells number:", len(unassigned_idx))
    print(test_adata[unassigned_idx].obs[celltype_cols].value_counts())
    ## get assigned cell labels
    assigned_labels = y_pred[assigned_idx].argmax(1)
    n_clusters = len(set(train_adata.obs[celltype_cols]))
    assigned_onehot = np.zeros((assigned_labels.size, n_clusters))
    assigned_onehot[np.arange(assigned_labels.size), assigned_labels] = 1
    assigned_celltypes = enc.inverse_transform(assigned_onehot)
    assigned_cells = test_adata[assigned_idx].obs_names
    test_adata.obs.loc[assigned_cells, 'pred_celltypes'] = assigned_celltypes
    test_adata.obs.to_csv(result_dir+os.sep+prefix+"_predicted_obs.csv")
    ## visualization of unassigned cells
    plt.figure(args.leaveout+1)
    sc.pl.tsne(test_adata, color=[celltype_cols, "pred_celltypes"], size=15)
    plt.savefig(result_dir+os.sep+prefix+'_'+str(args.threshold)+"_prediction_result.png")
    print("=== Finish visualizing..")
    return y_pred
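
# A minimal standalone sketch (synthetic values, not from the original
# pipeline) of the np.amax confidence gate used above: rows whose highest
# class probability falls below the threshold are marked unassigned.
import numpy as np

y_pred = np.array([[0.9, 0.1], [0.55, 0.45], [0.2, 0.8]])
thres = 0.6
assigned_idx = np.where(np.amax(y_pred, 1) >= thres)[0]   # [0 2]
unassigned_idx = np.where(np.amax(y_pred, 1) < thres)[0]  # [1]
print(assigned_idx, unassigned_idx)
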
    acc_flr = flrs_results.mean()
    acc_bf = (blds_results*flrs_results).mean()
    # rfps_results = (np.equal(np.argmax(test_labels[:, 8:118], axis=1), np.argmax(preds[:, 8:118], axis=1))).astype(int)
    # acc_rfp = rfps_results.mean()
    # acc = (blds_results*flrs_results*rfps_results).mean()
    
    # calculate positioning error when building and floor are correctly estimated
    mask = np.logical_and(blds_results, flrs_results) # mask index array for correct location of building and floor
    x_test_utm = x_test_utm[mask]
    y_test_utm = y_test_utm[mask]
    blds = blds[mask]
    flrs = flrs[mask]
    rfps = (preds[mask])[:, 8:118]

    n_success = len(blds)       # number of correct building and floor location
    blds = np.greater_equal(blds, np.tile(np.amax(blds, axis=1).reshape(n_success, 1), (1, 3))).astype(int) # set maximum column to 1 and others to 0 (row-wise)
    flrs = np.greater_equal(flrs, np.tile(np.amax(flrs, axis=1).reshape(n_success, 1), (1, 5))).astype(int) # ditto

    n_loc_failure = 0
    sum_pos_err = 0.0
    sum_pos_err_weighted = 0.0
    idxs = np.argpartition(rfps, -N)[:, -N:]  # (unsorted) indexes of up to N nearest neighbors
    threshold = scaling*np.amax(rfps, axis=1)
    for i in range(n_success):
        xs = []
        ys = []
        ws = []
        for j in idxs[i]:
            rfp = np.zeros(110)
            rfp[j] = 1
            rows = np.where((train_labels == np.concatenate((blds[i], flrs[i], rfp))).all(axis=1)) # tuple of row indexes
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

x = np.load('xdata400.dat')
y = np.load('ydata400.dat')
z = np.load('zdata400.dat')
mag = np.load('magdata400.dat')

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

for a in range(0, len(mag)):
	for b in range(0, len(mag)):
		for c in range(0, len(mag)):
			ax.scatter(x[a][b][c],y[a][b][c],z[a][b][c],marker='o',
			           alpha=(mag[a][b][c]/np.amax(mag)))

plt.show()
def activelearning(x_train, y_train, x_test, y_test, start_num_samples, end_num_samples , step_size, epochs, n_average, criteria ):

    print("===================================")

    submission = pd.DataFrame(columns=['#','accuracy', 'best_accuracy'])
    

    cur_x = x_train[0:start_num_samples,:,:]
    cur_y = y_train[0:start_num_samples]
    
    #iterative retrain after applying the network to trained model 
    back_x = x_train[start_num_samples:,:,:]
    back_y = y_train[start_num_samples:]

    n_iteration = (end_num_samples-start_num_samples)//step_size

    for j in range(n_iteration):
        
        avg_acc = 0 

        best_acc = 0 

        for k in range(n_average):
            model = get_model()
        
            callbacks = build_callbacks()

            model.fit(cur_x, cur_y,  callbacks=[callbacks], epochs=epochs, validation_data=(x_test, y_test), verbose = 0)

            model =tf.keras.models.load_model("params.h5")

            score, acc = model.evaluate(x_test, y_test, verbose=0)

            avg_acc += acc 

            if best_acc < acc : 
                best_acc = acc 
                bestmodel = tf.keras.models.load_model("params.h5")

        submission = pd.concat([submission, pd.DataFrame({'#': cur_x.shape[0], 'accuracy': [avg_acc/n_average], 'best_accuracy': [best_acc]})])  # DataFrame.append was removed in pandas 2.0
            
        # use the remaining trainning datasets 
        predictions = bestmodel.predict (back_x)
        
        prob = tf.nn.softmax(predictions)

        if criteria == "en":
            index = entropy_sampling(prob)
        elif criteria == "rs":
           index = random_sampling(prob)
        elif criteria == "ms":
            index = margin_sampling(prob)
        elif criteria == "lc":
            index = least_confidence(prob)
        else:
            raise ValueError("Unknow criteria value ")


        # pick up step_size worst samples to train 
        cur_x = np.concatenate((cur_x, back_x[index[0:step_size], :,:]) )
        cur_y = np.concatenate((cur_y, back_y[index[0:step_size]]) )

        back_x  = back_x[index[step_size:],:,:]
        back_y  = back_y[index[step_size:]]
        


    # autolabel : use the remaining data 
    #     back_x, back_y 
    
    size = back_x.shape[0]
    n_iteration = size//step_size

    
     # limit the iteration count to save time for quick testing; this could also be folded into the sampling steps above
    n_iteration = min(n_iteration, 32)
    for j in range(n_iteration):

        predictions = bestmodel.predict (back_x)
        
        prob = tf.nn.softmax(predictions)
        

        y_autolabeled = np.argmax(prob, axis=1)


        pmax = np.amax(prob, axis=1)
        pidx = np.argsort(pmax)
        back_x = back_x[pidx]
        y_autolabeled = y_autolabeled[pidx]

        cur_x = np.concatenate([cur_x, back_x[-step_size:]])
        cur_y = np.concatenate([cur_y, y_autolabeled[-step_size:]])
        back_x = back_x[:-step_size]

        avg_acc = 0 

        best_acc = 0 

        for k in range(n_average):
            model = get_model()
        
            callbacks = build_callbacks()

            model.fit(cur_x, cur_y,  callbacks=[callbacks], epochs=epochs, validation_data=(x_test, y_test), verbose = 0)

            model =tf.keras.models.load_model("params.h5")

            score, acc = model.evaluate(x_test, y_test, verbose=0)

            avg_acc += acc 

            if best_acc < acc : 
                best_acc = acc 
                bestmodel = tf.keras.models.load_model("params.h5")
    
    
    submission = pd.concat([submission, pd.DataFrame({'#': cur_x.shape[0], 'accuracy': [avg_acc/n_average], 'best_accuracy': [best_acc]})])


    submission.to_csv('method_' + criteria + ".csv", index=False)
    def perturb(self, params):
        """
        Unlike in C++, this takes a numpy array of parameters as input,
        and modifies it in-place. The return value is still logH.
        """
        logH = 0.0

        reps = 1
        if rng.rand() < 0.5:
            reps += int(np.power(100.0, rng.rand()))  # np.int was removed in NumPy 1.24

        # print "going to perturb %d reps" % reps

        for i in range(reps):
            # print "   rep iteration %d" % i
            which = rng.randint(len(params))

            if which == 0:
              rad_idx = 0
              theta_idx =  2

              theta = params[theta_idx]

              #FIND THE MAXIMUM RADIUS STILL INSIDE THE DETECTOR
              theta_eq = np.arctan(detector.detector_length/detector.detector_radius)
              theta_taper = np.arctan(detector.taper_length/detector.detector_radius)
            #   print "theta: %f pi" % (theta / np.pi)
              if theta <= theta_taper:
                 z = np.tan(theta)*(detector.detector_radius - detector.taper_length) / (1-np.tan(theta))
                 max_rad = z / np.sin(theta)
              elif theta <= theta_eq:
                  max_rad = detector.detector_radius / np.cos(theta)
                #   print "max rad radius: %f" %  max_rad
              else:
                  theta_comp = np.pi/2 - theta
                  max_rad = detector.detector_length / np.cos(theta_comp)
                #   print "max rad length: %f" %  max_rad

              #AND THE MINIMUM (from PC dimple)
              #min_rad  = 1./ ( np.cos(theta)**2/detector.pcRad**2  +  np.sin(theta)**2/detector.pcLen**2 )

              min_rad = np.amax([detector.pcRad, detector.pcLen])

              total_max_rad = np.sqrt(detector.detector_length**2 + detector.detector_radius**2 )

              params[which] += total_max_rad*dnest4.randh()
              params[which] = dnest4.wrap(params[which] , min_rad, max_rad)

            elif which ==2: #theta
              rad_idx = 0
              rad = params[rad_idx]

            #   print "rad: %f" % rad
              if rad < np.amin([detector.detector_radius - detector.taper_length, detector.detector_length]):
                  max_val = np.pi/2
                  min_val = 0
                #   print "theta: min %f pi, max %f pi" % (min_val, max_val)
              else:
                  if rad < detector.detector_radius - detector.taper_length:
                      #can't possibly hit the taper
                    #   print "less than taper adjustment"
                      min_val = 0
                  elif rad < np.sqrt(detector.detector_radius**2 + detector.taper_length**2):
                      #low enough that it could hit the taper region
                    #   print "taper adjustment"
                      a = detector.detector_radius - detector.taper_length
                      z = 0.5 * (np.sqrt(2*rad**2-a**2) - a)
                      min_val = np.arcsin(z/rad)
                  else:
                      #longer than could hit the taper
                    #   print  " longer thantaper adjustment"
                      min_val = np.arccos(detector.detector_radius/rad)

                  if rad < detector.detector_length:
                      max_val = np.pi/2
                  else:
                      max_val = np.pi/2 - np.arccos(detector.detector_length/rad)
                #   print "theta: min %f pi, max %f pi" % (min_val, max_val)

              params[which] += np.pi/2*dnest4.randh()
              params[which] = dnest4.wrap(params[which], min_val, max_val)

            # if which == 0:
            #     params[which] += (detector.detector_radius)*dnest4.randh()
            #     params[which] = dnest4.wrap(params[which] , 0, detector.detector_radius)
            elif which == 1:
                max_val = np.pi/4
                params[which] += np.pi/4*dnest4.randh()
                params[which] = dnest4.wrap(params[which], 0, max_val)
                if params[which] < 0 or params[which] > np.pi/4:
                    print("wtf phi")
                #params[which] = np.clip(params[which], 0, max_val)
            # elif which == 2:
            #     params[which] += (detector.detector_length)*dnest4.randh()
            #     params[which] = dnest4.wrap(params[which] , 0, detector.detector_length)

            elif which == 3: #scale
                min_scale = wf.wfMax - 0.01*wf.wfMax
                max_scale = wf.wfMax + 0.005*wf.wfMax
                params[which] += (max_scale-min_scale)*dnest4.randh()
                params[which] = dnest4.wrap(params[which], min_scale, max_scale)
            #   print "  adjusted scale to %f" %  ( params[which])

            elif which == 4: #t0
              params[which] += 1*dnest4.randh()
              params[which] = dnest4.wrap(params[which], min_maxt, max_maxt)
            elif which == 5: #smooth
              params[which] += 0.1*dnest4.randh()
              params[which] = dnest4.wrap(params[which], 0, 25)
                #   print "  adjusted smooth to %f" %  ( params[which])

                # elif which == 6: #wf baseline slope (m)
                #     logH -= -0.5*(params[which]/1E-4)**2
                #     params[which] += 1E-4*dnest4.randh()
                #     logH += -0.5*(params[which]/1E-4)**2
                # elif which == 7: #wf baseline incercept (b)
                #     logH -= -0.5*(params[which]/1E-2)**2
                #     params[which] += 1E-2*dnest4.randh()
                #     logH += -0.5*(params[which]/1E-2)**2

                #   params[which] += 0.01*dnest4.randh()
                #   params[which]=dnest4.wrap(params[which], -1, 1)
                #   print "  adjusted b to %f" %  ( params[which])

            else: #velocity or rc params: cant be below 0, can be arb. large
                print "which value %d not supported" % which
                exit(0)


        return logH
Example #37
    quantized_model.eval()
    with torch.no_grad():
        student_output_img = quantized_model(input_img.cpu())
    # save imgs:
    epoch = 0
    images = {
        'input_img': input_img,
        'teacher_output_img': teacher_output_img,
        'student_output_img': student_output_img
    }
    for key in images:
        img_np = images[key].detach().cpu().numpy()
        img_np = np.moveaxis(img_np, 1, -1)
        img_np = (img_np + 1) / 2  # (-1,1) -> (0,1)
        img_big = fourD2threeD(img_np, n_row=1)
        print(key, img_big.shape, np.amax(img_big), np.amin(img_big))
        imsave(os.path.join(img_dir, 'epoch%d_%s.png' % (epoch, key)),
               img_as_ubyte(img_big))

###### Training ######

print('dataloader:', len(dataloader))  # 1334
for epoch in range(start_epoch, args.epochs):
    epoch += 1
    if args.lq and epoch == args.start_lq:
        netG.apply(learn_quant.enable_param_learning)
    netG.cuda()
    start_time = time.time()
    netG.train(), netD.train()
    # define average meters:
    loss_G_meter, loss_G_perceptual_meter, loss_G_GAN_meter, loss_D_meter = \
Example #38
def show3dlidar(pointpaht, detections, calib):
    pointcloud = np.fromfile(pointpaht, dtype=np.float32).reshape(-1, 4)
    x = pointcloud[:, 0]  # x position of point
    xmin = np.amin(x, axis=0)
    xmax = np.amax(x, axis=0)
    y = pointcloud[:, 1]  # y position of point
    ymin = np.amin(y, axis=0)
    ymax = np.amax(y, axis=0)
    z = pointcloud[:, 2]  # z position of point
    zmin = np.amin(z, axis=0)
    zmax = np.amax(z, axis=0)
    d = np.sqrt(x**2 + y**2)  # Map Distance from sensor
    vals = 'height'
    if vals == "height":
        col = z
    else:
        col = d
    fig = mayavi.mlab.figure(bgcolor=(0, 0, 0), size=(640, 500))
    mayavi.mlab.points3d(
        x,
        y,
        z,
        col,  # Values used for Color
        mode="point",
        colormap='Blues',  # 'bone', 'copper', 'gnuplot'
        # color=(0, 1, 0),   # Used a fixed (r,g,b) instead
        figure=fig,
    )
    mayavi.mlab.points3d(0,
                         0,
                         0,
                         color=(1, 1, 1),
                         mode="sphere",
                         scale_factor=0.2)

    print(detections.shape)

    detections[:, 1:] = lidar_to_camera_box(detections[:, 1:], calib.V2C,
                                            calib.R0, calib.P2)

    for i in range(detections.shape[0]):

        h = float(detections[i][4])
        w = float(detections[i][5])
        l = float(detections[i][6])

        x = float(detections[i][1])
        y = float(detections[i][2])
        z = float(detections[i][3])
        x_corners = [
            l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2
        ]
        y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
        z_corners = [
            w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2
        ]
        #print(x_corners)
        #print(detections[i])
        R = roty(float(detections[i][7]))
        corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
        # print corners_3d.shape
        #corners_3d = np.zeros((3,8))
        corners_3d[0, :] = corners_3d[0, :] + x
        corners_3d[1, :] = corners_3d[1, :] + y
        corners_3d[2, :] = corners_3d[2, :] + z
        corners_3d = np.transpose(corners_3d)
        box3d_pts_3d_velo = project_rect_to_velo(corners_3d)
        #x1, y1, z1 = box3d_pts_3d_velo[0, :]
        #x2, y2, z2 = box3d_pts_3d_velo[1, :]
        if detections[i][0] == 1.0:
            draw_gt_boxes3d([box3d_pts_3d_velo], 1, color=(1, 0, 0), fig=fig)
        else:
            draw_gt_boxes3d([box3d_pts_3d_velo], 1, color=(0, 1, 0), fig=fig)

    mayavi.mlab.show()
Example #39
def onehot(y):
    y_onehot = np.array(y)
    oh = np.zeros((len(y_onehot) ,np.amax(y_onehot)+1)) 
    oh[np.arange(y_onehot.size),y_onehot]=1
    print(oh)
    return oh   
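
# Usage sketch for the function above: integer class labels become one-hot
# rows, with np.amax(y) + 1 columns.
import numpy as np

onehot(np.array([0, 2, 1]))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]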
Example #40
import numpy
from matplotlib import pyplot
from matplotlib.colors import LogNorm
from velociraptor.tools.lines import binned_median_line as bml
import unyt
pyplot.rcParams.update({'font.size': 40})
import adi_gas_spread_density
import adi_gas_coordinate_density
import gas_spread_density
import gas_coordinate_density
import simba_gas_spread_density
import simba_gas_coordinate_density

adi_spread = adi_gas_spread_density.spread
adi_dens = adi_gas_spread_density.density
adi_xbins = numpy.logspace(numpy.log10(numpy.amin(adi_dens)),
                           numpy.log10(numpy.amax(adi_dens)),
                           num=50)
adi_ybins = numpy.logspace(numpy.log10(numpy.amin(adi_spread)),
                           numpy.log10(numpy.amax(adi_spread)),
                           num=50)
adi_bins = unyt.unyt_array(numpy.logspace(numpy.log10(numpy.amin(adi_dens)),
                                          numpy.log10(numpy.amax(adi_dens)),
                                          num=20),
                           units=adi_dens.units)
adi_x = adi_gas_coordinate_density.x
adi_y = adi_gas_coordinate_density.y
adic_xbins = numpy.linspace(numpy.amin(adi_x), numpy.amax(adi_x), num=26)
adic_ybins = numpy.linspace(numpy.amin(adi_y), numpy.amax(adi_y), num=26)
adi_centers, adi_med, adi_err = bml(adi_dens, adi_spread, x_bins=adi_bins)

eagle_spread = gas_spread_density.spread
Example #41
        ht = cv2.resize(ht, (0, 0),
                        fx=float(heatmap_size) / img_size,
                        fy=float(heatmap_size) / img_size,
                        interpolation=cv2.INTER_LANCZOS4)
        output_heatmaps[:, :, i] = ht

    cropping_param = [
        int(float(cur_hand_bbox[0])),
        int(float(cur_hand_bbox[1])),
        int(float(cur_hand_bbox[2])),
        int(float(cur_hand_bbox[3])), offset_x, offset_y, img_scale
    ]

    # Create background map
    output_background_map = np.ones(
        (heatmap_size, heatmap_size)) - np.amax(output_heatmaps, axis=2)
    output_heatmaps = np.concatenate((output_heatmaps,
                                      output_background_map.reshape(
                                          (heatmap_size, heatmap_size, 1))),
                                     axis=2)

    # coords_set = np.concatenate(
    #     (np.reshape(cur_hand_joints_x, (num_of_joints, 1)), np.reshape(cur_hand_joints_y, (num_of_joints, 1))), axis=1)

    output_image_raw = output_image.astype(np.uint8).tostring()
    output_heatmaps_raw = output_heatmaps.flatten().tolist()
    # output_coords_raw = coords_set.flatten().tolist()
    output_cropping_param_raw = cropping_param
    output_R_raw = camera_ref.R.flatten().tolist()
    output_C_raw = camera_ref.C.flatten().tolist()
    output_K_raw = camera_ref.K.flatten().tolist()
#i_factor = 50 / np.mean(samples[:, 2])
#z_factor = 50 / np.mean(samples[:, 3])
#samples[:, 0] *= np.repeat([g_factor], samples.shape[0], axis = 0)
#samples[:, 1] *= np.repeat([r_factor], samples.shape[0], axis = 0)
#samples[:, 2] *= np.repeat([i_factor], samples.shape[0], axis = 0)
#samples[:, 3] *= np.repeat([z_factor], samples.shape[0], axis = 0)
#print(samples[:,0])
#g_diff = np.amax(samples[:,0]) - np.amin(samples[:,0])
#r_diff = np.amax(samples[:,1]) - np.amin(samples[:,1])
#i_diff = np.amax(samples[:,2]) - np.amin(samples[:,2])
#z_diff = np.amax(samples[:,3]) - np.amin(samples[:,3])
g_min = np.amin(samples[:,0])
r_min = np.amin(samples[:,1])
i_min = np.amin(samples[:,2])
z_min = np.amin(samples[:,3])
g_max = np.amax(samples[:,0])
r_max = np.amax(samples[:,1])
i_max = np.amax(samples[:,2])
z_max = np.amax(samples[:,3])

g_min_u = np.amin(old_samples[:,0])
r_min_u = np.amin(old_samples[:,1])
i_min_u = np.amin(old_samples[:,2])
z_min_u = np.amin(old_samples[:,3])
g_max_u = np.amax(old_samples[:,0])
r_max_u = np.amax(old_samples[:,1])
i_max_u = np.amax(old_samples[:,2])
z_max_u = np.amax(old_samples[:,3])

#print(g_min,r_min,i_min,z_min,g_max,r_max,i_max,z_max)
#g_min = 0.0
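
The eight per-band extrema above can be collapsed into two vectorized calls; a sketch assuming the g, r, i, z bands sit in columns 0-3 of samples:

band_mins = np.amin(samples[:, :4], axis=0)   # g_min, r_min, i_min, z_min
band_maxs = np.amax(samples[:, :4], axis=0)   # g_max, r_max, i_max, z_max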
Exemple #43
0
    def __start_node(self):
        """ Start a node.

        This function starts up a node, splitting it according to the
        sliding midpoint rule.

        Returns
        -------
        KDTree
            self

        """

        # Checking if this could be a leaf node
        n_points = 0
        if self.left_indexes is not None:
            n_points += len(self.left_indexes)
        if self.right_indexes is not None:
            n_points += len(self.right_indexes)
        if n_points <= self.leaf_size:
            self.is_leaf = True
            self.left_subtree = None
            self.right_subtree = None
            if self.left_indexes is None and self.right_indexes is not None:
                self.leaf_indexes = self.right_indexes
            elif self.left_indexes is not None and self.right_indexes is None:
                self.leaf_indexes = self.left_indexes
            else:
                self.leaf_indexes = list(set().union(self.left_indexes,
                                                     self.right_indexes))

        else:
            # Handling left subtree
            if self.left_indexes is not None:
                if len(self.left_indexes) > 0:
                    aux_X = np.asarray(
                        [self.data[index] for index in self.left_indexes])
                    maxes = np.amax(aux_X, axis=0)
                    mins = np.amin(aux_X, axis=0)

                    # Getting longest side
                    d = np.argmax(maxes - mins)

                    maxval = maxes[d]
                    minval = mins[d]

                    data = aux_X[:, d]

                    # Get the split point
                    split = (maxval + minval) / 2

                    # Split the indexes between left and right child
                    left = np.nonzero(data < split)[0]
                    left = np.asarray([self.left_indexes[k] for k in left])
                    right = np.nonzero(data >= split)[0]
                    right = np.asarray([self.left_indexes[k] for k in right])

                    # If one child has no indexes while the other has more than
                    # leaf_size indexes, we slide the cutting point towards the
                    # 'fat' side until there is at least one index on each child.
                    if (len(right) == 0) or (len(left) == 0):
                        if (len(right) == 0) and (len(left) > self.leaf_size):
                            split = np.amax(data[data != np.amax(data)])
                            left = np.nonzero(data < split)[0]
                            left = np.asarray(
                                [self.left_indexes[k] for k in left])
                            right = np.nonzero(data >= split)[0]
                            right = np.asarray(
                                [self.left_indexes[k] for k in right])
                        elif (len(right)
                              == 0) and (len(left) <= self.leaf_size):
                            right = None
                        elif (len(left)
                              == 0) and (len(right) > self.leaf_size):
                            split = np.amin(data[data != np.amin(data)])
                            left = np.nonzero(data < split)[0]
                            left = np.asarray(
                                [self.left_indexes[k] for k in left])
                            right = np.nonzero(data >= split)[0]
                            right = np.asarray(
                                [self.left_indexes[k] for k in right])
                        elif (len(left)
                              == 0) and (len(right) <= self.leaf_size):
                            left = None

                    # Creates the left subtree
                    self.left_subtree = KDTreeNode(
                        data=self.data,
                        left_indexes=left,
                        right_indexes=right,
                        split_axis=d,
                        split_value=split,
                        distance_function=self.distance_function,
                        leaf_size=self.leaf_size,
                        **self.kwargs)

                else:
                    self.left_subtree = None
            else:
                self.left_subtree = None

            # Handling right subtree
            if self.right_indexes is not None:
                if len(self.right_indexes) > 0:
                    aux_X = np.asarray(
                        [self.data[index] for index in self.right_indexes])
                    maxes = np.amax(aux_X, axis=0)
                    mins = np.amin(aux_X, axis=0)

                    # Getting longest side
                    d = np.argmax(maxes - mins)

                    maxval = maxes[d]
                    minval = mins[d]

                    data = aux_X[:, d]

                    # Get the split point
                    split = (maxval + minval) / 2

                    # Split the indexes between left and right child
                    left = np.nonzero(data < split)[0]
                    left = np.asarray([self.right_indexes[k] for k in left])
                    right = np.nonzero(data >= split)[0]
                    right = np.asarray([self.right_indexes[k] for k in right])

                    # If one child has no indexes while the other has more than
                    # leaf_size indexes, we slide the cutting point towards the
                    # 'fat' side until there is at least one index on each child.
                    if (len(right) == 0) or (len(left) == 0):
                        if (len(right) == 0) and (len(left) > self.leaf_size):
                            split = np.amax(data[data != np.amax(data)])
                            left = np.nonzero(data < split)[0]
                            left = np.asarray(
                                [self.right_indexes[k] for k in left])
                            right = np.nonzero(data >= split)[0]
                            right = np.asarray(
                                [self.right_indexes[k] for k in right])
                        elif (len(right)
                              == 0) and (len(left) <= self.leaf_size):
                            right = None
                        elif (len(left)
                              == 0) and (len(right) > self.leaf_size):
                            split = np.amin(data[data != np.amin(data)])
                            left = np.nonzero(data < split)[0]
                            left = np.asarray(
                                [self.right_indexes[k] for k in left])
                            right = np.nonzero(data >= split)[0]
                            right = np.asarray(
                                [self.right_indexes[k] for k in right])
                        elif (len(left)
                              == 0) and (len(right) <= self.leaf_size):
                            left = None

                    # Creates the right subtree
                    self.right_subtree = KDTreeNode(
                        data=self.data,
                        left_indexes=left,
                        right_indexes=right,
                        split_axis=d,
                        split_value=split,
                        distance_function=self.distance_function,
                        leaf_size=self.leaf_size,
                        **self.kwargs)

                else:
                    self.right_subtree = None
            else:
                self.right_subtree = None

        return self
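
A standalone sketch of the sliding-midpoint split that both branches above repeat (a hypothetical helper, assuming at least two distinct values along the chosen axis):

import numpy as np

def sliding_midpoint_split(points):
    # Split on the midpoint of the longest side of the bounding box.
    maxes = np.amax(points, axis=0)
    mins = np.amin(points, axis=0)
    d = np.argmax(maxes - mins)
    split = (maxes[d] + mins[d]) / 2
    data = points[:, d]
    # Slide the split so that neither child ends up empty.
    if not np.any(data >= split):
        split = np.amax(data[data != np.amax(data)])
    elif not np.any(data < split):
        split = np.amin(data[data != np.amin(data)])
    left = np.nonzero(data < split)[0]
    right = np.nonzero(data >= split)[0]
    return d, split, left, right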
Exemple #44
0
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]

    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)

    step_size = 0.1

    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)

    F1 = 3 - (np.sin(2 * a_beta)**2 * np.sin(2 * a_gamma)**2 - 0.5 * np.sin(
        4 * a_beta) * np.sin(4 * a_gamma)) * (1 + np.cos(4 * a_gamma)**2)

    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]

    gamma = a[0] * step_size
    beta = a[1] * step_size

    prog = make_circuit(4)
    sample_shot = 5600
    writefile = open("../data/startQiskit_QC484.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = provider.get_backend("ibmq_5_yorktown")

    circuit1 = transpile(prog, FakeYorktown())
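
The np.where scan above can be written more directly with np.unravel_index; a sketch using the same F1 grid and step_size:

i, j = np.unravel_index(np.argmax(F1), F1.shape)
gamma_alt = i * step_size
beta_alt = j * step_size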
Exemple #45
0
def evaluate_error(gt, pred, criteria):
    """
    Calculates various types of error for between ground truth and prediction. The various errors are intended to
    showcase different strengths and weaknesses in the predictions.
    @param gt: Ground truth depth
    @param pred: Predicted depth
    @param criteria: dict with criteria as the keys
    @return: updated criteria dict
    """

    zero_mask = gt > 0
    gt = gt[zero_mask]
    criteria['n_pixels'] += gt.shape[0]
    pred = pred[zero_mask]
    gt_rescaled = gt * 80.
    pred_rescaled = pred * 80.

    # Mean Absolute Relative Error
    rel = np.abs(gt - pred) / gt  # compute errors
    abs_rel_sum = np.sum(rel)
    criteria['err_absRel'] += abs_rel_sum

    # Square Mean Relative Error
    s_rel = ((gt_rescaled - pred_rescaled) * (gt_rescaled - pred_rescaled)) / (
        gt_rescaled * gt_rescaled)  # compute errors
    squa_rel_sum = np.sum(s_rel)
    criteria['err_squaRel'] += squa_rel_sum

    # Root Mean Square error
    square = (gt_rescaled - pred_rescaled)**2
    rms_squa_sum = np.sum(square)
    criteria['err_rms'] += rms_squa_sum

    # Log Root Mean Square error
    log_square = (np.log(gt_rescaled) - np.log(pred_rescaled))**2
    log_rms_sum = np.sum(log_square)
    criteria['err_logRms'] += log_rms_sum

    # Scale invariant error
    diff_log = np.log(pred_rescaled) - np.log(gt_rescaled)
    diff_log_sum = np.sum(diff_log)
    criteria['err_silog'] += diff_log_sum
    diff_log_2 = diff_log**2
    diff_log_2_sum = np.sum(diff_log_2)
    criteria['err_silog2'] += diff_log_2_sum

    # Mean log10 error
    log10_sum = np.sum(np.abs(np.log10(gt) - np.log10(pred)))
    criteria['err_log10'] += log10_sum

    # Deltas
    gt_pred = gt_rescaled / pred_rescaled
    pred_gt = pred_rescaled / gt_rescaled
    gt_pred = np.reshape(gt_pred, (1, -1))
    pred_gt = np.reshape(pred_gt, (1, -1))
    gt_pred_gt = np.concatenate((gt_pred, pred_gt), axis=0)
    ratio_max = np.amax(gt_pred_gt, axis=0)

    delta_1_sum = np.sum(ratio_max < 1.25)
    criteria['err_delta1'] += delta_1_sum
    delta_2_sum = np.sum(ratio_max < 1.25**2)
    criteria['err_delta2'] += delta_2_sum
    delta_3_sum = np.sum(ratio_max < 1.25**3)
    criteria['err_delta3'] += delta_3_sum
    return criteria
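A hypothetical usage sketch: accumulate the sums over a batch, then divide by the pixel count (the criteria keys are taken from the function body; toy depths are kept strictly positive):

criteria = {key: 0 for key in [
    'n_pixels', 'err_absRel', 'err_squaRel', 'err_rms', 'err_logRms',
    'err_silog', 'err_silog2', 'err_log10',
    'err_delta1', 'err_delta2', 'err_delta3']}
gt = np.random.rand(8, 8) + 0.1          # strictly positive toy depths
pred = gt * 1.01                          # slightly biased toy prediction
criteria = evaluate_error(gt, pred, criteria)
mean_abs_rel = criteria['err_absRel'] / criteria['n_pixels']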
Exemple #46
0
    def test_pt_tf_model_equivalence(self):
        if not is_torch_available():
            return

        import torch
        import transformers

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)

            config.output_hidden_states = True
            tf_model = model_class(config)
            pt_model = pt_model_class(config)

            # Check we can load pt model in tf and vice-versa with model => model functions
            tf_model = transformers.load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=inputs_dict)
            pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)

            # Check that predictions on the first output (logits/hidden-states) are close enough given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = dict(
                (name, torch.from_numpy(key.numpy()).to(torch.long)) for name, key in inputs_dict.items()
            )
            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(inputs_dict, training=False)
            tf_hidden_states = tfo[0].numpy()
            pt_hidden_states = pto[0].numpy()

            # Zero out positions that are NaN in either framework's output
            nan_mask = np.isnan(tf_hidden_states) | np.isnan(pt_hidden_states)
            pt_hidden_states[nan_mask] = 0
            tf_hidden_states[nan_mask] = 0

            max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))
            # Debug info (remove when fixed)
            if max_diff >= 2e-2:
                print("===")
                print(model_class)
                print(config)
                print(inputs_dict)
                print(pt_inputs_dict)
            self.assertLessEqual(max_diff, 2e-2)

            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
                torch.save(pt_model.state_dict(), pt_checkpoint_path)
                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)

                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
                tf_model.save_weights(tf_checkpoint_path)
                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)

            # Check that predictions on the first output (logits/hidden-states) are close enough given low-level computational differences
            pt_model.eval()
            pt_inputs_dict = dict(
                (name, torch.from_numpy(key.numpy()).to(torch.long)) for name, key in inputs_dict.items()
            )
            with torch.no_grad():
                pto = pt_model(**pt_inputs_dict)
            tfo = tf_model(inputs_dict)
            tfo = tfo[0].numpy()
            pto = pto[0].numpy()
            tfo[np.isnan(tfo)] = 0
            pto[np.isnan(pto)] = 0
            max_diff = np.amax(np.abs(tfo - pto))
            self.assertLessEqual(max_diff, 2e-2)
Exemple #47
0
def test_3_fct_tot(data):
    sut = data[2]
    rms = 0.00012049  # TODO
    np.testing.assert_approx_equal(np.amin(sut), 1)
    np.testing.assert_approx_equal(np.amax(sut), 4.26672894, significant=2)
Exemple #48
0
a = np.sqrt(WP**2 * (8 * k**2 * v0**2 + WP**2))
omega = np.lib.scimath.sqrt((2 * k**2 * v0**2 + WP**2 - a) / 2)

om_im = omega.imag

f = Figure(figsize=(5, 4), dpi=100)

plt.plot(k, om_im, linewidth=2)

plt.xlabel('k')

plt.ylabel('omega')

plt.show()

gamma_max = np.amax(om_im)
print 'Maximum growth rate: ', gamma_max
idx = np.argmax(om_im)
k_max = k[idx]
print 'k for maximizing growth rate: ', k_max

L = 3 * 2 * np.pi / (np.sqrt(3.0 / 2) / 2.)
mode = 3
k_sim = 2 * np.pi / L * mode
a_sim = np.sqrt(WP**2 * (8 * k_sim**2 * v0**2 + WP**2))
omega_sim = np.lib.scimath.sqrt((2 * k_sim**2 * v0**2 + WP**2 - a_sim) / 2)

print 'Growth rate: ', omega_sim

raw_input('Press enter...')
Exemple #49
0
def test_upwind(data):
    sut = data[0]
    assert np.amin(sut) == 1
    np.testing.assert_approx_equal(np.amax(sut), 4.796, significant=4)
Exemple #50
0
def test_2_fct_iga(data):
    sut = data[3]
    rms = 0.00026658  # TODO
    np.testing.assert_approx_equal(np.amin(sut), 1)
    np.testing.assert_approx_equal(np.amax(sut), 4.25518091, significant=2)
Exemple #51
0
def sweep(duration, dt, f,
          autocorrelate=True,
          return_t=False,
          taper='blackman',
          **kwargs):
    """
    Generates a linear frequency modulated wavelet (sweep). Wraps
    scipy.signal.chirp, adding dimensions as necessary.

    Args:
        duration (float): The length in seconds of the wavelet.
        dt (float): The sample interval in seconds (usually 0.001, 0.002,
            or 0.004).
        f (ndarray): Any sequence like (f1, f2). A list of lists will create a
            wavelet bank.
        autocorrelate (bool): Whether to autocorrelate the sweep(s) to create
            a wavelet. Default is `True`.
        return_t (bool): If True, then the function returns a tuple of
            wavelet, time-basis, where time is the range from -duration/2 to
            duration/2 in steps of dt.
        taper (str or function): The window or tapering function to apply.
            To use one of NumPy's functions, pass 'bartlett', 'blackman' (the
            default), 'hamming', or 'hanning'; to apply no tapering, pass
            'none'. To apply your own function, pass a function taking only
            the length of the window and returning the window function.
        **kwargs: Further arguments are passed to scipy.signal.chirp. They are
            `method` ('linear','quadratic','logarithmic'), `phi` (phase offset
            in degrees), and `vertex_zero`.

    Returns:
        ndarray: The waveform; if `return_t` is True, a namedtuple of
            (amplitude, time).
    """
    t0, t1 = -duration/2, duration/2
    t = np.arange(t0, t1, dt)

    f = np.asanyarray(f).reshape(-1, 1)
    f1, f2 = f

    c = [scipy.signal.chirp(t, f1_+(f2_-f1_)/2., t1, f2_, **kwargs)
         for f1_, f2_
         in zip(f1, f2)]

    if autocorrelate:
        w = [np.correlate(c_, c_, mode='same') for c_ in c]
    else:
        w = c

    w = np.squeeze(w) / np.amax(w)

    if taper:
        funcs = {
            'bartlett': np.bartlett,
            'blackman': np.blackman,
            'hamming': np.hamming,
            'hanning': np.hanning,
            'none': lambda x: x,
        }
        func = funcs.get(taper, taper)
        w *= func(t.size)

    if return_t:
        Sweep = namedtuple('Sweep', ['amplitude', 'time'])
        return Sweep(w, t)
    else:
        return w
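
A usage sketch (hypothetical parameters): a 0.5 s linear sweep from 8 Hz to 80 Hz sampled at 2 ms, returned with its time basis:

w, t = sweep(0.5, 0.002, (8, 80), return_t=True)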
Exemple #52
0
def test_2_fct(data):
    sut = data[1]
    rms = 0.00036731  # TODO
    np.testing.assert_approx_equal(np.amin(sut), 1)
    np.testing.assert_approx_equal(np.amax(sut), 3.52544410, significant=2)
Exemple #53
0
        metric = [param.metric]
        txt_col = [METRICS[param.metric]['col']]

    results = [np.loadtxt(f, usecols=txt_col) for f in result_files]

    # Reduce length of all folds to the minimum length
    epochs = [len(r) for r in results]
    e_min = min(epochs)
    results = [r[:e_min] for r in results]

    # res_array.shape = (k , e_min, len(txt_col))
    res_array = np.array(results)
    # to obtain one measurement for all folds: p = arr[:,:,index_p_in_txt_col]
    means = np.mean(res_array, axis=0)
    mins = np.amin(res_array, axis=0)
    maxs = np.amax(res_array, axis=0)

    if len(metric) > 1:
        n_cols = 2
        n_rows = -(-len(metric) // 2)  # ceil(len(metric) / 2) via negative floor division
        fig1, axes = plt.subplots(nrows=n_rows, ncols=n_cols, sharex='all')  # squeeze=False??
        epochs = np.arange(e_min)

        for i in range(n_rows):
            for j in range(n_cols):
                axes[i, j].plot(epochs, means[:, i * n_cols + j])  # [:, i * n_cols + j]
                axes[i, j].fill_between(epochs, mins[:, i * n_cols + j], maxs[:, i * n_cols + j], alpha=.5)
    else:
        epochs = np.arange(e_min)
        fig1 = plt.plot(epochs, means)
        plt.fill_between(epochs, mins, maxs, alpha=.5)
Exemple #54
0
def plot_fem_solution(self, kx=0.):
    if self.plot[5]: # Determination of the maximum value of the pressure
        p_max = 0
        p_min = 1e308
        for _en in self.entities:
            if isinstance(_en, FluidFem):
                for _elem in _en.elements:
                    _, __, p_elem = _elem.display_sol(3)
                    _max = np.amax(np.abs(p_elem))
                    _min = np.amin(np.abs(p_elem))
                    if _max > p_max: p_max = _max
                    if _min < p_min: p_min = _min

    if any(self.plot[3:]):
        x, y, u_x, u_y, pr = [], [], [], [], []
    for _en in self.entities:
        if isinstance(_en, FluidFem):
            if any(self.plot[2::3]): # Plot of pressure  == True
                for ie, _elem in enumerate(_en.elements):
                    # print(ie/len(_en.elements))
                    x_elem, y_elem, p_elem = _elem.display_sol(3)
                    p_elem = p_elem[:, 0]
                    p_elem *= np.exp(1j*kx*x_elem)
                    if self.plot[2]:
                        plt.figure("Pressure")
                        plt.plot(y_elem, np.abs(p_elem), 'r+')
                        plt.plot(y_elem, np.imag(p_elem), 'm.')
                    if self.plot[5]:
                        triang = mtri.Triangulation(x_elem, y_elem)
                        plt.figure("Pressure map")
                        plt.tricontourf(triang, np.abs(p_elem), cmap=cm.jet, levels=np.linspace(p_min, p_max,40))
                        # x.extend(list(x_elem))
                        # y.extend(list(y_elem))
                        # pr.extend(list(p_elem))
        elif isinstance(_en, PemFem):
            if any(self.plot): # Plot of pressure  == True
                for _elem in _en.elements:
                    x_elem, y_elem, f_elem = _elem.display_sol([0, 1, 3])
                    ux_elem = f_elem[:, 0]*np.exp(1j*kx*x_elem)
                    uy_elem = f_elem[:, 1]*np.exp(1j*kx*x_elem)
                    p_elem = f_elem[:, 2]*np.exp(1j*kx*x_elem)
                    if self.plot[0]:
                        plt.figure("Solid displacement along x")
                        plt.plot(y_elem, np.abs(ux_elem), 'r+')
                        plt.plot(y_elem, np.imag(ux_elem), 'm.')
                    if self.plot[1]:
                        plt.figure("Solid displacement along y")
                        plt.plot(y_elem, np.abs(uy_elem), 'r+')
                        plt.plot(y_elem, np.imag(uy_elem), 'm.')
                    if self.plot[2]:
                        plt.figure("Pressure")
                        plt.plot(y_elem, np.abs(p_elem), 'r+')
                        plt.plot(y_elem, np.imag(p_elem), 'm.')
                    if self.plot[5]:
                        x.extend(list(x_elem))
                        y.extend(list(y_elem))
                        pr.extend(list(p_elem))
        elif isinstance(_en, ElasticFem):
            if any(self.plot): # Plot of pressure  == True
                for _elem in _en.elements:
                    x_elem, y_elem, f_elem = _elem.display_sol([0, 1, 3])
                    ux_elem = f_elem[:, 0]*np.exp(1j*kx*x_elem)
                    uy_elem = f_elem[:, 1]*np.exp(1j*kx*x_elem)
                    if self.plot[0]:
                        plt.figure("Solid displacement along x")
                        plt.plot(y_elem, np.abs(ux_elem), 'r+')
                        plt.plot(y_elem, np.imag(ux_elem), 'm.')
                    if self.plot[1]:
                        plt.figure("Solid displacement along y")
                        plt.plot(y_elem, np.abs(uy_elem), 'r+')
                        plt.plot(y_elem, np.imag(uy_elem), 'm.')

    if any(self.plot[3:]):
        # triang = mtri.Triangulation(x, y)
        if self.plot[5]:
            plt.figure("Pressure map")
            # plt.tricontourf(triang, np.abs(pr), 40, cmap=cm.jet)
        # self.display_mesh()
        # plt.colorbar()
        plt.axis("off")
        plt.axis('equal')
Exemple #55
0
    def extract_particles(self, segmentation):
        """
        Saves particle centers into output .star file, after dismissing regions
        that are too big to contain a particle.

        Args:
            segmentation: Segmentation of the micrograph into noise and particle projections.
        """
        segmentation = segmentation[self.query_size // 2 - 1:-self.query_size // 2,
                                    self.query_size // 2 - 1:-self.query_size // 2]
        labeled_segments, _ = ndimage.label(segmentation, np.ones((3, 3)))
        values, repeats = np.unique(labeled_segments, return_counts=True)

        values_to_remove = np.where(repeats > self.max_size ** 2)
        values = np.take(values, values_to_remove)
        values = np.reshape(values, (1, 1, np.prod(values.shape)), 'F')

        labeled_segments = np.reshape(labeled_segments, (labeled_segments.shape[0],
                                                         labeled_segments.shape[1], 1), 'F')
        matrix1 = np.repeat(labeled_segments, values.shape[2], 2)
        matrix2 = np.repeat(values, matrix1.shape[0], 0)
        matrix2 = np.repeat(matrix2, matrix1.shape[1], 1)

        matrix3 = np.equal(matrix1, matrix2)
        matrix4 = np.sum(matrix3, 2)

        segmentation[np.where(matrix4 == 1)] = 0
        labeled_segments, _ = ndimage.label(segmentation, np.ones((3, 3)))

        max_val = np.amax(np.reshape(labeled_segments, (np.prod(labeled_segments.shape))))
        center = center_of_mass(segmentation, labeled_segments, np.arange(1, max_val))
        center = np.rint(center)

        img = np.zeros((segmentation.shape[0], segmentation.shape[1]))
        img[center[:, 0].astype(int), center[:, 1].astype(int)] = 1
        y, x = np.ogrid[-self.moa:self.moa+1, -self.moa:self.moa+1]
        element = x*x+y*y <= self.moa * self.moa
        img = binary_dilation(img, structure=element)
        labeled_img, _ = ndimage.label(img, np.ones((3, 3)))
        values, repeats = np.unique(labeled_img, return_counts=True)
        y = np.where(repeats == np.count_nonzero(element))
        y = np.array(y)
        y = y.astype(int)
        y = np.reshape(y, (np.prod(y.shape)), 'F')
        y -= 1
        center = center[y, :]

        center = center + (self.query_size // 2 - 1) * np.ones(center.shape)
        center = center + (self.query_size // 2 - 1) * np.ones(center.shape)
        center = center + np.ones(center.shape)

        center = config.apple.mrc_shrink_factor * center

        # swap columns to align with Relion
        center = center[:, [1, 0]]

        # first column is x; second column is y - offset by margins that were discarded from the image
        center[:, 0] += config.apple.mrc_margin_left
        center[:, 1] += config.apple.mrc_margin_top

        if self.output_directory is not None:
            basename = os.path.basename(self.filename)
            name_str, ext = os.path.splitext(basename)

            applepick_path = os.path.join(self.output_directory, "{}_applepick.star".format(name_str))
            with open(applepick_path, "w") as f:
                np.savetxt(f, ["data_root\n\nloop_\n_rlnCoordinateX #1\n_rlnCoordinateY #2"], fmt='%s')
                np.savetxt(f, center, fmt='%d %d')

        return center
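
The matrix1..matrix4 broadcast above effectively tests membership of each pixel's label in the oversized-segment list; a more direct sketch of the same mask (hypothetical, assuming the variables as they stand before the reshape calls):

big_segments = np.isin(labeled_segments, values)   # per-pixel membership test
segmentation[big_segments] = 0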
Exemple #56
0
cols = data.shape[1]  
X = data.iloc[:,0:cols-1]  
y = data.iloc[:,cols-1:cols] 

# convert from data frames to numpy matrices
X = np.array(X.values)  
y = np.array(y.values)
y = y.flatten()

X_tf = tf.constant(X)
Y_tf = tf.constant(y)
#Train a Linear Classifier

# initialize parameters randomly
D = X.shape[1]
K = np.amax(y) + 1

# initialize parameters in such a way to play nicely with the gradient-check!
#W = 0.01 * np.random.randn(D,K)
#b = np.zeros((1,K)) + 1.0

initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=21131186)  # This might differ based on tf version.
# Play with stddev and see how your model performance changes.
# Change initialization to Xavier and orthogonal and analyse the change in accuracy.
# If using another init, compare it with Gaussian init and report your findings.
W = tf.Variable(initializer([D, K]))
W = tf.cast(W, dtype=tf.double)
b = tf.Variable(tf.random.normal([K]))  # You can also try tf.zeros and tf.ones; report your findings.
theta = (W,b)
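
A sketch of the Xavier initialization suggested in the comments above (hypothetical names, TF2 API assumed):

initializer_xavier = tf.keras.initializers.GlorotNormal(seed=21131186)
W_xavier = tf.cast(tf.Variable(initializer_xavier([D, K])), dtype=tf.double)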

# some hyperparameters
Exemple #57
0
def check_image(image):
    assert isinstance(image, np.ndarray)
    assert image.shape == (512, 512)
    assert np.amin(image) >= 0 - DELTA and np.amax(image) <= 1 + DELTA
def normalize_coordinates(list_residues_coord):
    xyz_max = np.amax(list_residues_coord, axis=0)[0:3]
    list_residues_coord[:, 0:3] /= xyz_max
    return list_residues_coord
def main():
    # Training settings
    parser = argparse.ArgumentParser(
        description='prediction from cognitive memory')

    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')

    parser.add_argument(
        '--layer',
        type=int,
        default=0,
        metavar='N',
        help=
        'select a layer 0-4, which represents the first CNN, 3 composite layers and the final FC'
    )

    #    parser.add_argument('--threshold', type=int, default=0, metavar='N',
    #                        help='threshold values used when cogmemry is generated')

    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    test_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
        root='./data',
        train=False,
        transform=transforms.Compose([transforms.ToTensor(), normalize])),
                                              batch_size=10000,
                                              shuffle=False,
                                              **kwargs)

    coglayer = args.layer
    #threshold_={0:0.3,1:0.75,2:0.6,3:0.57, 4:0.88}
    threshold_0 = [0.18, 0.2, 0.22, 0.24, 0.26, 0.3, 0.32, 0.34]
    threshold_1 = [0.64, 0.66, 0.68, 0.70, 0.72, 0.74, 0.76, 0.78]
    threshold_2 = [0.48, 0.5, 0.52, 0.54, 0.56, 0.58, 0.6, 0.62]
    threshold_3 = [0.5, 0.52, 0.54, 0.56, 0.58, 0.6, 0.62, 0.64]
    threshold_4 = [0.8, 0.82, 0.84, 0.86, 0.88, 0.9, 0.92, 0.94]

    if coglayer == 0:
        thresholds_ = threshold_0
    elif coglayer == 1:
        thresholds_ = threshold_1
    elif coglayer == 2:
        thresholds_ = threshold_2
    elif coglayer == 3:
        thresholds_ = threshold_3
    elif coglayer == 4:
        thresholds_ = threshold_4

    pred_n = torch.load('test_prediction_resnet.pt',
                        map_location=lambda storage, loc: storage)

    print(pred_n.size())

    for data, target in test_loader:
        labels = target
    labels_ = []
    for xi in labels:
        temp_ = np.zeros(10)
        temp_[xi.item()] = 1.0
        labels_.append(temp_)
    labels_ = np.array(labels_)
    print('label.shape', labels_.shape)

    if os.path.exists("confusion_sel"):
        pass
    else:
        os.mkdir("confusion_sel")

    results = {}
    for th in thresholds_:
        layer_sel_ = coglayer
        temp_dict = {}

        roV = load_test_image(layer_sel_)
        roV = roV.to(device)
        act_map = torch.load('coglayer/map_association_' + str(coglayer) +
                             '_' + str(th) + '.pt',
                             map_location=lambda storage, loc: storage)
        sel = act_map.map
        sel = sel.cpu().numpy()

        wm = torch.load('coglayer/wm_' + str(layer_sel_) + '_' + str(th) +
                        '.pt',
                        map_location=lambda storage, loc: storage)

        wm = wm.to(device)
        cog = CogMem_load(wm)
        #cog=CogMem_load(wm,label)

        cog.forward(roV)
        #pred=cog.pred.long()
        #pred=cog.pred.long().cpu().numpy()

        total_1 = 0
        total_2 = 0
        total_3 = 0
        total_4 = 0
        total_5 = 0
        cons1 = 0
        cons2 = 0
        temp = 0
        corr = np.zeros((10, 10))
        cnt = 0
        #mem=[]
        #print ('sel shape',sel.shape)
        #print (cog.image.size())
        for xi, xin in enumerate(pred_n):
            cls = xin.item()
            label_t = labels[xi].long().item()
            v2 = cog.image[:, xi]
            #mem.append(v2.cpu().numpy())
            idx = torch.argsort(v2).cpu().numpy()
            idx = np.flip(idx, 0)[:8]

            temp_v = np.zeros(10)
            for zin in idx:
                temp_v = temp_v + sel[zin, :] * v2[zin].item()

            idx2 = np.argmax(temp_v)
            idx3 = np.argsort(temp_v)
            idx3 = np.flip(idx3, 0)[:3]
            sum_v = np.sum(np.exp(temp_v))

            #print (xi, idx, cls, idx3, idx2)
            # cls: network prediction, idx2: argmax from association, idx3: top-3 from association, label_t: ground truth
            if cls == idx2:
                total_1 = total_1 + 1
            if label_t == cls:
                total_2 = total_2 + 1

            if label_t == idx2:
                total_3 = total_3 + 1

            if label_t != cls:
                temp = temp + 1
                if cls == idx2:
                    total_4 = total_4 + 1
            else:
                temp = temp + 1
                if cls == idx2:
                    total_5 = total_5 + 1

            if cls in idx3:
                cons1 = cons1 + 1
            if label_t in idx3:
                cons2 = cons2 + 1
            if idx2 == label_t:
                c1 = idx2
                cnt = cnt + 1
                for c2 in range(10):
                    #if c1!=c2:
                    corr[c1, c2] = corr[c1, c2] + np.exp(temp_v[c2]) / np.exp(
                        temp_v[c1])
        print(cnt)
        for c1 in range(10):
            corr[c1, :] = corr[c1, :] / float(cnt)
        for c1 in range(10):
            corr[c1, c1] = 0
        max_v = np.amax(corr)
        #corr=corr/500.0
        print(layer_sel_, total_1, total_2, total_3)
        print('cons1', cons1, 'cons2', cons2)
        #pylab.figure(ttin+1)
        pylab.imshow(corr, cmap='jet', vmax=max_v)
        pylab.colorbar()
        pylab.savefig('confusion_sel/L' + str(layer_sel_) + '_' + str(th) +
                      '.png')
        pylab.savefig('confusion_sel/L' + str(layer_sel_) + '_' + str(th) +
                      '.eps')
        pylab.close()
        temp_dict = {
            'max_pred': total_1,
            'ref_accuracy': total_2,
            'max_accuracy': total_3,
            'consist_pred': cons1,
            'consist_accuracy': cons2,
            'cog_size': wm.size()
        }
        #mem=np.array(mem)
        #np.savetxt('mem_'+str(layer_sel_)+'.txt',mem)
        torch.cuda.empty_cache()
        del cog, roV, sel, wm, act_map
        results[str(th)] = temp_dict
    fp = open('confusion_sel/prediction' + str(layer_sel_) + '.json', 'w')
    json.dump(results, fp)
    fp.close()
            list_key.extend(re.findall(r'\d+', line))

mean_array_agree = np.array(list_agreement).astype(float)  # np.float is deprecated
mean_agree = np.mean(mean_array_agree)

print ("Time generating key agreements:")
print(np.sum(mean_array_agree))

print ("Mean generating key agreement:")
print (mean_agree)
print ("Standard deviation of key agreement:")
print (np.std(mean_array_agree))
print ("Fastest time generating key agreement:")
print (np.amin(mean_array_agree))
print ("Slowest time generating key agreement:")
print (np.amax(mean_array_agree))

print ("")


mean_array_key = np.array(list_key).astype(float)
mean_key = np.mean(mean_array_key)

print ("Time setting key:")
print(np.sum(mean_array_key))

print ("Mean setting key:")
print (mean_key)

print ("Standard deviation of setting key:")
print (np.std(mean_array_key))