Code example #1
File: item.py Project: cellcraft/cellcraft
 def make_grid(self):
     amax = np.amax([np.amax(coor, axis=0) for coor in self.coordinates], axis=0)
     amin = np.amin([np.amin(coor, axis=0) for coor in self.coordinates], axis=0)
     self.x = np.arange(amin[0], amax[0] + self.blocksize, self.blocksize)
     self.y = np.arange(amin[1], amax[1] + self.blocksize, self.blocksize)
     self.z = np.arange(amin[2], amax[2] + self.blocksize, self.blocksize)
     self.values = np.zeros((self.x.shape[0] - 1, self.y.shape[0] - 1, self.z.shape[0] - 1))
Code example #2
File: designs.py Project: aglaws/active_subspaces
def _maximin_design_obj(y, vert=None):
    """
    Objective function for the maximin design optimization.

    :param ndarray y: Contains the coordinates of the points in the design. If
        there are N points in n dimensions then `y` has shape (Nn, ).
    :param ndarray vert: Contains the fixed vertices defining the zonotope.

    **Notes**

    This function returns the negative of the minimum squared distance over all
    point-point and point-vertex pairs, so minimizing it maximizes that distance.
    """
    Ny, n = vert.shape
    N = y.size // n  # integer division so the reshape below works under Python 3
    Y = y.reshape((N, n))

    # get minimum distance among points
    D0 = distance_matrix(Y, Y) + 1e5*np.eye(N)
    d0 = np.power(D0.flatten(), 2)
    d0star = np.amin(d0)

    # get minimum distance between points and vertices
    D1 = distance_matrix(Y, vert)
    d1 = np.power(D1.flatten(), 2)
    d1star = np.amin(d1)
    dstar = np.amin([d0star, d1star])
    return -dstar
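
For orientation, a minimal usage sketch (the unit-square vertices and the random starting points are illustrative, not from the project):

import numpy as np
from scipy.spatial import distance_matrix  # assumed import, matching the call above

vert = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])  # fixed vertices of a unit square
y0 = np.random.uniform(0., 1., size=10)                    # 5 candidate points in 2D, flattened
obj = _maximin_design_obj(y0, vert=vert)
# obj is the negated minimum squared distance, so passing _maximin_design_obj
# to a minimizer (e.g. scipy.optimize.minimize) spreads the points apart.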
Code example #3
File: band_gaps_rigid.py Project: certik/sfepy
def extend_cell_data( data, pb, rname, val = None ):
    n_el = pb.domain.shape.n_el
    if data.shape[0] == n_el: return data

    if val is None:
        if data.shape[2] > 1: # Vector.
            val = nm.amin( nm.abs( data ) )
        else: # Scalar.
            val = nm.amin( data )

    edata = nm.empty( (n_el,) + data.shape[1:], dtype = nm.float64 )
    edata.fill( val )
    region = pb.domain.regions[rname]
    offs = region.get_cell_offsets()
    eoffs = pb.domain.get_cell_offsets()
##     print offs
##     print eoffs
##     print pb.domain.mat_ids_to_i_gs
##     pause()
    for group in pb.domain.iter_groups():
        ig = group.ig
        ii = eoffs[ig]
        if ig in region.igs:
            n_cell = region.shape[ig].n_cell
            ir = offs[ig]
            edata[ii+region.cells[ig]] = data[ir:ir+n_cell]
    return edata
Code example #4
File: grid.py Project: jingzhiyou/octant
 def _get_ind_under_point(self, event):
     'get the index of the vertex under point if within epsilon tolerance'
     try:
         x, y = zip(*self._poly.xy)
         
         # display coords
         xt, yt = self._poly.get_transform().numerix_x_y(x, y)
         d = np.sqrt((xt-event.x)**2 + (yt-event.y)**2)
         indseq = np.nonzero(np.equal(d, np.amin(d)))[0]
         ind = indseq[0]
     
         if d[ind]>=self._epsilon:
             ind = None
     
         return ind
     except AttributeError:  # older numerix transform API unavailable; fall back
         # display coords
         xy = np.asarray(self._poly.xy)
         xyt = self._poly.get_transform().transform(xy)
         xt, yt = xyt[:, 0], xyt[:, 1]
         d = np.sqrt((xt-event.x)**2 + (yt-event.y)**2)
         indseq = np.nonzero(np.equal(d, np.amin(d)))[0]
         ind = indseq[0]
         
         if d[ind]>=self._epsilon:
             ind = None
         
         return ind
Code example #5
File: binding_pocket.py Project: AhlamMD/deepchem
def extract_active_site(protein_file, ligand_file, cutoff=4):
  """Extracts a box for the active site."""
  protein_coords = rdkit_util.load_molecule(
      protein_file, add_hydrogens=False)[0]
  ligand_coords = rdkit_util.load_molecule(
      ligand_file, add_hydrogens=True, calc_charges=True)[0]
  num_ligand_atoms = len(ligand_coords)
  num_protein_atoms = len(protein_coords)
  pocket_inds = []
  pocket_atoms = set([])
  for lig_atom_ind in range(num_ligand_atoms):
    lig_atom = ligand_coords[lig_atom_ind]
    for protein_atom_ind in range(num_protein_atoms):
      protein_atom = protein_coords[protein_atom_ind]
      if np.linalg.norm(lig_atom - protein_atom) < cutoff:
        if protein_atom_ind not in pocket_atoms:
          pocket_atoms = pocket_atoms.union(set([protein_atom_ind]))
  # Should be an array of size (n_pocket_atoms, 3)
  pocket_atoms = list(pocket_atoms)
  n_pocket_atoms = len(pocket_atoms)
  pocket_coords = np.zeros((n_pocket_atoms, 3))
  for ind, pocket_ind in enumerate(pocket_atoms):
    pocket_coords[ind] = protein_coords[pocket_ind]

  x_min = int(np.floor(np.amin(pocket_coords[:, 0])))
  x_max = int(np.ceil(np.amax(pocket_coords[:, 0])))
  y_min = int(np.floor(np.amin(pocket_coords[:, 1])))
  y_max = int(np.ceil(np.amax(pocket_coords[:, 1])))
  z_min = int(np.floor(np.amin(pocket_coords[:, 2])))
  z_max = int(np.ceil(np.amax(pocket_coords[:, 2])))
  return (((x_min, x_max), (y_min, y_max), (z_min, z_max)), pocket_atoms,
          pocket_coords)
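
The box construction at the end is simply an integer-rounded bounding box; a standalone sketch of just that step, with a hypothetical coordinate array:

import numpy as np

pocket_coords = np.random.randn(10, 3) * 5.0  # hypothetical (n_pocket_atoms, 3) coordinates
box = tuple((int(np.floor(np.amin(pocket_coords[:, i]))),
             int(np.ceil(np.amax(pocket_coords[:, i])))) for i in range(3))
# box == ((x_min, x_max), (y_min, y_max), (z_min, z_max))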
Code example #6
File: a_Plotter.py Project: priyanka27s/TA_software
 def set_data(self, zname, zdata, zcolor):
     if zdata is not None:
         if self.overall_plot_type=="polygon":
            if zname not in self.clts: #plottables['plotted']:#self.pd.list_data():
                clt=PolyCollection(zdata, alpha=0.5, antialiased=True)#, rasterized=False, antialiased=False)
                clt.set_color(colorConverter.to_rgba(zcolor))                
                self.clts[zname]=clt
                self.axe.add_collection(self.clts[zname], autolim=True)
            else:                
                self.clts[zname].set_verts(zdata)
         if self.overall_plot_type=="XY":
             if zname not in self.clts:
                 clt = LineCollection(zdata)#, offsets=offs)
                 clt.set_color(colors)
                 #print dir(clt)
                 self.clts[zname]=clt
                 self.axe.add_collection(self.clts[zname], autolim=True)
                 self.axe.autoscale_view()
             else:
                 self.clts[zname].set_segments(zdata)
         if self.overall_plot_type=="img":
             if zname not in self.clts:
                 axeimg=self.axe.imshow( Magvec, 
                                        vmin=amin(Magvec),
                                        vmax=0.001, #amax(Magvec), 
                                        aspect="auto", origin="lower",
                                 extent=[amin(yoko),amax(yoko), amin(freq),amax(freq)],
                                 #cmap='RdBu'
                                 )
                 self.fig.colorbar(axeimg)
Code example #7
def plot_Nhden(elem,N,hcol,hden,bounds=False):
    for i in to_plot[elem]:
        plt.clf()
        x = np.array(hden, dtype=float)  # np.float alias removed in newer NumPy
        y = np.array(N[i])
        #x,y,hcol = trim(x,y,hcol)
        y = hcol[0] - y
        xlims=[0.75*np.amin(x), 1.25*np.amax(x)]
        ylims=[0.75*np.amin(y), 1.25*np.amax(y)]
        try:
            if bounds: 
                l = minNHI - observed[elem][i]["column"][2] 
                if observed[elem][i]["column"][0]==-30.:
                    u=maxNHI
                else:
                    u = maxNHI - observed[elem][i]["column"][0]
                plt.fill([-30.,30., 30., -30.], [l,l,u,u], '0.50', alpha=0.2, edgecolor='b')

                #plt.fill_between(np.arange(xlims[0],xlims[1]),lower,upper,color='0.50')
        except KeyError:
            pass
        plt.plot(x, y, color_map[i],label=ion_state(i,elem))
        plt.ylabel(r"log $N_{HI}/N_{%s}$"%(str(elem)+str(roman[i])))
        plt.xlabel("log $n_{H}$")
        plt.minorticks_on()

        makedir('hden')

        f=os.path.join(paths["plot_path"],"hden", elem+roman[i]+"N_Nhden.png")

        plt.xlim([-3.,0.])
        #plt.ylim(ylims)
        plt.savefig(f)
        plt.show()
        plt.close()
Code example #8
File: doe_add.py Project: nikitask/TractorMaster
def construct(phi1, phi2, nomod = 0, amp1 =[], amp2=[], eta = 0, ampout= 0): #does ampout need to be there?
	if len(amp1) > 0 or len(amp2) > 0:
		tempshape = phi1.shape
		w = tempshape[1]
		h = tempshape[0]
		if len(amp1) == 0:
			temp1 = np.ones(w)
			temp2 = np.ones(h)
			for r in temp2:
				amp1 += [temp1]
		if len(amp2) == 0:
			temp1 = np.ones(w)
			temp2 = np.ones(h)
			for r in temp2:
				amp2 += [temp1]
		psi1 = amp1 * np.exp(1j*phi1)
		psi2 = amp2 * np.exp(1j*phi2)
		psi = psi1 * psi2
		psi = np.array(psi)
		apsi = abs(psi)
		psi = psi/(np.amax(abs(psi)))
		phi = np.arctan2(sp.real(psi),sp.imag(psi))
		phi -= np.amin(phi)
		phi = phi % (2.*np.pi)
		eta = 2*np.median(abs(psi))
		randarray = np.array([[random.random() for i in range(w)] for j in range(h)])
		shape = (abs(psi) >= (eta*randarray)) 
		index = np.where(shape == False)
		phi[index] = 0
		ampout = abs(psi)
	else:
		phi = phi1 + phi2
		phi = phi - np.amin(phi)
		phi = phi % (2.*np.pi)
	return phi
Code example #9
def create_histogram (mu, sigma, weights, bin_size, low_spec, high_spec, cu1_accepted, t1_failure_pos):
  p1 = figure(title="Normal Distribution",tools = "pan,box_select,box_zoom,xwheel_zoom,reset,save,resize", background_fill="#E8DDCB")

  measured = np.random.normal(mu, sigma, 1000)
  hist, edges = np.histogram(weights, density=True, bins=bin_size)

  x = np.linspace(np.amin(weights), np.amax(weights), 1000)
  pdf = 1/(sigma * np.sqrt(2*np.pi)) * np.exp(-(x-mu)**2 / (2*sigma**2))
  cdf = (1+scipy.special.erf((x-mu)/np.sqrt(2*sigma**2)))/2

  p1.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
       fill_color="#036564", line_color="#033649",\
  )

  sort_weights = sorted(weights)

  cu1_yield = round(float(len(cu1_accepted))/(float(len(cu1_accepted)) + float(len(t1_failure_pos))),2)

  p1.line(x, pdf, line_color="#D95B43", line_width=8, alpha=0.7, legend="PDF")
  p1.line(low_spec, y=[0, np.amax(hist)], line_dash=[4, 4], line_color="orange", line_width=3, alpha=.5)
  p1.line(high_spec, y=[0, np.amax(hist)], line_dash=[4, 4], line_color="orange", line_width=3, alpha=.5)
  p1.line(weights[0], 0, line_width=1, legend='Mean = ' + str(round(mu, 3))) #daily rejected
  p1.line(weights[0], 0, line_width=1, legend='2*Std (Std = ' + str(round(sigma, 3)) + ")") #daily accepted
  p1.line(weights[0], 0, line_width=1, legend='Yield: ' + str(cu1_yield)) #daily rejected
  p1.line(weights[0], 0, line_width=1, legend='Accepted: ' + str(len(cu1_accepted))) #daily accepted
  p1.line(weights[0], 0, line_width=1, legend='Rejected: ' + str(len(t1_failure_pos))) #daily rejected

  p1.xaxis.bounds = (np.amin(weights), np.amax(weights))

  p1.legend.orientation = "top_left"
  p1.xaxis.axis_label = 'Weight (g)'
  p1.yaxis.axis_label = 'Pr(x)'
  return p1
Code example #10
File: map2mp4.py Project: yangyha/Ramses
def fit_min_max(args,p,max_iter,proj_list,proj_axis):
	mins = numpy.array([])
	maxs = numpy.array([])
	
	kind = [item for item in args.kind.split(' ')]
	
	for i in range(int(args.fmin)+int(args.step),max_iter+1,int(args.step)):
		args.proj = proj_list[p]
		axis = proj_axis[args.proj]
		dat = load_map(args,p,i)
		unit_l, unit_d, unit_t, unit_m = load_units(i, args)

		if kind[p] == 'dens':
			dat *= unit_d	# in g/cc
		if kind[p] in ['vx','vy','vz']:
			dat *= (unit_l/unit_t)/1e5 # in km/s
		if kind[p] in ['stars','dm']:
			dat += 1e-12
		
		if args.logscale:
			mins = numpy.append(mins,numpy.log10(numpy.amin(dat)))
			maxs = numpy.append(maxs,numpy.log10(numpy.amax(dat)))
		else:
			mins = numpy.append(mins,numpy.amin(dat))
			maxs = numpy.append(maxs,numpy.amax(dat))
		
	ii = range(int(args.fmin)+int(args.step),max_iter+1,int(args.step))
	cmin = polyfit(ii,mins,args.poly)	
	cmax = polyfit(ii,maxs,args.poly)

	return p, cmin, cmax
Code example #11
File: Mondrian.py Project: Saket97/Report
def main(X, Xtest, time):
	global cut
	global count
	cut = 0
	count = 0
	root = node()
	root.trainData = X
	root.testData = Xtest
	print("shape of xtest in main: ",np.shape(Xtest))
	x1 = min(np.amin(X[:,[0]]), np.amin(Xtest[:,[0]]))-.05
	x2 = max(np.amax(X[:,[0]]), np.amax(Xtest[:,[0]]))+.1
	y1 = min(np.amin(X[:,[1]]), np.amin(Xtest[:,[1]]))-.05
	y2 = max(np.amax(X[:,[1]]), np.amax(Xtest[:,[1]]))+.1
	plt.figure()
	plt.axis([x1,x2,y1,y2])
	print("x1 x2 y1 y2: ",x1,x2,y1,y2)
	root.coordinates.append([x1,x2])
	root.coordinates.append([y1,y2])
	leaves = []
	MP(root,time,leaves)
	point_index = {}
	train_key = list(map(tuple,X))
	test_key = list(map(tuple,Xtest))
	x_shape = np.shape(X)
	for i in range(x_shape[0]):
		point_index[train_key[i]] = i
	Xtest_shape = np.shape(Xtest)
	for i in range(0,Xtest_shape[0]):
		point_index[test_key[i]] = i
	# plt.show()
	plt.close()
	return feature(leaves, point_index)
Code example #12
File: video_out.py Project: geonik84/landlab
 def produce_video(self, interval=200, repeat_delay=2000, filename='video_output.gif', override_min_max=None):
     """
     Finalize and save the video of the data.
     
     interval and repeat_delay are the interval between frames and the repeat
         delay before restart, both in milliseconds.
     filename is the name of the file to save in the present working
         directory. At present, only .gif output saves reliably without
         tweaking Python's PATHs.
     override_min_max allows the user to set their own maximum and minimum
         for the scale on the plot. Use a len-2 tuple, (min, max).
     """
     #find the limits for the plot:
     if not override_min_max:
         self.min_limit = np.amin(self.data_list[0])
         self.max_limit = np.amax(self.data_list[0])
         assert len(self.data_list) > 1, 'You must include at least two frames to make an animation!'
         for i in self.data_list[1:]: #assumes there is more than one frame in the loop
             self.min_limit = min((self.min_limit, np.amin(i)))
             self.max_limit = max((self.max_limit, np.amax(i)))
     else:
         self.min_limit=override_min_max[0]
         self.max_limit=override_min_max[1]
         
     self.fig.colorbar(self.plotfunc(self.grid, self.data_list[0],limits=(self.min_limit,self.max_limit),allow_colorbar=False, **self.kwds))
     ani = animation.FuncAnimation(self.fig, _make_image, frames=self._yield_image, interval=interval, blit=True, repeat_delay=repeat_delay)
     ani.save(filename, fps=1000./interval)
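
A hedged usage sketch (`vid` stands for an instance of the surrounding animation class with `fig`, `grid`, `data_list`, `plotfunc` and `kwds` already populated):

# vid.produce_video()                            # limits inferred from the data itself
# vid.produce_video(interval=100,                # 10 frames per second
#                   filename='my_run.gif',
#                   override_min_max=(0., 1.))   # fix the colour scale manually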
Code example #13
File: raster_renderer.py Project: vejmelkam/fdsys
def basemap_raster_mercator(lon, lat, grid, cmap = None):
  """
  Render a raster in mercator projection.  Locations with no values are
  rendered transparent.
  """
  # longitude/latitude extent
  lons = (np.amin(lon), np.amax(lon))
  lats = (np.amin(lat), np.amax(lat))

  if cmap is None:
    cmap = mpl.cm.jet
    cmap.set_bad('w', 1.0)

  # construct spherical mercator projection for region of interest
  m = Basemap(projection='merc',llcrnrlat=lats[0], urcrnrlat=lats[1],
              llcrnrlon=lons[0],urcrnrlon=lons[1])

  vmin,vmax = np.nanmin(grid),np.nanmax(grid)
  masked_grid = np.ma.array(grid,mask=np.isnan(grid))
  fig = plt.figure(frameon=False)
  plt.axis('off')
  m.pcolormesh(lon,lat,masked_grid,latlon=True,cmap=cmap,vmin=vmin,vmax=vmax)

  str_io = StringIO.StringIO()
  plt.savefig(str_io,bbox_inches='tight',format='png',pad_inches=0,transparent=True)
  bounds = [ (lons[0],lats[0]),(lons[1],lats[0]),(lons[1],lats[1]),(lons[0],lats[1]) ]

  return str_io.getvalue(), bounds
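
A hypothetical usage sketch (this is Python 2-era code, as the StringIO import shows, so the returned PNG string can be written directly as bytes):

import numpy as np

lon = np.linspace(-105.0, -104.0, 50)
lat = np.linspace(39.0, 40.0, 50)
lon2d, lat2d = np.meshgrid(lon, lat)
grid = np.random.rand(50, 50)
grid[grid < 0.1] = np.nan                      # NaN cells render transparent
png_str, bounds = basemap_raster_mercator(lon2d, lat2d, grid)
with open('raster.png', 'wb') as f:
    f.write(png_str)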
Code example #14
def _write_data(lock, im, index, outfile, outshape, outtype, rescale_factor, logfilename, cputime, itime):    	      

	lock.acquire()
	try:        
		t0 = time() 			
		f_out = getHDF5(outfile, 'a')					 
		f_out_dset = f_out.require_dataset('exchange/data', outshape, outtype, chunks=tdf.get_dset_chunks(outshape[0])) 
		im = im * rescale_factor
		tdf.write_tomo(f_out_dset,index,im.astype(outtype))
					
		# Set minimum and maximum:
		if (amin(im[:]) < float(f_out_dset.attrs['min'])):
			f_out_dset.attrs['min'] = str(amin(im[:]))
		if (amax(im[:]) > float(f_out_dset.attrs['max'])):
			f_out_dset.attrs['max'] = str(amax(im[:]))		
		f_out.close()			
		t1 = time() 

		# Print out execution time:
		log = open(logfilename,"a")
		log.write(linesep + "\ttomo_%s processed (CPU: %0.3f sec - I/O: %0.3f sec)." % (str(index).zfill(4), cputime, t1 - t0 + itime))
		log.close()	

	finally:
		lock.release()	
Code example #15
File: DataSlice.py Project: fvitt/pygview
def test():

    filepath = '../pygview_test_data/se_ne5np4_test.cam.h0.0000-01-01-00000.nc'

    dslice = dataslice( filepath, 'T', time_ndx=0, level_ndx=8 )

    print('         filepath : ' + filepath)
    print('dslice.data.shape : ', dslice.data.shape)
    print('dslice.structured : ', dslice.structured)
    print('     dslice.units : ', dslice.units)
    print('  dslice.lon.size : ', dslice.lon.size)
    print('  dslice.lat.size : ', dslice.lat.size)
    print('  dslice.lon : ', dslice.lon)
    print('  dslice.lat : ', dslice.lat)

    print('     dslice.lon.min, dslice.lon.max   : ', dslice.lon.min(), dslice.lon.max())

    print('     dslice.lon.min, dslice.lon.max   : ', numpy.amin(dslice.lon), numpy.amax(dslice.lon))
    print('     dslice.lat.min, dslice.lat.max   : ', numpy.amin(dslice.lat), numpy.amax(dslice.lat))
    print('     dslice.data.min,dslice.data.max  : ', numpy.amin(dslice.data), numpy.amax(dslice.data))

    filepath = '../pygview_test_data/fv_10x15_test.cam.h0.0000-01-01-00000.nc'

    dslice = dataslice( filepath, 'V', time_ndx=0, level_ndx=18 )
    
    print('         filepath : ' + filepath)
    print('dslice.data.shape : ', dslice.data.shape)
    print('dslice.structured : ', dslice.structured)
    print('     dslice.units : ', dslice.units)
    print('     dslice.lon   : ', dslice.lon)
    print('     dslice.lat   : ', dslice.lat)
    print('     dslice.data  : ', dslice.data.min(), dslice.data.max())
Code example #16
File: py3x.py Project: stfc/ceph-pg-analyst
def statprint(host_per_pg, pg_per_host):
    val = list(pg_per_host.values())  # materialize the view so NumPy can reduce it repeatedly
    mean = numpy.mean(val)
    maxvalue = numpy.amax(val)
    minvalue = numpy.amin(val)
    std = numpy.std(val)
    median = numpy.median(val)
    variance = numpy.var(val)
    print("for placement groups on hosts: ")
    print( "the mean is: ", mean)
    print( "the max value is: ", maxvalue)
    print( "the min value is: ", minvalue)
    print( "the standard deviation is: ", std)
    print( "the median is: ", median)
    print( "the variance is: ", variance)
    # prints statements for stats
    host_mean = numpy.mean(host_per_pg)
    host_max = numpy.amax(host_per_pg)
    host_min = numpy.amin(host_per_pg)
    host_std = numpy.std(host_per_pg)
    host_median = numpy.median(host_per_pg)
    host_variance = numpy.var(host_per_pg)
    # these are the variables for hosts/pgs
    print("hosts per placement group: ")
    print("the mean is: ", host_mean)
    print("the max value is: ", host_max)
    print("the min value is: ", host_min)
    print("the standard deviation is: ", host_std)
    print("the median is: ", host_median)
    print("the variance is: ", host_variance)
Code example #17
File: data_viz.py Project: gt-ros-pkg/hrl-assistive
    def kinematics_test(self):
        
        success_list, failure_list = util.getSubjectFileList(self.record_root_path, [self.subject], self.task)

        for fileName in failure_list:
            d = ut.load_pickle(fileName)
            print(d.keys())


            time_max = np.amax(d['kinematics_time'])
            time_min = np.amin(d['kinematics_time'])

            ee_pos   = d['kinematics_ee_pos']
            x_max = np.amax(ee_pos[0,:])
            x_min = np.amin(ee_pos[0,:])

            y_max = np.amax(ee_pos[1,:])
            y_min = np.amin(ee_pos[1,:])

            z_max = np.amax(ee_pos[2,:])
            z_min = np.amin(ee_pos[2,:])
            
            fig = plt.figure()            
            ax  = fig.add_subplot(111, projection='3d')
            ax.plot(ee_pos[0,:], ee_pos[1,:], ee_pos[2,:])
            
            plt.show()
Code example #18
File: DBfunctions.py Project: suchyta1/BalrogRandoms
def GetHealPixRectangles(nside, dbrange, nest):
    hpindex = np.arange(hp.nside2npix(nside))

    vec_corners = hp.boundaries(nside, hpindex, nest=nest)
    vec_corners = np.transpose(vec_corners, (0,2,1))
    vec_corners = np.reshape(vec_corners, (vec_corners.shape[0]*vec_corners.shape[1], vec_corners.shape[2]))
   
    theta_corners, phi_corners = hp.vec2ang(vec_corners)
    theta_corners = np.reshape(theta_corners, (theta_corners.shape[0] // 4, 4))
    phi_corners = np.reshape(phi_corners, (phi_corners.shape[0] // 4, 4))

    ra_corners = np.degrees(phi_corners)
    dec_corners = 90.0 - np.degrees(theta_corners)

    rainside = ( (ra_corners > dbrange[0]) & (ra_corners < dbrange[1]) )
    rakeep = np.sum(rainside, axis=-1)
    decinside = ( (dec_corners > dbrange[2]) & (dec_corners < dbrange[3]) )
    deckeep = np.sum(decinside, axis=-1)
    keep = ( (rakeep > 0) & (deckeep > 0) )
    ra_corners, dec_corners, hpindex = Cut(ra_corners, dec_corners, hpindex, cut=keep)

    ramin = np.amin(ra_corners, axis=-1)
    ramax = np.amax(ra_corners, axis=-1)
    decmin = np.amin(dec_corners, axis=-1)
    decmax = np.amax(dec_corners, axis=-1)

    return ramin, ramax, decmin, decmax, hpindex
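
A hedged call sketch (nside and the RA/Dec window are illustrative; dbrange packs the window as [ra_min, ra_max, dec_min, dec_max], matching the comparisons above):

# ramin, ramax, decmin, decmax, hpindex = GetHealPixRectangles(
#     nside=8, dbrange=[10.0, 20.0, -5.0, 5.0], nest=False)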
Code example #19
File: metrics.py Project: Conxz/nipype
    def _eucl_max(self, nii1, nii2):
        origdata1 = nii1.get_data()
        origdata1 = np.logical_not(
            np.logical_or(origdata1 == 0, np.isnan(origdata1)))
        origdata2 = nii2.get_data()
        origdata2 = np.logical_not(
            np.logical_or(origdata2 == 0, np.isnan(origdata2)))

        if isdefined(self.inputs.mask_volume):
            maskdata = nb.load(self.inputs.mask_volume).get_data()
            maskdata = np.logical_not(
                np.logical_or(maskdata == 0, np.isnan(maskdata)))
            origdata1 = np.logical_and(maskdata, origdata1)
            origdata2 = np.logical_and(maskdata, origdata2)

        if origdata1.max() == 0 or origdata2.max() == 0:
            return np.nan  # np.NaN alias removed in NumPy 2

        border1 = self._find_border(origdata1)
        border2 = self._find_border(origdata2)

        set1_coordinates = self._get_coordinates(border1, nii1.affine)
        set2_coordinates = self._get_coordinates(border2, nii2.affine)
        distances = cdist(set1_coordinates.T, set2_coordinates.T)
        mins = np.concatenate(
            (np.amin(distances, axis=0), np.amin(distances, axis=1)))

        return np.max(mins)
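
The last three statements compute a symmetric Hausdorff-style max-min distance between the two border point sets; a standalone sketch of just that step, with random 3 x N coordinate arrays in place of the `_get_coordinates` output:

import numpy as np
from scipy.spatial.distance import cdist

set1_coordinates = np.random.rand(3, 20)   # 3 x N, as produced by _get_coordinates
set2_coordinates = np.random.rand(3, 25)
distances = cdist(set1_coordinates.T, set2_coordinates.T)
mins = np.concatenate((np.amin(distances, axis=0), np.amin(distances, axis=1)))
hausdorff = np.max(mins)                   # the value _eucl_max returns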
Code example #20
File: utils.py Project: renatocoutinho/sfepy
def extend_cell_data( data, domain, rname, val = None ):
    """Extend cell data defined in a region rname to the whole domain using the
    value val, or the smallest value in data if val is None."""
    n_el = domain.shape.n_el
    if data.shape[0] == n_el: return data

    if val is None:
        if data.shape[2] > 1: # Vector.
            val = nm.amin( nm.abs( data ) )
        else: # Scalar.
            val = nm.amin( data )

    edata = nm.empty( (n_el,) + data.shape[1:], dtype = nm.float64 )
    edata.fill( val )

    region = domain.regions[rname]
    offs = region.get_cell_offsets()
    eoffs = domain.get_cell_offsets()
##     print offs
##     print eoffs
##     print domain.mat_ids_to_i_gs
##     pause()

    for group in domain.iter_groups():
        ig = group.ig
        ii = eoffs[ig]
        if ig in region.igs:
            n_cell = region.shape[ig].n_cell
            ir = offs[ig]
            edata[ii+region.cells[ig]] = data[ir:ir+n_cell]
    return edata
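
A hedged call sketch (the region name and the per-region data are placeholders; `domain` is an sfepy Domain):

# edata = extend_cell_data(stress_per_cell, domain, 'Omega1')           # fill with the data minimum
# edata = extend_cell_data(stress_per_cell, domain, 'Omega1', val=0.0)  # or an explicit fill value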
Code example #21
File: Bravais.py Project: PMEAL/OpenPNM
    def add_boundary_pores(self, labels, spacing):
        r"""
        Add boundary pores to the specified faces of the network

        Pores are offset from the faces by 1/2 of the given ``spacing``, such
        that they lie directly on the boundaries.

        Parameters
        ----------
        labels : string or list of strings
            The labels indicating the pores defining each face where boundary
            pores are to be added (e.g. 'left' or ['left', 'right'])

        spacing : scalar or array_like
            The spacing of the network (e.g. [1, 1, 1]).  This must be given
            since it can be quite difficult to infer from the network,
            for instance if boundary pores have already added to other faces.

        """
        spacing = np.array(spacing)
        if spacing.size == 1:
            spacing = np.ones(3)*spacing
        for item in labels:
            Ps = self.pores(item)
            coords = np.absolute(self['pore.coords'][Ps])
            axis = np.count_nonzero(np.diff(coords, axis=0), axis=0) == 0
            offset = np.array(axis, dtype=int)/2
            if np.amin(coords) == np.amin(coords[:, np.where(axis)[0]]):
                offset = -1*offset
            topotools.add_boundary_pores(network=self, pores=Ps, offset=offset,
                                         apply_label=item + '_boundary')
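
A hedged usage sketch (the Bravais constructor call is an assumption based on the file name, not verified against a specific OpenPNM release):

# import openpnm as op
# pn = op.network.Bravais(shape=[3, 3, 3], mode='fcc')               # hypothetical fcc lattice
# pn.add_boundary_pores(labels=['left', 'right'], spacing=[1, 1, 1])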
Code example #22
File: _nj.py Project: RNAer/scikit-bio
def _lowest_index(dm):
    """Return the index of the lowest value in the input distance matrix.

    If there are ties for the lowest value, the index of top-left most
    occurrence of that value will be returned.

    This should be ultimately be replaced with a new DistanceMatrix object
    method (#228).

    """
    # get the positions of the lowest value
    results = np.vstack(np.where(dm.data == np.amin(dm.condensed_form()))).T
    # select results in the bottom-left of the array
    results = results[results[:, 0] > results[:, 1]]
    # calculate the distances of the results to [0, 0]
    res_distances = np.sqrt(results[:, 0]**2 + results[:, 1]**2)
    # detect distance ties & return the point which would have
    # been produced by the original function
    if np.count_nonzero(res_distances == np.amin(res_distances)) > 1:
        eqdistres = results[res_distances == np.amin(res_distances)]
        res_coords = eqdistres[np.argmin([r[0] for r in eqdistres])]
    else:
        res_coords = results[np.argmin(res_distances)]

    return tuple([res_coords[0], res_coords[1]])
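
A minimal sketch against scikit-bio's DistanceMatrix (the three-taxon matrix is illustrative):

import numpy as np
from skbio import DistanceMatrix

dm = DistanceMatrix(np.array([[0.,  5.,  9.],
                              [5.,  0., 10.],
                              [9., 10.,  0.]]))
print(_lowest_index(dm))   # (1, 0): the smallest off-diagonal entry is 5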
Code example #23
 def createCube(self, cellSize_xy):
     cellNumber_x = round((self.extent.XMax - self.extent.XMin) / cellSize_xy)
     cellNumber_y = round((self.extent.YMax - self.extent.YMin) / cellSize_xy)
     X = self.ssdo.xyCoords[:,0]
     Y = self.ssdo.xyCoords[:,1]
     time = self.ssdo.fields[self.timeField].data
     time = NUM.array([i for i in time], NUM.datetime64)
     startDateTime = NUM.datetime64('1970-01-01 00:00:00')
     T = time - startDateTime
     self.startTime = NUM.amin(T) + NUM.datetime64('1970-01-01 00:00:00')
     T = NUM.array([i.item().days for i in T], int)
     startT = NUM.amin(T)
     endT = NUM.amax(T)
     cellNumber_t = round((endT - startT) / self.cellSize_t) + 1
     X = (X - self.extent.XMin) / self.cellSize_xy
     Y = (self.extent.YMax - Y) / self.cellSize_xy
     T = (T - startT) / self.cellSize_t
     X = NUM.floor(X)
     Y = NUM.floor(Y)
     T = NUM.floor(T)
     CellIdList = (cellNumber_x * cellNumber_y * T) + (cellNumber_x * Y) + X
     BothEnds = NUM.array([0, (cellNumber_t * cellNumber_x * cellNumber_y -1)])
     CellIdList = NUM.concatenate((CellIdList, BothEnds), axis=0)
     CellIdList = NUM.array(CellIdList, dtype = 'int32')
     counts = NUM.bincount(CellIdList)
     counts[BothEnds[0]] = counts[BothEnds[0]] - 1
     counts[BothEnds[1]] = counts[BothEnds[1]] - 1
     return counts.reshape(cellNumber_t, cellNumber_x, cellNumber_y)
Code example #24
File: fluxtool.py Project: mahmoud-lsw/swiftmonitor
    def __init__(self, template=None, off_pulse_bins=None, 
                       off_pulse_auto_margin=0., 
                       correlation_harmonics=None):

        self.template = template
        self.off_pulse_auto_margin = off_pulse_auto_margin

        if template is not None:
            self.name = "Minimum estimator using cross-correlator"
        elif off_pulse_bins is not None:
            self.name = "Minimum estimator using known minimum"
        else:
            self.name = "Minimum estimator finding minimum"
        self.RMS = False

        if off_pulse_bins is None:
            if template is not None:
                self.off_pulse_bins = list(where(
                    template-amin(template) <= off_pulse_auto_margin*(amax(template)-amin(template))
                    )[0])
            else:
                self.off_pulse_bins = None
        else:
            self.off_pulse_bins = list(off_pulse_bins)
            if not self.off_pulse_bins:
                raise ValueError("No off-pulse bins specified!")
        self.correlation_harmonics = correlation_harmonics 
Code example #25
File: colorbar.py Project: gkliska/razvoj
    def _config_axes(self, X, Y):
        '''
        Make an axes patch and outline.
        '''
        ax = self.ax
        ax.set_frame_on(False)
        ax.set_navigate(False)
        x, y = self._outline(X, Y)
        ax.set_xlim(npy.amin(x), npy.amax(x))
        ax.set_ylim(npy.amin(y), npy.amax(y))
        ax.update_datalim_numerix(x, y)
        self.outline = lines.Line2D(x, y, color=mpl.rcParams['axes.edgecolor'],
                                    linewidth=mpl.rcParams['axes.linewidth'])
        ax.add_artist(self.outline)
        c = mpl.rcParams['axes.facecolor']
        self.patch = patches.Polygon(list(zip(x, y)), edgecolor=c,
                 facecolor=c,
                 linewidth=0.01,
                 zorder=-1)
        ax.add_artist(self.patch)
        ticks, ticklabels, offset_string = self._ticker()
        if self.orientation == 'vertical':
            ax.set_xticks([])
            ax.yaxis.set_label_position('right')
            ax.yaxis.set_ticks_position('right')
            ax.set_yticks(ticks)
            ax.set_yticklabels(ticklabels)
            ax.yaxis.get_major_formatter().set_offset_string(offset_string)

        else:
            ax.set_yticks([])
            ax.xaxis.set_label_position('bottom')
            ax.set_xticks(ticks)
            ax.set_xticklabels(ticklabels)
            ax.xaxis.get_major_formatter().set_offset_string(offset_string)
Code example #26
File: utility.py Project: gmaher/tcl_code
def normalize_images(images, normalize='max'):
    '''
    Max/min normalizes a set of images

    args:
        @a images shape = (N,W,H), a stack of single-channel images (the function unpacks three dimensions)
    '''
    N, Pw, Ph = images.shape
    images_norm = np.zeros((N,Pw,Ph))

    if normalize=='max':
        maxs = np.amax(images, axis=(1,2))
        mins = np.amin(images,axis=(1,2))

        for i in range(0,N):
            images_norm[i,:] = (images[i]-mins[i])/(maxs[i]-mins[i]+1e-6)
        return images_norm

    if normalize == 'global_max':
        max_ = np.amax(images)
        min_ = np.amin(images)

        images_norm = (images-min_)/(max_-min_)
        return images_norm
    if normalize=='mean':
        pass  # 'mean' normalization is not implemented in this excerpt
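
A quick usage sketch (random single-channel images, shapes per the docstring above):

import numpy as np

images = np.random.rand(4, 32, 32) * 255.0                    # four 32x32 single-channel images
per_image = normalize_images(images, normalize='max')         # each image scaled to roughly [0, 1]
batchwise = normalize_images(images, normalize='global_max')  # one min/max for the whole batch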
Code example #27
File: bz.py Project: TRIQS/triqs
def make_plottable(self, method="cubic", nk=50):
    '''
    :param method: cubic|nearest|linear
    :param nk: number of k points
    :return: x, y, z, zmin, zmax
    '''
    pl = np.zeros((len(self.mesh), 2))
    ik = 0
    for k in self.mesh:
        pl[ik, 0] = k[0]
        pl[ik, 1] = k[1]
        ik += 1
    x = pl[:, 0]
    y = pl[:, 1]
    xi = np.linspace(min(x), max(x), nk)
    yi = np.linspace(min(y), max(y), nk)
    zmin = np.zeros((self.data.shape[1], self.data.shape[2]), np.complex64)
    zmax = np.zeros((self.data.shape[1], self.data.shape[2]), np.complex64)
    zi = []
    for ind_x in range(self.data.shape[1]):
        zi.append([])
        for ind_y in range(self.data.shape[2]):
            z = self.data[:, ind_x, ind_y]
            zmin[ind_x, ind_y] = np.amin(z.real) + np.amin(z.imag)*1j
            zmax[ind_x, ind_y] = np.amax(z.real) + np.amax(z.imag)*1j
            zi[ind_x].append(griddata((x, y), z, (xi[None, :], yi[:, None]), method=method))
    return xi, yi, np.array(zi), zmin, zmax
Code example #28
File: fig_spline.py Project: piScope/piScope
    def get_data_extent(self):
        if (self._data_extent is not None and
            self._data_extent_checked):
            return self._data_extent
        if (self._data_extent is not None and
            self.isempty()):
            return self._data_extent
        x, y = self._eval_xy()
        if self.isempty():
            if x is None:
                self._data_extent=[0, len(y), np.min(y), np.max(y)]
            else:               
                self._data_extent=[np.min(x), np.max(x), np.min(y), np.max(y)]
        else:
            xr = (np.inf, -np.inf)
            yr = (np.inf, -np.inf)
            for a in self._artists:
                xdata = a.get_xdata()
                ydata = a.get_ydata()
                xr = (min((xr[0],np.amin(xdata))), 
                      max((xr[1],np.amax(xdata))))
                yr = (min((yr[0],np.amin(ydata))), 
                      max((yr[1],np.amax(ydata))))

            self._data_extent=[min((min(xr), np.amin(x))),
                               max((max(xr), np.amax(x))),
                               min((min(yr), np.amin(y))),
                               max((max(yr), np.amax(y)))]
            self._data_extent_checked = True
        return self._data_extent
Code example #29
def plot_field(X, Y, U, V, filename):
    '''
    Function to plot the potential field.

    Args:
        X (numpy.ndarray): X component of the sample points.
        Y (numpy.ndarray): Y component of the sample points.
        U (numpy.ndarray): X component of field at sample points.
        V (numpy.ndarray): Y component of field at sample points.
    '''
    # Generate plot.
    padding = 0.5
    plt.figure()
    plt.quiver(X, Y, U, V,
               color='#007ce8',
               units='x',
               pivot='tail')
    plt.axis('equal')
    plt.axis([np.amin(X) - padding,
              np.amax(X) + padding,
              np.amin(Y) - padding,
              np.amax(Y) + padding])
    # plt.savefig("potential_field_back1.svg", format='svg')
    plt.savefig(filename + ".svg", format='svg')
    plt.show()
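
A small worked example (a simple rotational field on a coarse grid; the filename is arbitrary):

import numpy as np
import matplotlib.pyplot as plt

X, Y = np.meshgrid(np.linspace(-2, 2, 10), np.linspace(-2, 2, 10))
U, V = -Y, X                                # rotation about the origin
plot_field(X, Y, U, V, 'vortex_field')      # writes vortex_field.svg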
Code example #30
File: fig_contour.py Project: piScope/piScope
    def get_crange(self, crange=[None,None], 
                         xrange=[None,None], 
                         yrange=[None,None], scale = 'linear'):

        x, y, z = self._eval_xyz()
        if ( x is None or
             y is None) : 
             x = np.arange(z.shape[1])
             y = np.arange(z.shape[0])
        if (self.getvar('offset') is not None and
            (self.getvar('zdir') == 'x' or 
             self.getvar('zdir') == 'y')):
            crange = self._update_range(crange, (np.amin(z), np.amax(z)))
        elif (xrange[0] is not None and
            xrange[1] is not None and
            yrange[0] is not None and
            yrange[1] is not None):
            zt = np.ma.masked_array(z)
            if y.ndim == 1:
               zt[(y < yrange[0]) | (y > yrange[1]),:] = np.ma.masked
            else:
               zt[(y < yrange[0]) | (y > yrange[1])] = np.ma.masked
            if x.ndim == 1:
               zt[:,(x < xrange[0]) | (x > xrange[1])] = np.ma.masked
            else:
               zt[(x < xrange[0]) | (x > xrange[1])] = np.ma.masked
            if scale == 'log': zt[z <= 0] = np.ma.masked
            crange = self._update_range(crange, (np.amin(zt), np.amax(zt)))


        return crange
Code example #31
File: bài 2.py Project: huyreeve/b-i-t-p-lap
import numpy as np
m, n = map(int, input().split())
arr = [int(i) for i in input().strip().split()[:m*n]]
a = np.array(arr)  # convert from a list to a NumPy array
# reshape the 1-D array into 2 dimensions: m rows, n columns
a = a.reshape(m, n)
print(np.amax(a, axis=1))  # axis=1: along each row
print(np.amin(a, axis=0))  # axis=0: along each column
# sum of the main diagonal => the elements a[i][i]
k = min(m, n)
res = sum(a[i][i] for i in range(k))
print(res)
Code example #32
File: image_util2.py Project: javierfs/deep-learning
 def _process_data(self, data):
     # normalization
     data = np.clip(np.fabs(data), self.a_min, self.a_max)
     data -= np.amin(data)
     data /= np.amax(data)
     return data
Code example #33
	data = np.loadtxt(filename)
	
	if 'prog_h' in filename:
		#Get full depth
		pos1 = filename.find('_h')
		pos2 = filename.find('_f')
		depth = filename[pos1+2:pos2]
		print("Summing full depth")
		print(depth)
		depth=float(depth)
		data = data + depth
		#Define in km
		data = data / 1000

	#Get max/min
	cmin = np.amin(data)
	cmax = np.amax(data)
	
	#Set physical grid for axis
	x_min = 0
	x_max = 4.00316e+07/1000/1000
	y_min = 0
	y_max = 4.00316e+07/1000/1000
	n = data.shape[0]
	x = np.linspace(x_min, x_max, n)
	y = np.linspace(y_min, y_max, n)
	
	#Labels
	labelsx = np.linspace(x_min, x_max, 7)
	labelsy = np.linspace(y_min, y_max, 7)
Code example #34

# A Series combines the behaviour of an ndarray and of a dict, so it supports
# both kinds of basic operations as well as vectorized arithmetic
import numpy as np
import pandas as pd
# first create the Series
list_0=[1,2,3,0.5]
series_0=pd.Series(list_0,index=['a','b','c','d'])
print(series_0)

# ndarray-style basic operations
print(series_0[0])       # positional indexing
print(series_0[0:2])     # slicing, end index excluded
print(series_0**2)       # vectorized operation
print(series_0+2)        # broadcasting
print(np.amin(series_0)) # applying a NumPy function

# dict-style basic operations
print(series_0['a'])     # key-based lookup
print(series_0.get('b')) # the dict get method

# Series arithmetic is itself vectorized; no element-wise for loop is needed
print(series_0+series_0) # element-wise addition
print(series_0**2)       # squaring
Code example #35
    
# Sensitivity Curve for Huber's scale estimate
c =  1.3415 
SC_hub = np.zeros(len(delta_x))
std_hat = MscaleHUB(x_N_minus1,c,path='u.mat')
for ii in range(len(delta_x)):
    SC_hub[ii] = N*(MscaleHUB(np.append(x_N_minus1, delta_x[ii]),c,path='u.mat')
                    -std_hat)
    
# Sensitivity Curve for Tukey's scale estimate
c = 4.68 
SC_tuk = np.zeros(delta_x.shape)
std_hat = MscaleTUK(x_N_minus1,c,path='u.mat') # expected: 0.8772

for ii in range(len(delta_x)):
    SC_tuk[ii] = N*(MscaleTUK(np.append(x_N_minus1, delta_x[ii]),c,path='u.mat')-std_hat)
    
plt.rcParams.update({'font.size': 18})

plt.plot(delta_x,SC_std-np.amin(SC_std), label ='Standard deviation', linewidth=2)
plt.plot(delta_x,SC_mead-np.amin(SC_mead), label='madn', linewidth=2)
plt.plot(delta_x,SC_hub-np.amin(SC_hub), label='Huber M', linewidth=2)
plt.plot(delta_x,SC_tuk-np.amin(SC_tuk), label ='Tukey M', linewidth=2)

plt.grid(True)

plt.xlabel('Outlier value')
plt.ylabel('Sensitivity curve')
plt.legend()

plt.show()
Code example #36
				

		ttests['disc'+str(d)][r] = stats.ttest_ind(innervalues,outervalues)

print('BREATHE')
print(discs)

for a in range(1, len(discs)+1):
	tvals = []
	pvals = []

	for b in range(1, len(ttests['disc' + str(a)])):
		tvals.append(ttests['disc' + str(a)][b][0])
		pvals.append(ttests['disc' + str(a)][b][1])
	
	if(np.amax(tvals) > math.fabs(np.amin(tvals))):
		if(pvals[np.argmax(tvals)] < 0.005 and np.amax(tvals) > 3):
			graddists.append(((np.argmax(tvals)*2)+15))
		else:
			graddists.append(10)
		print(a, np.amax(tvals), np.amin(pvals))
	else:
		if(pvals[np.argmin(tvals)] < 0.005 and np.amin(tvals) < -3):
			graddists.append(((np.argmin(tvals)*2)+15))
		else:
			graddists.append(10)
		print(a, np.amin(tvals), np.amin(pvals))

print(graddists)

for k in range(1, len(discs)+1):
Code example #37
# train_doc_value_diff /= scale_factor/100.
# vali_doc_value_diff /= scale_factor/100.

additional_train_feat = np.stack((
                            train_doc_value_diff,
                            np.abs(train_doc_value_diff),
                            train_doc_value_diff**2.,
                            np.equal(train_doc_value_diff, 0),
                            np.greater(train_doc_value_diff, 0),
                            models_train_inv_rankings[0, :],
                            models_train_inv_rankings[1, :],
                            np.zeros(data.train.num_docs())
                            ), axis=1,)
for qid in range(data.train.num_queries()):
  s_i, e_i = data.train.query_range(qid)
  additional_train_feat[s_i:e_i, :] -= np.amin(additional_train_feat[s_i:e_i, :], axis=0)[None, :]
  max_denom = np.amax(additional_train_feat[s_i:e_i, :], axis=0)
  max_denom[np.equal(max_denom, 0.)] = 1.
  additional_train_feat[s_i:e_i, :] /= max_denom[None, :]
  additional_train_feat[s_i:e_i, -1] = (s_i - e_i)
additional_vali_feat = np.stack((
                            vali_doc_value_diff,
                            np.abs(vali_doc_value_diff),
                            vali_doc_value_diff**2.,
                            np.equal(vali_doc_value_diff, 0),
                            np.greater(vali_doc_value_diff, 0),
                            models_vali_inv_rankings[0, :],
                            models_vali_inv_rankings[1, :],
                            np.zeros(data.validation.num_docs()),
                            ), axis=1,)
for qid in range(data.validation.num_queries()):
Code example #38
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
    # img: input image
    # minsize: minimum face size
    # pnet, rnet, onet: the three cascade networks (caffemodel)
    # threshold: threshold=[th1, th2, th3]; th1-th3 are the thresholds of the three stages
    # factor: scale factor for the image pyramid
    factor_count = 0
    total_boxes = np.empty((0, 9))
    h = img.shape[0]
    w = img.shape[1]
    minl = np.amin([h, w])
    m = 12.0 / minsize
    minl = minl * m
    points = np.empty(0)
    # create the scale pyramid
    scales = []
    while minl >= 12:
        scales += [m * np.power(factor, factor_count)]
        minl = minl * factor
        factor_count += 1

    # first stage
    for j in range(len(scales)):
        scale = scales[j]
        hs = int(np.ceil(h * scale))
        ws = int(np.ceil(w * scale))
        im_data = imresample(img, (hs, ws))
        im_data = (im_data - 127.5) * 0.0078125
        img_x = np.expand_dims(im_data, 0)
        img_y = np.transpose(img_x, (0, 2, 1, 3))
        out = pnet(img_y)
        out0 = np.transpose(out[0], (0, 2, 1, 3))
        out1 = np.transpose(out[1], (0, 2, 1, 3))

        boxes, _ = generateBoundingBox(out1[0, :, :, 1], out0[0, :, :, :],
                                       scale, threshold[0])

        # inter-scale nms
        pick = nms(boxes, 0.5, 'Union')
        if boxes.size > 0 and pick.size > 0:
            boxes = boxes[pick, :]
            total_boxes = np.append(total_boxes, boxes, axis=0)

    numbox = total_boxes.shape[0]
    if numbox > 0:
        pick = nms(total_boxes, 0.7, 'Union')
        total_boxes = total_boxes[pick, :]
        regw = total_boxes[:, 2] - total_boxes[:, 0]
        regh = total_boxes[:, 3] - total_boxes[:, 1]
        qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
        qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
        qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
        qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
        total_boxes = np.transpose(
            np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
        total_boxes = rerec(total_boxes)
        total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes, w, h)

    numbox = total_boxes.shape[0]
    if numbox > 0:
        # second stage
        tempimg = np.zeros((24, 24, 3, numbox))
        for k in range(0, numbox):
            tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
            tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k],
                                                             x[k] - 1:ex[k], :]
            if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[
                    0] == 0 and tmp.shape[1] == 0:
                tempimg[:, :, :, k] = imresample(tmp, (24, 24))
            else:
                return np.empty(0)  # np.empty() with no shape is invalid
        tempimg = (tempimg - 127.5) * 0.0078125
        tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
        out = rnet(tempimg1)
        out0 = np.transpose(out[0])
        out1 = np.transpose(out[1])
        score = out1[1, :]
        ipass = np.where(score > threshold[1])
        total_boxes = np.hstack(
            [total_boxes[ipass[0], 0:4],
             np.expand_dims(score[ipass], 1)])
        mv = out0[:, ipass[0]]
        if total_boxes.shape[0] > 0:
            pick = nms(total_boxes, 0.7, 'Union')
            total_boxes = total_boxes[pick, :]
            total_boxes = bbreg(total_boxes, np.transpose(mv[:, pick]))
            total_boxes = rerec(total_boxes)

    numbox = total_boxes.shape[0]
    if numbox > 0:
        # third stage
        total_boxes = np.fix(total_boxes).astype(np.int32)
        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes, w, h)
        tempimg = np.zeros((48, 48, 3, numbox))
        for k in range(0, numbox):
            tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
            tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k],
                                                             x[k] - 1:ex[k], :]
            if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[
                    0] == 0 and tmp.shape[1] == 0:
                tempimg[:, :, :, k] = imresample(tmp, (48, 48))
            else:
                return np.empty(0)  # np.empty() with no shape is invalid
        tempimg = (tempimg - 127.5) * 0.0078125
        tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
        out = onet(tempimg1)
        out0 = np.transpose(out[0])
        out1 = np.transpose(out[1])
        out2 = np.transpose(out[2])
        score = out2[1, :]
        points = out1
        ipass = np.where(score > threshold[2])
        points = points[:, ipass[0]]
        total_boxes = np.hstack(
            [total_boxes[ipass[0], 0:4],
             np.expand_dims(score[ipass], 1)])
        mv = out0[:, ipass[0]]

        w = total_boxes[:, 2] - total_boxes[:, 0] + 1
        h = total_boxes[:, 3] - total_boxes[:, 1] + 1
        points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(
            total_boxes[:, 0], (5, 1)) - 1
        points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(
            total_boxes[:, 1], (5, 1)) - 1

        if total_boxes.shape[0] > 0:
            total_boxes = bbreg(total_boxes, np.transpose(mv))
            pick = nms(total_boxes, 0.7, 'Min')
            total_boxes = total_boxes[pick, :]
            points = points[:, pick]

    return total_boxes, np.transpose(points)
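
A hedged call sketch using customary MTCNN settings (the threshold and pyramid-factor values are the usual defaults, assumed here rather than taken from this file):

# boxes, points = detect_face(img, minsize=20, pnet=pnet, rnet=rnet, onet=onet,
#                             threshold=[0.6, 0.7, 0.7], factor=0.709)
# boxes: (n_faces, 5) array of x1, y1, x2, y2, score
# points: (n_faces, 10) array of landmark coordinates (5 x values, then 5 y values)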
Code example #39
 def transform_single(self,y):
     yn = np.copy(y)
     yn = np.multiply(y - np.amin(y),
             self.YnormTo/(np.amax(y) - np.amin(y)))
     return yn
Code example #40
File: LASDiAScriptIgor.py Project: Gaozmmm/LASDiA
    Iincoh_Q = MainFunctions.calc_Iincoh(elementList, Q, elementParameters)
    J_Q = IgorFunctions.calc_JQ(Iincoh_Q, fe_Q)
    Sinf = MainFunctions.calc_Sinf(elementList, fe_Q, Q, Ztot, elementParameters)
    
    dampingFunction = UtilityAnalysis.calc_dampingFunction(Q, variables.dampingFactor,
        variables.QmaxIntegrate, variables.typeFunction)

    #-------------------Intra-molecular components-----------------------------

    iintra_Q = Optimization.calc_iintra(Q, fe_Q, Ztot, variables.QmaxIntegrate, 
        variables.maxQ, elementList, element, x, y, z, elementParameters)
    iintradamp_Q = UtilityAnalysis.calc_iintradamp(iintra_Q, Q, variables.QmaxIntegrate, 
        dampingFunction)
    rintra, Fintra_r = IgorFunctions.calc_FFT_QiQ(Q, Q*iintradamp_Q, variables.QmaxIntegrate)
    
    _, Fintra_r = UtilityAnalysis.rebinning(rintra, Fintra_r, np.amin(rintra), 
        np.amax(rintra), 8192)
    
    _, dampingFunction = UtilityAnalysis.rebinning(Q, dampingFunction, 0.0, 
        variables.maxQ, variables.NumPoints)
    
    # ---------------------Geometrical correction------------------------------
    
    absCorrFactor = IgorFunctions.absorption(Q)
    I_Q = I_Q/absCorrFactor
    Ibkg_Q = Ibkg_Q/absCorrFactor
    
    # ------------------------Starting minimization----------------------------
    # To keep in mind:
    # gi_1 = density0
    # gi = density
Code example #41
File: Evaluation.py Project: shen-hue/arena-flatland
    def evaluate_episode(self, train):
        """
        Evaluates current episode
        :return result of the episode.

        """
        result = {}
        if not train:
            result["global_path"] = self.__path
            result["agent_states"] = []

        done = False
        max_time = len(self.__path.poses) / 10 * 2  # in secs
        if not self.MODE == 2:
            start_time = self.__clock
        else:
            start_time = rospy.get_rostime()
        driven_route = Path()
        driven_route.header = self.__path.header
        poses = []
        while not done:
            [static_scan, ped_scan, merged_scan, img, wp, twist,
             goal] = self.__state_collector.get_state()
            min_obstacle_dist = np.amin(merged_scan.ranges)
            dist_to_goal = math.sqrt(
                math.pow(goal.position.x, 2) + math.pow(goal.position.y, 2))

            # Check if task over
            pose = PoseStamped()
            pose.header = self.__odom.header
            pose.pose = self.__odom.pose.pose
            poses.append(pose)
            if not self.MODE == 2:
                now = self.__clock
            else:
                now = rospy.get_rostime()
            # Check if task over
            if min_obstacle_dist <= self.__robot_radius or \
                self.__rect_robot_collision(static_scan, ped_scan, self.__robot_width, self.__robot_height):
                rospy.loginfo("Robot collided with obstacle.")
                done = True
                result["success"] = -1
            elif dist_to_goal < 0.65:
                rospy.loginfo("Goal reached.")
                done = True
                result["success"] = 1
            elif self.__done or (now - start_time).to_sec() > max_time:
                rospy.loginfo("Time exceeded.")
                done = True
                result["success"] = 0
            if (not train):
                result["agent_states"].append(self.__recent_agent_states)
            self.__sleep(0.1)

        # Info that we don't want to capture during training
        if not train:
            result["num_stat_obj"] = 0
            result["num_peds"] = 0

            #Counting number of static objects and number of dynamic objects (pedestrians)
            for topic in self.__flatland_topics:
                if topic.find("stat_obj") != -1:
                    result["num_stat_obj"] += 1
                    continue
                if topic.find("person") != -1:
                    result["num_peds"] += 1

            driven_route.poses = poses
            result["time"] = self.__clock - start_time
            result["driven_path"] = driven_route
        result["timestep"] = self.__timestep
        return result
Code example #42
    def plot(self,
             key='dfn',
             axes=['R', 'Z'],
             LCFS=False,
             limiters=False,
             real_scale=False,
             colmap=cmap_default,
             transform=True,
             number_bins=20,
             fill=True,
             vminmax=None,
             ax=False,
             fig=False):
        """
        plot the distribution function

        notes:
            if external figure or axes are supplied then, if possible, function returns plottable object for use with external colorbars etc 
            if user supplies full set of indices, code assumes those slices are dimension to plot over i.e. please crop before plotting
        args:
            key - select which data to plot
            axes - define plot axes in x,y order or as full list of indices/slices (see dfn_transform())
            LCFS - object which contains LCFS data lcfs_r and lcfs_z
            limiters - object which contains limiter data rlim and zlim
            real_scale - plot to Tokamak scale
            colmap - set the colour map (use get_cmap names)
            transform - set to False if supplied dfn has already been cut down to correct dimensions
            number_bins - set number of bins or levels
            fill - toggle contour fill on 2D plots
            vminmax - set mesh Vmin/Vmax values
            ax - take input axes (can be used to stack plots)
            fig - take input fig (can be used to add colourbars etc)
        usage:
            plot_distribution_function(my_dfn) #basic default R,Z plot
            plot_distribution_function(my_dfn,axes=['E','V_pitch']) #basic pitch,energy plot
            plot_distribution_function(my_dfn,my_eq,axes=['R','Z'],LCFS=True,real_scale=True) #R,Z plotted with true scales and last closed flux surface from supplied equilibrium
            plot_distribution_function(my_dfn,my_eq,axes=['R','Z'],LCFS=True,real_scale=True,transform=False) #R,Z plot where my_dfn has already been cropped to correct dimensions
            plot_distribution_function(my_dfn,axes=[0,9,3,slice(None),slice(None)],vminmax=[0,1000],ax=my_ax,fig=my_fig) #R,Z plot at point 9,3 in E,pitch space without integrating and adding to my_ax on figure my_fig
        axes options:
            R,Z - integrate over pitch, gyrophase and velocity [m]^-3
            E,V_pitch - integrate over space and transform to [eV]^-1[dpitch]^-1 
            E - [eV]^-1
            R - [m]^-3 
            N - total #
            list of indices and slices
        """

        import scipy
        import numpy as np
        import matplotlib
        from matplotlib import cm
        import matplotlib.pyplot as plt
        from mpl_toolkits import mplot3d  #import 3D plotting axes
        from mpl_toolkits.mplot3d import Axes3D

        if ax is False:
            ax_flag = False  #need to make extra ax_flag since ax state is overwritten before checking later
        else:
            ax_flag = True

        if fig is False:
            fig_flag = False
        else:
            fig_flag = True

        #0D data
        if self[key].ndim == 0:
            print(self[key])
            return

        #>0D data is plottable
        if fig_flag is False:
            fig = plt.figure(
            )  #if user has not externally supplied figure, generate

        if ax_flag is False:  #if user has not externally supplied axes, generate them
            ax = fig.add_subplot(111)
        ax.set_title(self.ID)

        #1D data
        if self[key].ndim == 1:
            ax.plot(self[axes[0]], self[key], colmap)
            ax.set_ylabel(key)

        #plot distribution function
        elif key == 'dfn':

            #transform distribution function to the coordinates we want
            if transform is True:
                dfn_copy = processing.process_output.dfn_transform(
                    self, axes=axes
                )  #user-supplied axes are checked for validity here
            else:
                dfn_copy = copy.deepcopy(self)

            if vminmax:
                vmin = vminmax[0]
                vmax = vminmax[1]
            else:
                vmin = np.amin(dfn_copy[key])
                vmax = np.amax(dfn_copy[key])

            #check resulting dimensionality of distribution function
            if dfn_copy['dfn'].ndim == 0:  #user has given 0D dfn
                pass  #XXX incomplete - should add scatter point
            elif dfn_copy['dfn'].ndim == 1:  #user chosen to plot 1D
                ax.plot(dfn_copy[axes[0]], dfn_copy[key], colmap)
                ax.set_xlabel(axes[0])
                ax.set_ylabel(key)
            elif dfn_copy['dfn'].ndim == 2:  #user chosen to plot 2D

                if all(isinstance(axis, type('_')) for axis in
                       axes):  #user has supplied list of chars to denote axes
                    pass
                else:  #user has supplied full list of indices to slice DFN -> need to determine conventional axes names
                    axes = dfn_copy['dfn_index'][np.where(
                        [isinstance(axis, slice) for axis in axes]
                    )]  #do this by assuming that user slices over dimensions they want to plot
                    #the above line works because dfn_index is a numpy array of strings - would break for lists

                if real_scale is True:  #set x and y plot limits to real scales
                    ax.set_aspect('equal')
                else:
                    ax.set_aspect('auto')

                X = dfn_copy[axes[0]]  #make a mesh
                Y = dfn_copy[axes[1]]
                Y, X = np.meshgrid(Y,
                                   X)  #dfn is r,z so need to swap order here

                if fill:
                    ax.set_facecolor(colmap(np.amin(dfn_copy[key])))
                    mesh = ax.pcolormesh(X,
                                         Y,
                                         dfn_copy[key],
                                         cmap=colmap,
                                         vmin=vmin,
                                         vmax=vmax)
                    #mesh=ax.contourf(X,Y,dfn_copy[key],levels=np.linspace(np.amin(dfn_copy[key]),np.amax(dfn_copy[key]),num=number_bins),colours=colmap(np.linspace(0.,1.,num=number_bins)),edgecolor='none',linewidth=0,antialiased=True,vmin=vmin,vmax=vmax)
                    '''for c in mesh.collections: #for use in contourf
                        c.set_edgecolor("face")'''
                else:
                    mesh = ax.contour(
                        X,
                        Y,
                        dfn_copy[key],
                        levels=np.linspace(np.amin(dfn_copy[key]),
                                           np.amax(dfn_copy[key]),
                                           num=number_bins),
                        colors=colmap(np.linspace(0., 1., num=number_bins)),
                        antialiased=True,
                        vmin=vmin,
                        vmax=vmax)  #edgecolor/linewidth are not valid contour kwargs, so they are dropped here
                    #ax.clabel(mesh,inline=1,fontsize=10)

                if fig_flag is False:
                    fig.colorbar(mesh, ax=ax, orientation='horizontal')
                ax.set_xlabel(axes[0])
                ax.set_ylabel(axes[1])

                if real_scale is True:  #set x and y plot limits to real scales
                    ax.set_aspect('equal')
                else:
                    ax.set_aspect('auto')
                if LCFS:  #plot plasma boundary
                    ax.plot(LCFS['lcfs_r'], LCFS['lcfs_z'], plot_style_LCFS)
                if limiters:  #add boundaries if desired
                    ax.plot(limiters['rlim'], limiters['zlim'],
                            plot_style_limiters)

                if ax_flag is True or fig_flag is True:  #return the plot object
                    return mesh

            else:  #user has supplied a >2D dfn
                print(
                    "ERROR: plot_distribution_function given >2D DFN - please reduce dimensionality"
                )
                return

        if ax_flag is False and fig_flag is False:
            plt.show()
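A side note on the meshgrid swap used above: pcolormesh and contour want coordinate arrays laid out like the value array, and a dfn stored with axes (r, z) is index-ordered. A minimal standalone sketch (hypothetical axis sizes) showing that reversing the meshgrid arguments is equivalent to the more explicit indexing='ij':

import numpy as np

r = np.linspace(0.0, 1.0, 4)   # first dfn axis
z = np.linspace(-1.0, 1.0, 3)  # second dfn axis

# the swap used above: reversed argument and return order
Z, R = np.meshgrid(z, r)

# the equivalent, more explicit spelling
R2, Z2 = np.meshgrid(r, z, indexing='ij')

assert R.shape == (4, 3) == R2.shape  # matches a dfn stored as (r, z)
assert np.array_equal(R, R2) and np.array_equal(Z, Z2)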
Code example #43
def main():
    pvc.init_pvcam()
    cam = next(Camera.detect_camera())
    cam.open()
    cam.meta_data_enabled = True
    cam.set_roi(0, 0, WIDTH, HEIGHT)
    cam.start_live(exp_time=100,
                   buffer_frame_count=BUFFER_FRAME_COUNT,
                   stream_to_disk_path=FRAME_DATA_PATH)

    # Data is streamed to disk in a C++ callback function invoked directly by PVCAM. To avoid overburdening the
    # system, only poll for frames in Python at a slow rate, then exit once the frame count indicates all frames
    # have been written to disk
    while True:
        frame, fps, frame_count = cam.poll_frame()

        if frame_count >= NUM_FRAMES:
            low = np.amin(frame['pixel_data'])
            high = np.amax(frame['pixel_data'])
            average = np.average(frame['pixel_data'])
            print(
                'Min:{}\tMax:{}\tAverage:{:.0f}\tFrame Count:{:.0f} Frame Rate: {:.1f}'
                .format(low, high, average, frame_count, fps))
            break

        time.sleep(1)

    cam.finish()

    imageFormat = cam.get_param(const.PARAM_IMAGE_FORMAT)
    if imageFormat == const.PL_IMAGE_FORMAT_MONO8:
        BYTES_PER_PIXEL = 1
    else:
        BYTES_PER_PIXEL = 2

    cam.close()
    pvc.uninit_pvcam()

    # Read out meta data from stored frames
    FRAME_ALIGNMENT = 0  # Typically 0. 32 for Kinetix PCIe
    FRAME_BUFFER_ALIGNMENT = 4096
    META_DATA_FRAME_HEADER_SIZE = 48
    META_DATA_ROI_SIZE = 32
    META_DATA_SIZE = META_DATA_FRAME_HEADER_SIZE + META_DATA_ROI_SIZE
    pixelsPerFrame = WIDTH * HEIGHT
    bytesPerFrameUnpadded = pixelsPerFrame * BYTES_PER_PIXEL + META_DATA_SIZE
    framePad = 0 if FRAME_ALIGNMENT == 0 else FRAME_ALIGNMENT - (
        bytesPerFrameUnpadded % FRAME_ALIGNMENT)
    bytesPerFrame = bytesPerFrameUnpadded + framePad
    frameBufferAlignmentPad = FRAME_BUFFER_ALIGNMENT - (
        (BUFFER_FRAME_COUNT * bytesPerFrame) % FRAME_BUFFER_ALIGNMENT)

    with open(FRAME_DATA_PATH, "rb") as f:

        badMetaDataCount = 0
        for frame_index in range(NUM_FRAMES):
            frame_number = frame_index + 1

            # Read frame number from meta data header
            # Every time the circular buffer wraps around, bytes are padded into the file. This is a result of needing to
            # write data in chunks that are multiples of the alignment boundary.
            frameBufferPad = int(
                frame_index / BUFFER_FRAME_COUNT) * frameBufferAlignmentPad

            FRAME_NUMBER_OFFSET = 5
            filePos = frame_index * bytesPerFrame + FRAME_NUMBER_OFFSET + frameBufferPad
            f.seek(filePos, 0)
            frameNumberBytes = f.read(4)

            frame_number_meta_data = int.from_bytes(frameNumberBytes, 'little')

            if frame_number != frame_number_meta_data:
                badMetaDataCount += 1
                print('Expected: ' + str(frame_number) + ' Observed: ' +
                      str(frame_number_meta_data))

    print('Verified ' + str(NUM_FRAMES) + ' frames:')
    print('  Meta data errors: ' + str(badMetaDataCount))

    if badMetaDataCount > 0:
        print('\nMeta data error troubleshooting:')
        print('  1. Verify FRAME_ALIGNMENT for camera and interface')
        print('  2. Increase exposure time')
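The metadata read-back above walks the file with hand-computed offsets. A minimal sketch consolidating that arithmetic into one helper (same quantities as the script; frame_number_offset=5 mirrors its FRAME_NUMBER_OFFSET constant):

def frame_number_file_offset(frame_index, bytes_per_frame,
                             buffer_frame_count, frame_buffer_alignment_pad,
                             frame_number_offset=5):
    """Byte offset of the little-endian frame number for a zero-based frame index."""
    # one alignment pad is inserted each time the circular buffer wraps around
    wraps = frame_index // buffer_frame_count
    return (frame_index * bytes_per_frame
            + wraps * frame_buffer_alignment_pad
            + frame_number_offset)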
Code example #44
File: main.py Project: nerdslab/DAD
def grid_search_3D_KL(X_target, Y_source, num_A, num_T):
    mean_weight = 0.7
    KL_thr = 5
    nz_var = 0.5
    fine_grid = 10
    bsz = 50
    num_samples = 500000
    k0 = k1 = 5
    grid_size = num_A

    xx, yy, zz = np.meshgrid(np.linspace(-1, 1, grid_size),
                             np.linspace(-1, 1, grid_size),
                             np.linspace(-1, 1, grid_size))

    F_mat = np.column_stack(
        (xx.flatten('F'), yy.flatten('F'), zz.flatten('F')))
    F_mat = np.concatenate(
        (np.array([0, 0, 1])[np.newaxis, :],
         F_mat[np.linalg.norm(F_mat, ord=2, axis=1) > 0.1, :]),
        axis=0)

    if num_T > 1:
        t_vec = np.vstack(([0, 0, 0], np.random.randn(num_T, 3) * nz_var))
    else:
        t_vec = np.array([0, 0, 0])

    sample_loc = sample_from_3D_grid(bsz, num_samples)
    p_train = prob1(sample_loc, normal(X_target), k0)

    try:
        dists1 = np.load('dists1.npy')
    except IOError:
        if num_T > 0:
            dists1 = np.full((F_mat.shape[0], num_T if num_T > 1 else 1), 0.0)
        else:
            dists1 = np.full(F_mat.shape[0], 0.0)

        for i in range(F_mat.shape[0]):
            an0 = F_mat[i, :]
            Y_rot = rotate_data(Y_source, an0)

            p_rot = prob1(sample_loc, normal(Y_rot), k1)

            if num_T > 0:
                dists1[i, 0] = np.matmul(p_rot.T, np.log(p_rot / p_train))
                if dists1[i, 0] < KL_thr and num_T > 1:
                    for j in range(1, num_T):
                        Y_rot2 = Y_rot + t_vec[j, :]
                        p_rot = prob1(sample_loc, Y_rot2, k1)
                        dists1[i, j] = np.matmul(p_rot.T,
                                                 np.log(p_rot / p_train))

                    KL_thr = min(KL_thr,
                                 np.mean(dists1[dists1 != 100]) * mean_weight)
            else:
                dists1[i] = np.matmul(p_rot, np.log(p_rot / p_train))

    np.save('dists1.npy', dists1)
    plt.plot(dists1)
    plt.title('3D Grid Search')
    plt.xlabel('Rotation Angle')
    plt.ylabel('KL Divergence')
    plt.show()
    # select best angle of rotation
    values = np.amin(dists1, axis=0)
    ind = np.argmin(dists1, axis=0)

    if num_T > 1:
        ind = ind[np.argmin(values)]

    angle_ind = ind

    an0 = F_mat[angle_ind, :]
    Y_curr = rotate_data(Y_source, an0)

    if num_T > 1:
        t_curr = t_vec[ind, :]
    else:
        t_curr = [0, 0, 0]

    # final translation
    t_vec2 = np.random.randn(np.power(fine_grid, 3),
                             3) * nz_var + np.matlib.repmat(
                                 t_curr, np.power(fine_grid, 3), 1)

    dists2 = np.zeros(t_vec2.shape[0])
    for i in range(t_vec2.shape[0]):
        Y_rot2 = Y_curr + np.matlib.repmat(t_vec2[i, :], Y_curr.shape[0], 1)
        nbrs = NearestNeighbors(n_neighbors=1).fit(X_target)
        distances, dvec = nbrs.kneighbors(Y_rot2)
        dists2[i] = np.mean(dvec)

    ind = np.argmin(dists2)
    return Y_curr + np.matlib.repmat(t_vec2[ind, :], Y_curr.shape[0], 1)
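The quantity accumulated into dists1 above is the plug-in KL estimate sum p*log(p/q) over probabilities evaluated at shared sample locations (prob1, normal and sample_from_3D_grid are the project's own helpers and are not reproduced here). A standalone sketch of just that formula, with a small epsilon guard that the original omits:

import numpy as np

def plugin_kl(p, q, eps=1e-12):
    """Plug-in KL divergence between two probability vectors evaluated
    on the same sample locations, as accumulated in the grid search above."""
    p = np.asarray(p, dtype=float) + eps
    q = np.asarray(q, dtype=float) + eps
    p, q = p / p.sum(), q / q.sum()
    return float(np.dot(p, np.log(p / q)))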
Code example #45
def detect_meanlines(masked_image, corners, scale=1):
    padding = PARAMS["trace-spacing"](scale) / 2

    timeStart("bound image")
    # effectively shrink the roi by a distance **padding**
    top_bound = padding + np.amax(
        [corners["top_left"][1], corners["top_right"][1]])
    bottom_bound = -padding + np.amin(
        [corners["bottom_left"][1], corners["bottom_right"][1]])
    left_bound = padding + np.amax(
        [corners["bottom_left"][0], corners["top_left"][0]])
    right_bound = -padding + np.amin(
        [corners["top_right"][0], corners["bottom_right"][0]])

    # mask all image values outside of this shrunken roi
    bounded_image = masked_image.copy()
    # slice indices must be integers; the padded bounds above may be floats
    bounded_image[:int(top_bound), :] = ma.masked
    bounded_image[int(bottom_bound):, :] = ma.masked
    bounded_image[:, :int(left_bound)] = ma.masked
    bounded_image[:, int(right_bound):] = ma.masked
    timeEnd("bound image")

    Debug.save_image("meanlines", "bounded_image", bounded_image.filled(0))

    timeStart("threshold image")
    black_and_white_image = otsu_threshold_image(bounded_image)
    timeEnd("threshold image")

    Debug.save_image("meanlines", "thresholded_image", black_and_white_image)

    timeStart("remove small objects")
    filtered_image = remove_small_objects(black_and_white_image,
                                          PARAMS["small-object-size"](scale))
    timeEnd("remove small objects")

    Debug.save_image("meanlines", "filtered_image", filtered_image)

    timeStart("get hough lines")
    roi_top_angle = np.rad2deg(
        points_to_rho_theta(corners["top_left"], corners["top_right"])[1])
    angle_padding = 2  # degrees
    min_angle = roi_top_angle - angle_padding
    max_angle = roi_top_angle + angle_padding
    min_separation_distance = int((2.0 / 3) * PARAMS["trace-spacing"](scale))
    lines = get_all_hough_lines(
        filtered_image,
        min_angle=min_angle,
        max_angle=max_angle,
        min_separation_distance=min_separation_distance,
        min_separation_angle=5)
    timeEnd("get hough lines")

    print "found %s meanlines" % len(lines)
    Record.record("num_meanlines", len(lines))

    if Debug.active:
        debug_image = gray2rgb(np.copy(masked_image))
        line_coords = [
            skidraw.line(line[0][1], line[0][0], line[1][1], line[1][0])
            for line in lines
        ]
        for line in line_coords:
            rr, cc = line
            mask = (rr >= 0) & (rr < debug_image.shape[0]) & (cc >= 0) & (
                cc < debug_image.shape[1])
            debug_image[rr[mask], cc[mask]] = [1.0, 0, 0]
        Debug.save_image("meanlines", "meanlines", debug_image)

    return lines
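The ROI shrink above works by masking everything outside the padded corner bounds. A tiny self-contained illustration of the same masking pattern on a numpy masked array (the bounds here are hypothetical):

import numpy as np
import numpy.ma as ma

image = ma.masked_array(np.arange(36.0).reshape(6, 6))
top, bottom, left, right = 2, 5, 1, 4  # hypothetical shrunken ROI bounds

image[:top, :] = ma.masked
image[bottom:, :] = ma.masked
image[:, :left] = ma.masked
image[:, right:] = ma.masked

# only rows 2..4 and columns 1..3 remain unmasked
assert image.count() == (bottom - top) * (right - left)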
Code example #46
File: main.py Project: nerdslab/DAD
def rotated_KL_min(V, X_tr, num_A, fit_skew=0):
    k = 3
    num_peaks = 10

    if V.shape[1] != X_tr.shape[0]:
        print(V.shape, X_tr.shape)
        print('Target & test set are not the same dimension!')

    ang_vals = np.linspace(0, np.pi, num_A)
    cos_g = np.cos(ang_vals)
    sin_g = np.sin(ang_vals)
    VL_r = []
    y = np.zeros(2 * num_A)
    for p in range(2 * num_A):
        pm = p % num_A
        ps = 2 * np.floor(p / num_A) - 1  #reflection sign: -1 for the first num_A angles, +1 for the rest (the original (p - 1) is a 1-indexing leftover)
        rm = np.matmul([[ps, 0], [0, 1]],
                       [[cos_g[pm], -sin_g[pm]], [sin_g[pm], cos_g[pm]]])

        if fit_skew != 0:
            sx = 0.2 + 0.2 * np.arange(1, 8)
            sy = 0.2 + 0.2 * np.arange(1, 8)

            ys = np.zeros(7 * 7)
            VL_rs = []

            for s1 in range(0, 7):
                for s2 in range(0, 7):
                    s_mat = np.array([[sx[s1], 0], [0, sy[s2]]])
                    # matrix products, not elementwise: apply rotation then skew
                    VL_rs.append(normal(np.matmul(np.matmul(V, rm), s_mat)))
                    ys[s1 * 7 + s2] = eval_KL(VL_rs[s1 * 7 + s2], X_tr, k)

            y[p] = np.amin(ys)
            VL_r[p] = VL_rs[np.argmin(ys)]

        else:
            VL_r.append(normal(np.matmul(V, rm)))
            ys = eval_KL(VL_r[p], X_tr.T, k)
            y[p] = np.mean(ys)

    plt.plot(y)
    plt.xlabel('Rotation Angle')
    plt.ylabel('KL Divergence')
    plt.title('2D Rotation')
    plt.axvline(x=np.argmin(y))
    plt.show()

    V_out = VL_r[np.argmin(y)]

    peak_inds, peak_properties = find_peaks((np.amax(y) - y) / np.amax(y),
                                            height=0.0)

    peak_heights = peak_properties['peak_heights']

    descending_inds = np.argsort(peak_heights)[::-1]
    flip_inds = peak_inds[descending_inds]

    #V_flip = VL_r[flip_inds.tolist()]
    V_flip = []
    for i in range(len(peak_inds)):
        V_flip.append(VL_r[peak_inds[i]])

    return V_out, V_flip, y, flip_inds
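Each candidate in the loop above is a 2D rotation optionally composed with a reflection of the first axis. A minimal sketch of one such candidate and its expected properties:

import numpy as np

def rotation_with_sign(theta, sign):
    """2D rotation optionally composed with a reflection of the x axis,
    as enumerated in the loop above (sign is -1 or +1)."""
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta),  np.cos(theta)]])
    return np.matmul(np.array([[sign, 0.0], [0.0, 1.0]]), rot)

rm = rotation_with_sign(np.pi / 3, -1)
assert np.allclose(rm @ rm.T, np.eye(2))    # orthogonal
assert np.isclose(np.linalg.det(rm), -1.0)  # the reflection flips orientation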
Code example #47
        def HierarchicalStochasticSampleTrajMDP(self, max_epoch_per_traj,
                                                number_of_trajectories):
            traj = [[None] * 1 for _ in range(number_of_trajectories)]
            control = [[None] * 1 for _ in range(number_of_trajectories)]
            Option = [[None] * 1 for _ in range(number_of_trajectories)]
            Termination = [[None] * 1 for _ in range(number_of_trajectories)]
            flag = np.empty((0, 0), int)

            for t in range(number_of_trajectories):
                done = False
                obs = np.round(self.env.reset(), 3)
                size_input = len(obs)
                x = np.empty((0, size_input), int)
                x = np.append(x, obs.reshape((1, size_input)), axis=0)
                u_tot = np.empty((0, 0))
                o_tot = np.empty((0, 0), int)
                b_tot = np.empty((0, 0), int)

                # Initial Option
                prob_o = self.mu
                prob_o_rescaled = np.divide(prob_o, np.amin(prob_o) + 0.01)
                for i in range(1, prob_o_rescaled.shape[0]):
                    prob_o_rescaled[i] = prob_o_rescaled[i] + prob_o_rescaled[
                        i - 1]
                draw_o = np.divide(np.random.rand(), np.amin(prob_o) + 0.01)
                o = np.amin(np.where(draw_o < prob_o_rescaled))
                o_tot = np.append(o_tot, o)

                # Termination
                state = obs.reshape((1, size_input))
                prob_b = self.pi_b[o](state).numpy()
                prob_b_rescaled = np.divide(prob_b, np.amin(prob_b) + 0.01)
                for i in range(1, prob_b_rescaled.shape[1]):
                    prob_b_rescaled[
                        0,
                        i] = prob_b_rescaled[0, i] + prob_b_rescaled[0, i - 1]
                draw_b = np.divide(np.random.rand(), np.amin(prob_b) + 0.01)
                b = np.amin(np.where(draw_b < prob_b_rescaled)[1])
                b_tot = np.append(b_tot, b)
                if b == 1:
                    b_bool = True
                else:
                    b_bool = False

                o_prob_tilde = np.empty((1, self.option_space))
                if b_bool == True:
                    o_prob_tilde = self.pi_hi(state).numpy()
                else:
                    o_prob_tilde[
                        0, :] = self.zeta / self.option_space * np.ones(
                            (1, self.option_space))
                    o_prob_tilde[
                        0, o] = 1 - self.zeta + self.zeta / self.option_space

                prob_o = o_prob_tilde
                prob_o_rescaled = np.divide(prob_o, np.amin(prob_o) + 0.01)
                for i in range(1, prob_o_rescaled.shape[1]):
                    prob_o_rescaled[
                        0,
                        i] = prob_o_rescaled[0, i] + prob_o_rescaled[0, i - 1]
                draw_o = np.divide(np.random.rand(), np.amin(prob_o) + 0.01)
                o = np.amin(np.where(draw_o < prob_o_rescaled)[1])
                o_tot = np.append(o_tot, o)

                for k in range(0, max_epoch_per_traj):
                    state = obs.reshape((1, size_input))
                    # draw action
                    prob_u = self.pi_lo[o](state).numpy()
                    prob_u_rescaled = np.divide(prob_u, np.amin(prob_u) + 0.01)
                    for i in range(1, prob_u_rescaled.shape[1]):
                        prob_u_rescaled[0, i] = prob_u_rescaled[
                            0, i] + prob_u_rescaled[0, i - 1]
                    draw_u = np.divide(np.random.rand(),
                                       np.amin(prob_u) + 0.01)
                    u = np.amin(np.where(draw_u < prob_u_rescaled)[1])
                    u_tot = np.append(u_tot, u)

                    # given action, draw next state
                    action = u * 2
                    obs, reward, done, info = self.env.step(action)
                    obs = np.round(obs, 3)
                    x = np.append(x, obs.reshape((1, size_input)), axis=0)

                    if done == True:
                        u_tot = np.append(u_tot, 0.5)
                        break

                    # Select Termination
                    # Termination
                    state_plus1 = obs.reshape((1, size_input))
                    prob_b = self.pi_b[o](state_plus1).numpy()
                    prob_b_rescaled = np.divide(prob_b, np.amin(prob_b) + 0.01)
                    for i in range(1, prob_b_rescaled.shape[1]):
                        prob_b_rescaled[0, i] = prob_b_rescaled[
                            0, i] + prob_b_rescaled[0, i - 1]
                    draw_b = np.divide(np.random.rand(),
                                       np.amin(prob_b) + 0.01)
                    b = np.amin(np.where(draw_b < prob_b_rescaled)[1])
                    b_tot = np.append(b_tot, b)
                    if b == 1:
                        b_bool = True
                    else:
                        b_bool = False

                    o_prob_tilde = np.empty((1, self.option_space))
                    if b_bool == True:
                        o_prob_tilde = self.pi_hi(state_plus1).numpy()
                    else:
                        o_prob_tilde[
                            0, :] = self.zeta / self.option_space * np.ones(
                                (1, self.option_space))
                        o_prob_tilde[
                            0,
                            o] = 1 - self.zeta + self.zeta / self.option_space

                    prob_o = o_prob_tilde
                    prob_o_rescaled = np.divide(prob_o, np.amin(prob_o) + 0.01)
                    for i in range(1, prob_o_rescaled.shape[1]):
                        prob_o_rescaled[0, i] = prob_o_rescaled[
                            0, i] + prob_o_rescaled[0, i - 1]
                    draw_o = np.divide(np.random.rand(),
                                       np.amin(prob_o) + 0.01)
                    o = np.amin(np.where(draw_o < prob_o_rescaled)[1])
                    o_tot = np.append(o_tot, o)

                traj[t] = x
                control[t] = u_tot
                Option[t] = o_tot
                Termination[t] = b_tot
                flag = np.append(flag, done)

            return traj, control, Option, Termination, flag
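Every sampling block in the method above follows one pattern: rescale the probability vector, accumulate it into a running sum, draw a uniform number on the same rescaled axis, and take the first index whose cumulative value exceeds the draw. That is inverse-CDF sampling of a categorical distribution; a standalone sketch of the pattern:

import numpy as np

def draw_categorical(prob, rng=np.random):
    """Inverse-CDF draw matching the rescale-and-scan pattern above."""
    prob = np.asarray(prob, dtype=float).ravel()
    scale = np.amin(prob) + 0.01      # same rescaling as the method
    cdf = np.cumsum(prob / scale)     # running sum, as built in the loops
    draw = rng.rand() / scale         # uniform draw on the rescaled axis
    return int(np.amin(np.where(draw < cdf)))

# e.g. draw_categorical([0.2, 0.5, 0.3]) returns 0, 1 or 2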
Code example #48
def embeddings_saver(embeddings, obs, sess):
  from tensorboard.plugins import projector

  # NUM_TO_VISUALISE = 1000

  np_embeddings = np.squeeze(np.array(embeddings))#[:NUM_TO_VISUALISE]
  np_obs = np.squeeze(np.array(obs, dtype=np.float32))
  
  print('embeddings.shape', np_embeddings.shape)
  print('obs.shape', np_obs.shape, np_obs.dtype, np.amin(np_obs), np.amax(np_obs))

  mult = np.array([0.25, 0.5, 0.75, 1.0])
  np_obs = np.multiply(np_obs, mult)
  print('obs.shape', np_obs.shape, np_obs.dtype, np.amin(np_obs), np.amax(np_obs))
  np_obs_flattened = np.amax(np_obs, axis=3)#[:NUM_TO_VISUALISE]
  print('np_obs_flattened.shape', np_obs_flattened.shape)

  # TENSORBOARD VISUALISATION

  # embedding_name = 'embedding'
  out_path = './embed_out/'
  image_name = 'sprite.png'

  # embedding_var = tf.Variable(np_embeddings, name=embedding_name)
  # sess.run(embedding_var.initializer)

  # config = projector.ProjectorConfig()
  # embedding = config.embeddings.add()
  # embedding.tensor_name = embedding_name
  # embedding.sprite.image_path = image_name
  # embedding.sprite.single_image_dim.extend([np_obs.shape[1], np_obs.shape[2]])

  # projector.visualize_embeddings(tf.summary.FileWriter(out_path), config)

  # saver = tf.train.Saver({embedding_name: embedding_var})
  # saver.save(sess, out_path+'model.ckpt')

  # # Save obs to sprite
  def create_sprite_image(images):
    """ Returns a sprite image consisting of images passed as argument.
        Images should be count x width x height
    """
    if isinstance(images, list):
      images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    n_plots = int(np.ceil(np.sqrt(images.shape[0])))

    spriteimage = np.ones((img_h * n_plots, img_w * n_plots))

    for i in range(n_plots):
      for j in range(n_plots):
        this_filter = i * n_plots + j
        if this_filter < images.shape[0]:
          this_img = images[this_filter]
          this_img = (this_img - np.amin(this_img)) / (np.amax(this_img) - np.amin(this_img))  #shift to [0, 1], not just scale by the range
          plt.imsave(out_path+str(i * n_plots + j)+image_name, this_img, cmap='gray')
          spriteimage[i * img_h:(i + 1) * img_h,
                      j * img_w:(j + 1) * img_w] = this_img

    return spriteimage

  sprite = create_sprite_image(np_obs_flattened)
  # plt.imsave(out_path+image_name, sprite, cmap='gray')

  # SKLEARN PLOT
  def plot_with_labels(low_dim_embs, labels, filename):
    assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
    plt.figure(figsize=(18, 18))  # in inches
    plt.axis('off')
    for i, label in enumerate(labels):
      x, y = low_dim_embs[i, :]
      plt.scatter(x, y)
      plt.annotate(
          label,
          xy=(x, y),
          xytext=(5, 2),
          textcoords='offset points',
          ha='right',
          va='bottom')
    plt.savefig(filename)

  try:
    from sklearn.manifold import TSNE

    tsne = TSNE(
        perplexity=15, n_components=2, init='pca', n_iter=5000, method='exact')
    plot_only = 500
    low_dim_embs = tsne.fit_transform(np_embeddings[:plot_only, :])
    plot_with_labels(low_dim_embs, np.arange(plot_only), './fig_out/tsne.png')

  except ImportError as ex:
    print('Please install sklearn, matplotlib, and scipy to show embeddings.')
    print(ex)
Code example #49
print('flux is: %5.2f' % (evaluate_gaussian(o)))
calculated.append(evaluate_gaussian(o))

z, e = fitting_gaussian(data6, x6)
print('flux is: %5.2f' % (evaluate_gaussian(e)))
calculated.append(evaluate_gaussian(e))

i, u = fitting_gaussian(data7, x7)
print('flux is: %5.2f' % (evaluate_gaussian(u)))
calculated.append(evaluate_gaussian(u))


def line(x, m, b):
    return m * x + b


popt, covar = curve_fit(line, calculated, correct_values)

x = np.linspace(np.amin(calculated), np.amax(calculated))
y = line(x, *popt)

plt.figure(figsize=(12, 8))
plt.xlabel('Calculated Flux')
plt.ylabel('Observed Flux')
plt.plot(calculated, correct_values, label='Values')
plt.plot(x, y, label='Best Fit')
plt.show()
#print(t)
#print()
#print(a)
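For a straight-line model like this, np.polyfit recovers the same slope and intercept as curve_fit; a quick cross-check sketch on hypothetical data:

import numpy as np

calculated_demo = np.array([1.0, 2.0, 3.0, 4.0])  # hypothetical fluxes
correct_demo = 2.0 * calculated_demo + 0.5        # hypothetical observed values

m, b = np.polyfit(calculated_demo, correct_demo, 1)
assert np.allclose([m, b], [2.0, 0.5])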
Code example #50
        def HILVideoSimulation(self, directory, max_epoch_per_traj):
            self.env._max_episode_steps = max_epoch_per_traj

            # Record the environment
            self.env = gym.wrappers.Monitor(self.env, directory, resume=True)

            for t in range(1):
                done = False
                obs = np.round(self.env.reset(), 3)
                size_input = len(obs)
                x = np.empty((0, size_input), int)
                x = np.append(x, obs.reshape((1, size_input)), axis=0)
                u_tot = np.empty((0, 0))
                o_tot = np.empty((0, 0), int)
                b_tot = np.empty((0, 0), int)

                while not done:  # Start with while True
                    self.env.render()
                    # Initial Option
                    prob_o = self.mu
                    prob_o_rescaled = np.divide(prob_o, np.amin(prob_o) + 0.01)
                    for i in range(1, prob_o_rescaled.shape[0]):
                        prob_o_rescaled[
                            i] = prob_o_rescaled[i] + prob_o_rescaled[i - 1]
                    draw_o = np.divide(np.random.rand(),
                                       np.amin(prob_o) + 0.01)
                    o = np.amin(np.where(draw_o < prob_o_rescaled))
                    o_tot = np.append(o_tot, o)

                    # Termination
                    state = obs.reshape((1, size_input))
                    prob_b = self.pi_b[o](state).numpy()
                    prob_b_rescaled = np.divide(prob_b, np.amin(prob_b) + 0.01)
                    for i in range(1, prob_b_rescaled.shape[1]):
                        prob_b_rescaled[0, i] = prob_b_rescaled[
                            0, i] + prob_b_rescaled[0, i - 1]
                    draw_b = np.divide(np.random.rand(),
                                       np.amin(prob_b) + 0.01)
                    b = np.amin(np.where(draw_b < prob_b_rescaled)[1])
                    b_tot = np.append(b_tot, b)
                    if b == 1:
                        b_bool = True
                    else:
                        b_bool = False

                    o_prob_tilde = np.empty((1, self.option_space))
                    if b_bool == True:
                        o_prob_tilde = self.pi_hi(state).numpy()
                    else:
                        o_prob_tilde[
                            0, :] = self.zeta / self.option_space * np.ones(
                                (1, self.option_space))
                        o_prob_tilde[
                            0,
                            o] = 1 - self.zeta + self.zeta / self.option_space

                    prob_o = o_prob_tilde
                    prob_o_rescaled = np.divide(prob_o, np.amin(prob_o) + 0.01)
                    for i in range(1, prob_o_rescaled.shape[1]):
                        prob_o_rescaled[0, i] = prob_o_rescaled[
                            0, i] + prob_o_rescaled[0, i - 1]
                    draw_o = np.divide(np.random.rand(),
                                       np.amin(prob_o) + 0.01)
                    o = np.amin(np.where(draw_o < prob_o_rescaled)[1])
                    o_tot = np.append(o_tot, o)

                    for k in range(0, max_epoch_per_traj):
                        state = obs.reshape((1, size_input))
                        # draw action
                        prob_u = self.pi_lo[o](state).numpy()
                        prob_u_rescaled = np.divide(prob_u,
                                                    np.amin(prob_u) + 0.01)
                        for i in range(1, prob_u_rescaled.shape[1]):
                            prob_u_rescaled[0, i] = prob_u_rescaled[
                                0, i] + prob_u_rescaled[0, i - 1]
                        draw_u = np.divide(np.random.rand(),
                                           np.amin(prob_u) + 0.01)
                        u = np.amin(np.where(draw_u < prob_u_rescaled)[1])
                        u_tot = np.append(u_tot, u)

                        # given action, draw next state
                        action = u * 2
                        obs, reward, done, info = self.env.step(action)
                        obs = np.round(obs, 3)
                        x = np.append(x, obs.reshape((1, size_input)), axis=0)

                        if done == True:
                            u_tot = np.append(u_tot, 0.5)
                            break

                        # Select Termination
                        # Termination
                        state_plus1 = obs.reshape((1, size_input))
                        prob_b = self.pi_b[o](state_plus1).numpy()
                        prob_b_rescaled = np.divide(prob_b,
                                                    np.amin(prob_b) + 0.01)
                        for i in range(1, prob_b_rescaled.shape[1]):
                            prob_b_rescaled[0, i] = prob_b_rescaled[
                                0, i] + prob_b_rescaled[0, i - 1]
                        draw_b = np.divide(np.random.rand(),
                                           np.amin(prob_b) + 0.01)
                        b = np.amin(np.where(draw_b < prob_b_rescaled)[1])
                        b_tot = np.append(b_tot, b)
                        if b == 1:
                            b_bool = True
                        else:
                            b_bool = False

                        o_prob_tilde = np.empty((1, self.option_space))
                        if b_bool == True:
                            o_prob_tilde = self.pi_hi(state_plus1).numpy()
                        else:
                            o_prob_tilde[
                                0, :] = self.zeta / self.option_space * np.ones(
                                    (1, self.option_space))
                            o_prob_tilde[
                                0,
                                o] = 1 - self.zeta + self.zeta / self.option_space

                        prob_o = o_prob_tilde
                        prob_o_rescaled = np.divide(prob_o,
                                                    np.amin(prob_o) + 0.01)
                        for i in range(1, prob_o_rescaled.shape[1]):
                            prob_o_rescaled[0, i] = prob_o_rescaled[
                                0, i] + prob_o_rescaled[0, i - 1]
                        draw_o = np.divide(np.random.rand(),
                                           np.amin(prob_o) + 0.01)
                        o = np.amin(np.where(draw_o < prob_o_rescaled)[1])
                        o_tot = np.append(o_tot, o)

            self.env.close()
            return x, u_tot, o_tot, b_tot
Code example #51
            ncols, nrows = len(set(xcoord)), len(set(ycoord)) 
       
            grid_var = (mean_var.reshape((nrows, ncols), order='F'))
            grid_varU = (mean_varU.reshape((nrows, ncols), order='F'))
            grid_varV = (mean_varV.reshape((nrows, ncols), order='F'))
            grid_var_ref = (mean_var_ref.reshape((nrows, ncols), order='F'))

            grid_xcoord= (xcoord.reshape((nrows, ncols), order='F'))
            grid_ycoord= (ycoord.reshape((nrows, ncols), order='F'))


            if (exp == '1'):
              grid_var=(grid_var-8000.)/8000.
            elif (exp == '3'):
              grid_var=(grid_var-grid_var_ref)/8000.
              print(np.amin(grid_var),np.amax(grid_var))

            plotid='31'+str(plotnum)
            plt.subplot(plotid)
            plt.title(var, loc='left')
            plt.xlabel('lon', fontsize=18)
            plt.ylabel('lat', fontsize=18)
            #if (var == 'V'):
            plt.contour(grid_var, 20, linewidths=0.5, extent=(xcoord.min(), xcoord.max(),
               ycoord.min(), ycoord.max()),
               colors='black', vmin=np.amin(grid_var), vmax=np.amax(grid_var))  #interpolation is an imshow kwarg, not a contour one, so it is dropped here
            q= plt.quiver(grid_xcoord[::16, ::16],grid_ycoord[::16, ::16], grid_varU[::16, ::16],grid_varV[::16, ::16], pivot='mid', width=0.0015, scale=1/0.0009)
            if (exp == '1'):
              plt.quiverkey(q, 0.9, 1.05, 100, r'$100 \frac{m}{s}$', labelpos='E')
            elif (exp == '3'):
              plt.quiverkey(q, 0.9, 1.05, 40, r'$40 \frac{m}{s}$', labelpos='E')
Code example #52
def animate_bodies(prefix):    
    
    filetype = 'single'
    xkey, ykey, ix,iy,xlabel,ylabel = select_variables(filetype)
    
    imass = singlecol['mass']
    itime = singlecol['t']
    
    
    filenames, alldata = read_all_bodyfiles(prefix)   
        
    nbodies = len(filenames)
    
    plt.ion()
    fig1 = plt.figure()
    ax1 = fig1.add_subplot(111)
                
    
    nsteps = len(alldata[0])
 
    xmax = -1.0e30
    xmin = -xmax
    
    ymin = 1.0e30
    ymax = -ymin
       
    for i in range(nbodies):
        body_xmax = np.amax(alldata[i][:,ix])
        body_ymax = np.amax(alldata[i][:,iy])
        
        body_xmin = np.amin(alldata[i][:,ix])
        body_ymin = np.amin(alldata[i][:,iy])
        
        if(body_xmax>xmax): xmax = body_xmax
        if(body_ymax>ymax): ymax = body_ymax

        if(body_xmin<xmin): xmin = body_xmin        
        if(body_ymin<ymin): ymin = body_ymin
    
    
    print ('Plot range: ')
    print (xlabel,' : ',xmin, xmax)
    print (ylabel,' : ',ymin, ymax)
    
                 
    iskip = np.zeros(nbodies)
    for j in range(nsteps):
       
        for i in range(nbodies):
            
            if(iskip[i]==1):
                continue
            
            try:
                t = alldata[i][j,itime]
            except IndexError:
                print ('No further data for ',filenames[i], ': skipping')
                iskip[i]=1
                continue
                                   
            if(alldata[i][j,imass]>0.0):
                
                ax1.scatter(alldata[i][j,ix], alldata[i][j,iy], s = np.sqrt(1.0e8*alldata[i][j,imass]))
            else:
                ax1.scatter(alldata[i][j,ix], alldata[i][j,iy], s = 5.0, color='k')
                
                        
        ax1.text(0.9, 0.9,'t={:.3E} yr'.format(t), bbox=dict(edgecolor='black',facecolor='none'), horizontalalignment='center', verticalalignment='center',transform = ax1.transAxes)

        ax1.set_xlabel(xlabel, fontsize = 22)
        ax1.set_ylabel(ylabel, fontsize = 22)
        ax1.set_ylim(ymin,ymax)
        ax1.set_xlim(xmin,xmax)
        #ax1.set_xscale('log')
        #ax1.set_yscale('log')
        plt.draw()
        plt.savefig('animation_0'+str(j)+'.png', format='png')  #save after the labels and annotation are drawn

        ax1.clear()
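The bounds loop near the top of animate_bodies can be collapsed to one amin/amax per axis; a sketch assuming alldata is a list of (nsteps_i, ncols) arrays and ix, iy are column indices, as in the function above:

import numpy as np

def plot_bounds(alldata, ix, iy):
    """Global x/y plot range across bodies with unequal step counts."""
    xmin = min(np.amin(body[:, ix]) for body in alldata)
    xmax = max(np.amax(body[:, ix]) for body in alldata)
    ymin = min(np.amin(body[:, iy]) for body in alldata)
    ymax = max(np.amax(body[:, iy]) for body in alldata)
    return xmin, xmax, ymin, ymax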
Code example #53
File: morestats.py Project: melshaer/MiniBloq-Sparki
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
    """
    Calculate quantiles for a probability plot of sample data against a
    specified theoretical distribution.

    `probplot` optionally calculates a best-fit line for the data and plots the
    results using Matplotlib or a given plot function.

    Parameters
    ----------
    x : array_like
        Sample/response data from which `probplot` creates the plot.
    sparams : tuple, optional
        Distribution-specific shape parameters (location(s) and scale(s)).
    dist : str, optional
        Distribution function name. The default is 'norm' for a normal
        probability plot.
    fit : bool, optional
        Fit a least-squares regression (best-fit) line to the sample data if
        True (default).
    plot : object, optional
        If given, plots the quantiles and least squares fit.
        `plot` is an object with methods "plot", "title", "xlabel", "ylabel"
        and "text". The matplotlib.pyplot module or a Matplotlib axes object
        can be used, or a custom object with the same methods.
        By default, no plot is created.

    Returns
    -------
    (osm, osr) : tuple of ndarrays
        Tuple of theoretical quantiles (osm, or order statistic medians) and
        ordered responses (osr).
    (slope, intercept, r) : tuple of floats, optional
        Tuple  containing the result of the least-squares fit, if that is
        performed by `probplot`. `r` is the square root of the coefficient of
        determination.  If ``fit=False`` and ``plot=None``, this tuple is not
        returned.

    Notes
    -----
    Even if `plot` is given, the figure is not shown or saved by `probplot`;
    ``plot.show()`` or ``plot.savefig('figname.png')`` should be used after
    calling `probplot`.

    Examples
    --------
    >>> import scipy.stats as stats
    >>> nsample = 100
    >>> np.random.seed(7654321)

    A t distribution with small degrees of freedom:

    >>> ax1 = plt.subplot(221)
    >>> x = stats.t.rvs(3, size=nsample)
    >>> res = stats.probplot(x, plot=plt)

    A t distribution with larger degrees of freedom:

    >>> ax2 = plt.subplot(222)
    >>> x = stats.t.rvs(25, size=nsample)
    >>> res = stats.probplot(x, plot=plt)

    A mixture of 2 normal distributions with broadcasting:

    >>> ax3 = plt.subplot(223)
    >>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
    ...                    size=(nsample//2, 2)).ravel()
    >>> res = stats.probplot(x, plot=plt)

    A standard normal distribution:

    >>> ax4 = plt.subplot(224)
    >>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
    >>> res = stats.probplot(x, plot=plt)

    """
    N = len(x)
    Ui = zeros(N) * 1.0
    Ui[-1] = 0.5**(1.0 / N)
    Ui[0] = 1 - Ui[-1]
    i = arange(2, N)
    Ui[1:-1] = (i - 0.3175) / (N + 0.365)
    try:
        ppf_func = eval('distributions.%s.ppf' % dist)
    except AttributeError:
        raise ValueError("%s is not a valid distribution with a ppf." % dist)
    if sparams is None:
        sparams = ()
    if isscalar(sparams):
        sparams = (sparams, )
    if not isinstance(sparams, tuple):
        sparams = tuple(sparams)
    """
    res = inspect.getargspec(ppf_func)
    if not ('loc' == res[0][-2] and 'scale' == res[0][-1] and \
            0.0==res[-1][-2] and 1.0==res[-1][-1]):
        raise ValueError("Function has does not have default location "
              "and scale parameters\n  that are 0.0 and 1.0 respectively.")
    if (len(sparams) < len(res[0])-len(res[-1])-1) or \
       (len(sparams) > len(res[0])-3):
        raise ValueError("Incorrect number of shape parameters.")
    """
    osm = ppf_func(Ui, *sparams)
    osr = sort(x)
    if fit or (plot is not None):
        # perform a linear fit.
        slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
    if plot is not None:
        plot.plot(osm, osr, 'o', osm, slope * osm + intercept)
        plot.title('Probability Plot')
        plot.xlabel('Quantiles')
        plot.ylabel('Ordered Values')

        xmin = amin(osm)
        xmax = amax(osm)
        ymin = amin(x)
        ymax = amax(x)
        posx = xmin + 0.70 * (xmax - xmin)
        posy = ymin + 0.01 * (ymax - ymin)
        plot.text(posx, posy, "r^2=%1.4f" % r)
    if fit:
        return (osm, osr), (slope, intercept, r)
    else:
        return osm, osr
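The Ui vector built at the top of probplot is Filliben's estimate of the uniform order statistic medians; a standalone restatement of just that construction:

import numpy as np

def order_statistic_medians(N):
    """Filliben's estimate, matching the Ui construction in probplot above."""
    m = np.zeros(N)
    m[-1] = 0.5 ** (1.0 / N)
    m[0] = 1.0 - m[-1]
    i = np.arange(2, N)
    m[1:-1] = (i - 0.3175) / (N + 0.365)
    return m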
Code example #54
# visualize the fit against the data
X_test = np.linspace(data.X.min(), data.X.max(), 100)
X_feat_new = np.expand_dims(
    X_test, axis=1
)  # we need this otherwise, the dimension is missing (turns shape(value,) to shape(value,value))
X_feat = []
for i in range(1, degree + 1):
    X_feat.append(np.power(X_feat_new, i))
X_feat = np.concatenate((X_feat), axis=1)

# apply feature map to input features x1
with tf.Session() as sess:
    plt.figure(1)
    plt.plot(X_test, sess.run(regress(X_feat, theta)), label="Model")
    plt.scatter(X[:, 0], y, edgecolor='g', s=20, label="Samples")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.xlim((np.amin(X_test) - kludge, np.amax(X_test) + kludge))
    plt.ylim((np.amin(y) - kludge, np.amax(y) + kludge))
    plt.legend(loc="best")
    plt.savefig(os.getcwd() + '/Pb_2_15_Test_Fig.jpg')

    plt.figure(2)
    plt.plot(cost)
    plt.title('Loss vs Epoch')
    plt.xlabel('Number of epochs')
    plt.ylabel('Cost')
    plt.savefig(os.getcwd() + '/Pb_2_15_Loss_Fig.jpg')

plt.show()
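The feature-map loop above (powers 1..degree stacked column-wise) can be restated compactly; a sketch with hypothetical inputs:

import numpy as np

def poly_features(x, degree):
    """Columns [x, x**2, ..., x**degree], as built in the loop above."""
    x = np.asarray(x).reshape(-1, 1)
    return np.concatenate([np.power(x, i) for i in range(1, degree + 1)], axis=1)

assert poly_features(np.array([1.0, 2.0]), 3).shape == (2, 3)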
Code example #55
    def brute_force_search(self,parameters):
        '''
        This function determines the optimal solutions by brute force.
        '''

        results={}


        # Retrieve the values of the needed parameters
        lam = parameters['lam']
        D= parameters['D']
        mu = parameters['mu']
        sigma=parameters['sigma']
        T=parameters['T']
        y=parameters['y']

        best_solutions = None

        # Search the space of feasible solutions
        # The values that can be taken by Zi
        index = [-1,0, 1]

        keys = list(itertools.product(index, repeat=self.N_portfolio))

        # filter out non-feasable solutions
        feasible = []

        for key in keys:
            z = np.array(key)

            if np.sum(z) == D:
                feasible.append(z)

        feasible = np.array(feasible)
        state_costs = np.zeros(len(feasible))


        # Find the best solutions and also the worst solutions
        for k in range(len(feasible)):
            state = feasible[k]
            portfolio_cost = self.compute_portfolio_cost(lam, mu, sigma, state)
            transaction_cost = self.compute_transaction_cost(T,y,state)
            state_costs[k] = portfolio_cost+transaction_cost

        max_cost_indx = np.argwhere(state_costs == np.amax(state_costs)).flatten()
        min_cost_indx = np.argwhere(state_costs == np.amin(state_costs)).flatten()

        results['lambda'] = lam
        results['y'] = y
        results['mu'] = mu
        results['sigma'] = sigma
        results['D'] = D

        results['maximum_cost_states'] = feasible[max_cost_indx]
        results['maximum_cost'] = max(state_costs)
        results['volatility_of_maximum_cost_state'] = self.compute_portfolio_volatility(sigma, feasible[max_cost_indx][0])
        results['returns_of_maximum_cost_state'] = self.compute_portfolio_returns(mu, feasible[max_cost_indx][0])


        results['minimum_cost_states'] = feasible[min_cost_indx]
        results['minimum_cost'] = min(state_costs)
        results['volatility_of_minimum_cost_state'] = self.compute_portfolio_volatility(sigma, feasible[min_cost_indx][0])
        results['returns_of_minimum_cost_state'] = self.compute_portfolio_returns(mu, feasible[min_cost_indx][0])

        return results
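The search space enumerated above is the full ternary grid {-1, 0, 1}^N filtered to positions summing to D, i.e. 3**N candidates before filtering, which is what makes the brute force exponential in portfolio size. A tiny sketch of the enumeration with hypothetical sizes:

import itertools
import numpy as np

N_portfolio, D = 4, 1  # hypothetical sizes
feasible = [np.array(z) for z in itertools.product((-1, 0, 1), repeat=N_portfolio)
            if sum(z) == D]
assert len(feasible) < 3 ** N_portfolio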
Code example #56
File: morestats.py Project: melshaer/MiniBloq-Sparki
def ansari(x, y):
    """
    Perform the Ansari-Bradley test for equal scale parameters

    The Ansari-Bradley test is a non-parametric test for the equality
    of the scale parameter of the distributions from which two
    samples were drawn.

    Parameters
    ----------
    x, y : array_like
        arrays of sample data

    Returns
    -------
    AB : float
        The Ansari-Bradley test statistic
    p-value : float
        The p-value of the hypothesis test

    See Also
    --------
    fligner : A non-parametric test for the equality of k variances
    mood : A non-parametric test for the equality of two scale parameters

    Notes
    -----
    The p-value given is exact when the sample sizes are both less than
    55 and there are no ties, otherwise a normal approximation for the
    p-value is used.

    References
    ----------
    .. [1] Sprent, Peter and N.C. Smeeton.  Applied nonparametric statistical
           methods.  3rd ed. Chapman and Hall/CRC. 2001.  Section 5.8.2.

    """
    x, y = asarray(x), asarray(y)
    n = len(x)
    m = len(y)
    if m < 1:
        raise ValueError("Not enough other observations.")
    if n < 1:
        raise ValueError("Not enough test observations.")
    N = m + n
    xy = r_[x, y]  # combine
    rank = stats.rankdata(xy)
    symrank = amin(array((rank, N - rank + 1)), 0)
    AB = sum(symrank[:n], axis=0)
    uxy = unique(xy)
    repeats = (len(uxy) != len(xy))
    exact = ((m < 55) and (n < 55) and not repeats)
    if repeats and ((m < 55) or (n < 55)):
        warnings.warn("Ties preclude use of exact statistic.")
    if exact:
        astart, a1, ifault = statlib.gscale(n, m)
        ind = AB - astart
        total = sum(a1, axis=0)
        if ind < len(a1) / 2.0:
            cind = int(ceil(ind))
            if (ind == cind):
                pval = 2.0 * sum(a1[:cind + 1], axis=0) / total
            else:
                pval = 2.0 * sum(a1[:cind], axis=0) / total
        else:
            find = int(floor(ind))
            if (ind == floor(ind)):
                pval = 2.0 * sum(a1[find:], axis=0) / total
            else:
                pval = 2.0 * sum(a1[find + 1:], axis=0) / total
        return AB, min(1.0, pval)

    # otherwise compute normal approximation
    if N % 2:  # N odd
        mnAB = n * (N + 1.0)**2 / 4.0 / N
        varAB = n * m * (N + 1.0) * (3 + N**2) / (48.0 * N**2)
    else:
        mnAB = n * (N + 2.0) / 4.0
        varAB = m * n * (N + 2) * (N - 2.0) / 48 / (N - 1.0)
    if repeats:  # adjust variance estimates
        # compute sum(tj * rj**2,axis=0)
        fac = sum(symrank**2, axis=0)
        if N % 2:  # N odd
            varAB = m * n * (16 * N * fac - (N + 1)**4) / (16.0 * N**2 *
                                                           (N - 1))
        else:  # N even
            varAB = m * n * (16 * fac - N * (N + 2)**2) / (16.0 * N * (N - 1))
    z = (AB - mnAB) / sqrt(varAB)
    pval = distributions.norm.sf(abs(z)) * 2.0
    return AB, pval
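A minimal usage sketch, calling the packaged scipy.stats version of this test on two samples drawn with equal scale (a large p-value is expected under the null):

import numpy as np
from scipy.stats import ansari

rng = np.random.RandomState(0)
x = rng.normal(loc=0.0, scale=1.0, size=40)
y = rng.normal(loc=0.0, scale=1.0, size=35)

AB, pval = ansari(x, y)  # equal scales, so pval should be large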
Code example #57
File: rbfopt_utils.py Project: mishpat/rbfopt
def bulk_evaluate_rbf(settings, points, n, k, node_pos, rbf_lambda, rbf_h,
                      return_distances = 'no'):
    """Evaluate the RBF interpolant at all points in a given list.

    Evaluate the RBF interpolant at all points in a given list. This
    version uses numpy and should be faster than individually
    evaluating the RBF at each single point, provided that the list of
    points is large enough. It also computes the distance or the
    minimum distance of each point from the interpolation nodes, if
    requested, since this comes almost for free.

    Parameters
    ----------
    settings : :class:`rbfopt_settings.RbfSettings`.
        Global and algorithmic settings.

    points : 2D numpy.ndarray[float]
        The list of points in R^n where we want to evaluate the
        interpolant.

    n : int
        Dimension of the problem, i.e. the size of the space.

    k : int
        Number of interpolation nodes.

    node_pos : 2D numpy.ndarray[float]
        List of coordinates of the interpolation points.

    rbf_lambda : 1D numpy.ndarray[float]
        The lambda coefficients of the RBF interpolant, corresponding
        to the radial basis functions. List of dimension k.

    rbf_h : 1D numpy.ndarray[float]
        The h coefficients of the RBF interpolant, corresponding to the
        polynomial. List of dimension given by get_size_P_matrix().

    return_distances : string
        If 'no', do nothing. If 'min', return the minimum distance of
        each point to interpolation nodes. If 'all', return the full
        distance matrix to the interpolation nodes.

    Returns
    -------
    1D numpy.ndarray[float] or (1D numpy.ndarray[float], 1D numpy.ndarray[float])
        Value of the RBF interpolant at each point; if
        return_distances is 'min', additionally returns the minimum
        distance of each point from the interpolation nodes.

    """
    assert(isinstance(points, np.ndarray))
    assert(isinstance(node_pos, np.ndarray))
    assert(isinstance(rbf_lambda, np.ndarray))
    assert(isinstance(rbf_h, np.ndarray))
    assert(points.size)
    assert(len(rbf_lambda)==k)
    assert(len(node_pos)==k)
    assert(isinstance(settings, RbfSettings))
    p = get_size_P_matrix(settings, n)
    assert(len(rbf_h)==p)

    rbf_function = get_rbf_function(settings)
    # Formula:
    # \sum_{i=1}^k \lambda_i \phi(\|x - x_i\|) + h^T (x 1)

    # Create distance matrix
    dist_mat = ss.distance.cdist(points, node_pos)
    # Evaluate radial basis function on each distance
    part1 = np.dot(np.vectorize(rbf_function)(dist_mat), rbf_lambda)
    if (get_degree_polynomial(settings) == 1):
        part2 = np.dot(points, rbf_h[:-1])
    else:
        part2 = np.zeros(len(points))
    part3 = rbf_h[-1] if (p > 0) else 0.0
    if (return_distances == 'min'):
        return ((part1 + part2 + part3), (np.amin(dist_mat, 1)))
    elif (return_distances == 'all'):
        return ((part1 + part2 + part3), dist_mat)
    else:
        return (part1 + part2 + part3)
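The evaluation core above reduces to one cdist call plus matrix products. A stripped-down sketch of the degree-1 polynomial case, with phi and the coefficient vectors as hypothetical stand-ins for the settings-driven values above:

import numpy as np
import scipy.spatial as ss

def bulk_rbf_eval(points, node_pos, rbf_lambda, rbf_h, phi=lambda r: r):
    """Vectorized s(x) = sum_i lambda_i * phi(||x - x_i||) + h[:-1].x + h[-1].
    Sketch of the degree-1 polynomial case only; phi defaults to the linear RBF."""
    dist_mat = ss.distance.cdist(points, node_pos)  # all point-node distances at once
    part1 = np.dot(phi(dist_mat), rbf_lambda)       # radial part
    part2 = np.dot(points, rbf_h[:-1]) + rbf_h[-1]  # polynomial tail
    return part1 + part2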
Code example #58
def Autoencoder(x_train, y_train, x_test, y_test):
    input_shape = (x_train.shape[1],)
    input2 = Input(input_shape)

    encoded = Dense(80, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='encod0')(input2)
    encoded = Dense(30, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='encod1')(encoded)
    encoded = Dense(10, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='encod2')(encoded)

    encoded= Dropout({{uniform(0, 1)}})(encoded)
    decoded = Dense(30, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='decoder1')(encoded)
    decoded = Dense(80, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='decoder2')(decoded)
    decoded = Dense(x_train.shape[1], activation='linear',
                    kernel_initializer='glorot_uniform',
                    name='decoder3')(decoded)


    model = Model(inputs=input2, outputs=decoded)
    model.summary()

    adam=Adam(lr={{uniform(0.0001, 0.01)}})
    model.compile(loss='mse', metrics=['acc'],
                  optimizer=adam)
    callbacks_list = [
        callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=10,
                                restore_best_weights=True),
    ]
    XTraining, XValidation, YTraining, YValidation = train_test_split(x_train, x_train, stratify=y_train,
                                                                      test_size=0.2)  # before model building

    tic = time.time()
    history= model.fit(XTraining, YTraining,
                      batch_size={{choice([32,64, 128,256,512])}},
                      epochs=150,
                      verbose=2,
                      callbacks=callbacks_list,
                      validation_data=(XValidation,YValidation))

    toc = time.time()

    score = np.amin(history.history['val_loss'])
    print('Best validation loss of epoch:', score)

    scores = [history.history['val_loss'][epoch] for epoch in range(len(history.history['loss']))]
    score = min(scores)
    print('Score', score)

    print('Best score', global_config.best_score)

    if global_config.best_score > score:
        global_config.best_score = score
        global_config.best_model = model
        global_config.best_numparameters = model.count_params()
        global_config.best_time = toc - tic

    return {'loss': score, 'status': STATUS_OK, 'n_epochs': len(history.history['loss']), 'n_params': model.count_params(), 'model': global_config.best_model, 'time': toc - tic}
Code example #59
def aicarx(na_max, nb_max, nk_max, u, y, criterion='aicn'):
    """
    author: @lima84

    Estimates ARX model based on Akaike's Information Criterion (AIC) given
    the upper limits for the polynomial orders (na_max, nb_max, nk_max) and
    a pair of input-output data vectors (u, y). Returns the lowest AIC cost
    and the best fitting A(q) and B(q) polynomials for the ARX model: 
        A(q)y(t) = B(q)u(t) + e(t),

    Parameters
    ----------
    na_max : int
        maximum value for the na parameter -- na = [1, 2, ..., na_max]
    nb_max : int
        maximum value for the nb parameter -- nb = [0, 1, ..., nb_max]
    nk_max : int
        maximum value for the nk parameter -- nk = [0, 1, ..., nk_max]
    u : ndarray
        input data array
    y : ndarray
        output data array
    criterion: string (optional)
        criterion to be evaluated.
    Returns
    -------
    A : ndarray
        Array containing the A(q) polynomial
    B : ndarray
        Array containing the B(q) polynomial
    J_aic : float
        AIC cost function value using A(q) and B(q)
    """
    # Check input arguments
    _, _, _, _, _, _, u, y = chckin(na_max, nb_max, 0, 0, 0, nk_max, u, y)

    # Number of samples and outputs
    N, ny = y.shape

    A_aic = empty((na_max, nb_max + 1, nk_max + 1), dtype='object')
    B_aic = empty((na_max, nb_max + 1, nk_max + 1), dtype='object')
    J_aic = empty((na_max, nb_max + 1, nk_max + 1), dtype='object')

    criteria = {'aic': aiccrit, 'aicn': aicncrit, 'aicc': aicccrit}
    crit = criteria.get(criterion)

    for na in range(1,na_max+1):
        for nb in range(0,nb_max+1):
            for nk in range(0,nk_max+1):
                # Computes ARX polynomials for current (na, nb, nk)
                A, B = arx(na, nb, nk, u, y)

                # Array-list magic for lfilter 
                A = A.tolist()[0][0]
                B = B.tolist()[0][0]

                # Computes e(t) = A(na,nb,nk,q)y(t) - B(na,nb,nk,q)u(t)
                e = lfilter(A, [1], y, axis=0) - lfilter(B, [1], u, axis=0)

                # Number of parameters
                p = na + nb + 1

                # Computes the cost function
                J = (1/N) * dot(e.T, e)[0][0]

                # Add current polynomials to their respective matrix
                A_aic[na - 1, nb, nk] = A
                B_aic[na - 1, nb, nk] = B

                # Computes AIC cost function
                J_aic[na - 1, nb, nk] = crit(J, N, p)

    # Finds the lowest cost estimate indices
    min_index = where(J_aic == amin(J_aic))

    A, B, J_aic = A_aic[min_index], B_aic[min_index], J_aic[min_index]
    return [A, B, J_aic]
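The crit callables are looked up by name from the criteria dict; their definitions are not shown here. For reference, a conventional AIC for a quadratic cost J has the form below (an assumed sketch only, not necessarily the package's aiccrit):

import numpy as np

def aic_crit(J, N, p):
    """Conventional AIC for N samples, p parameters and mean squared cost J.
    Assumed form only -- the aiccrit/aicncrit/aicccrit used above may differ."""
    return N * np.log(J) + 2 * p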
Code example #60
def viterbi(Observation, Emission, Transition, Initial):
    """
    calculates the most likely sequence of hidden states for a hidden
    markov model:

    - Observation: numpy.ndarray of shape (T,) that contains the index
    of the observation
        - T: number of observations
    - Emission: numpy.ndarray of shape (N, M) containing the emission
    probability of a specific observation given a hidden state
        - Emission[i, j]: probability of observing j given the hidden state i
        - N: number of hidden states
        - M: number of all possible observations
    - Transition: 2D numpy.ndarray of shape (N, N) containing the transition
    probabilities
        - Transition[i, j]: probability of transitioning from the hidden
        state i to j
    - Initial: numpy.ndarray of shape (N, 1) containing the probability of
    starting in a particular hidden state
    Returns: path, P, or None, None on failure
        - path: list of length T containing the most likely sequence of
        hidden states
        - P: probability of obtaining the path sequence
    """
    if not isinstance(Observation, np.ndarray) or len(Observation.shape) != 1:
        return None, None
    if not isinstance(Emission, np.ndarray) or len(Emission.shape) != 2:
        return None, None
    if not isinstance(Transition, np.ndarray) or len(Transition.shape) != 2:
        return None, None
    if not isinstance(Initial, np.ndarray) or len(Initial.shape) != 2:
        return None, None
    if (np.sum(Emission, axis=1) != 1).any():
        return None, None
    if (np.sum(Transition, axis=1) != 1).any():
        return None, None
    if (np.sum(Initial, axis=0) != 1).any():
        return None, None

    N, M = Emission.shape
    T = Observation.shape[0]
    if N != Transition.shape[0] or N != Transition.shape[1]:
        return None, None

    omega = np.zeros((N, T))
    aux = (Initial * Emission[:, Observation[0]].reshape(-1, 1))
    omega[:, 0] = aux.reshape(-1)

    backpointer = np.zeros((N, T))
    backpointer[:, 0] = 0
    for col in range(1, T):
        for row in range(N):
            prev = omega[:, col - 1]
            trans = Transition[:, row]
            em = Emission[row, Observation[col]]
            result = prev * trans * em
            omega[row, col] = np.amax(result)
            backpointer[row, col - 1] = np.argmax(result)

    path = []
    # Find the most probable last hidden state
    last_state = np.argmax(omega[:, T - 1])
    path.append(int(last_state))

    # backtracking algorithm gotten from first read
    for i in range(T - 2, -1, -1):
        path.append(int(backpointer[int(last_state), i]))
        last_state = backpointer[int(last_state), i]

    # Flip the path array since we were backtracking
    path.reverse()

    # omega's column maxima are non-increasing over time (each column folds in
    # one more probability factor), so the minimum of the per-column maxima
    # equals the best-path probability at the final time step
    min_prob = np.amax(omega, axis=0)
    min_prob = np.amin(min_prob)

    return path, min_prob
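A minimal usage sketch for the function above, on a hypothetical two-state HMM that satisfies its row/column normalization checks:

import numpy as np

Emission = np.array([[0.9, 0.1],
                     [0.2, 0.8]])    # rows sum to 1
Transition = np.array([[0.7, 0.3],
                       [0.4, 0.6]])  # rows sum to 1
Initial = np.array([[0.6], [0.4]])   # column sums to 1
Observation = np.array([0, 0, 1, 1, 1])

path, P = viterbi(Observation, Emission, Transition, Initial)
# path is a list of 5 hidden-state indices, P the probability of that path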