def AddKernel(self, kname, feature):
    if kname == 'linear':
        self.kernels.append(Kernels.LinearKernel())
    elif kname == 'gaussian':
        self.kernels.append(Kernels.GaussianKernel(feature.params[0]))
    elif kname == 'intersection':
        self.kernels.append(Kernels.IntersectionKernel())
    elif kname == 'chi2':
        self.kernels.append(Kernels.Chi2Kernel())
def Reset(self):
    self.initialized = False
    self.box = None
    self.features = []
    self.kernels = []
    self.needIntegImg = False
    self.needIntegHist = False
    self.learner = None
    # keep a list of feature counts
    featureCounts = []
    # check for the number of features in the config file;
    # should only run for 1 iteration for our experiment (Haar only)
    for feat in self.config.features:
        self.AddFeature(feat.featureName)
        self.AddKernel(feat.kernelName, feat)
        featureCounts.append(self.features[-1].GetCount())
    # use a combined feature/kernel when there are multiple
    if len(self.config.features) > 1:
        self.features.append(MultiFeatures(self.features))
        self.kernels.append(Kernels.MultiKernel(self.kernels, featureCounts))
    self.learner = LaRank(self.config, self.features[-1], self.kernels[-1])
def Kullback(Dist, frame):
    """Jones

    Arguments:
        Dist: np.array
            The distribution to be analysed for a single frame.
        frame: int
            The frame number under consideration.

    Returns:
        KL: The Kullback-Leibler divergence, also known as the relative
            entropy between two distributions. It may loosely (and
            dangerously) be interpreted as the similarity between two
            distributions. I care about plotting this as I suspect strong
            delineations in the growth of relative entropy as the system
            undergoes a phase transition.

    A fun wikiquote quote because I was bored and felt like learning
    while coding...
    """
    KL = Kernels.KB_Dist(Dist[0], Dist[frame])
    #if frame + 1 == len(Dist):
    #    print(wikiquote.quotes(wikiquote.random_titles(max_titles=1)[0]))
    return KL
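A minimal usage sketch for the function above, assuming Dist is an (n_frames, n_bins) array of per-frame distributions (as Dist[0] and Dist[frame] imply); the Dirichlet samples are a hypothetical stand-in for real data:

import numpy as np
import matplotlib.pyplot as plt

Dist = np.random.dirichlet(np.ones(50), size=200)  # hypothetical stand-in data
kl_curve = [Kullback(Dist, f) for f in range(len(Dist))]
plt.plot(kl_curve)
plt.xlabel('frame')
plt.ylabel('KL divergence from frame 0')
plt.show()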
def trainModel(self, x_data, penalty=1, kernel=Kernels.defaultKernel(), eta=1, report=False):
    y = x_data[:, 0]
    # add a column of ones for the intercept
    X = np.hstack((np.ones((len(x_data), 1)), x_data[:, 1:]))
    weights = self._trainSVM(X, y, penalty, eta, report, kernel)
    return self.packModel(weights, penalty, kernel)
def trainModel(self, x_data, kernel=Kernels.defaultKernel(), report=False):
    # +1 for intercept
    self.weights = [0] * (x_data.shape[1] + 1)
    y = x_data[:, 0]
    # add a column of ones for the intercept
    X = np.hstack((np.ones((len(x_data), 1)), x_data[:, 1:]))
    # NOTE: X (with the intercept column) is built but _trainSVM is fed the
    # raw features; the SVM variant above passes X instead.
    mistakes = self._trainSVM(x_data[:, 1:], y, report, kernel)  # returns length 785
    print mistakes
    return mistakes
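Both trainModel variants expect the label in column 0 of x_data and the raw features after it (hence the length-785 weight vector for 784 pixels plus intercept). A hypothetical call; the file name and wrapper class are placeholders, not part of the source:

import numpy as np

train = np.loadtxt('mnist_train.csv', delimiter=',')  # hypothetical data file
model = KernelPerceptron()  # hypothetical class exposing trainModel above
mistakes = model.trainModel(train, report=True)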
def __init__(self, minPt, size, cell_size, kernel_size):
    '''Constructor.

    @param minPt A 2-tuple-like object of floats: the minimum x- and y-position of the grid.
    @param size A 2-tuple-like object of floats: the dimensions of the grid (width, height).
    @param cell_size A float; the length of the side of the square grid cells.
    @param kernel_size A float; the "size" of the kernel.
    '''
    QTGridContext.__init__(self, minPt, size, cell_size)
    self.show_kernel = True
    # TODO: Change this to support different types of kernels (when the GUI
    # supports different types of kernels).
    # Setting the cell size to kernel size / 3 guarantees three samples per sigma
    self.kernel = Kernels.GaussianKernel(kernel_size, kernel_size / 3)
    self.kernel_data = self.kernel.computeSamples()
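A back-of-envelope check of the "three samples per sigma" comment, assuming the first GaussianKernel argument acts as the standard deviation and the second as the sample spacing (an inference from the comment, not a documented signature):

kernel_size = 1.5  # illustrative value
sigma = kernel_size
cell = kernel_size / 3.0
print int(round(sigma / cell)), 'samples per sigma'  # -> 3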
def _crossValidate(test_size, trainer, folds, train_data, dim):
    start = 0
    error = 0
    kernel = Kernels.polyKernel(dim)
    start_time = time()
    for i in range(folds):
        split_test = train_data[range(start, start + test_size), 1:]
        split_label = train_data[range(start, start + test_size), 0]
        split_train = np.delete(train_data, range(start, start + test_size), axis=0)
        print "Test Size ", split_test.shape
        trainer.processData(split_train)
        model_file = "./Model/uci_perceptron_model_poly_" + str(dim)
        trainer.trainModel(kernel, True, False, model_file)
        error += _testModel(split_test, split_label, model_file, kernel)
        start += test_size
    end_time = time()
    return error, (end_time - start_time) / 60.0, dim
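The helper walks contiguous, non-overlapping folds through train_data, so the caller must size the folds to tile the data. A hypothetical driver (trainer and train_data come from the surrounding project):

folds = 5
test_size = len(train_data) // folds  # contiguous folds must cover the data
err, minutes, dim = _crossValidate(test_size, trainer, folds, train_data, 3)
print "CV error:", err, "over", minutes, "minutes (poly degree", dim, ")"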
def splatAgents(self, gridDomain, radius, pedData, overwrite=True):
    '''Splats the agents onto a grid based on position and the given radius.

    @param gridDomain An instance of AbstractGrid, specifying the grid domain and
        resolution over which the density field is calculated.
    @param radius The size (in world units) of the agent's visualization radius.
    @param pedData The pedestrian data to splat (the product of a call to
        trajectory.loadTrajectory).
    @param overwrite A boolean. Indicates whether files should be recomputed from
        scratch even if they already exist. If True, they are always created;
        if False, pre-existing files are used.
    @returns A string. The name of the output file.
    '''
    kernel = Kernels.UniformCircleKernel(radius, gridDomain.cellSize[0], False)  # False on reflect
    signal = Signals.PedestrianSignal(gridDomain.rectDomain)
    pedData.setNext(0)
    argsFunc = lambda: (signal.copyEmpty(), pedData, gridDomain, kernel)
    return self._threadWork('splat', threadConvolve, argsFunc, gridDomain, overwrite)
def execute(self):
    '''Perform the work of the task.'''
    if self.work:
        print 'Density analysis: %s' % self.workName
        print "\tAccessing scb file:", self.scbName
        frameSet = NPFrameSet(self.scbName)
        workPath = self.getWorkPath('density')
        tempFile = os.path.join(workPath, self.workName)
        grids = Crowd.GridFileSequence(tempFile)
        if self.work & AnalysisTask.COMPUTE:
            print "\tComputing"
            kernel = Kernels.GaussianKernel(self.smoothParam, self.cellSize, False)
            domain = makeDomain(self.domainX, self.domainY, self.cellSize)
            sigDomain = makeDomain(self.domainX, self.domainY)
            # signal domain is the same as the convolution domain
            signal = Signals.PedestrianSignal(sigDomain)
            s = time.clock()
            grids.convolveSignal(domain, kernel, signal, frameSet)
            print '\t\tdone in %.2f seconds' % (time.clock() - s)
        if self.work & AnalysisTask.VIS:
            dataFile = grids.outFileName + ".density"
            if not os.path.exists(dataFile):
                print "\tCan't visualize density - unable to locate file: %s" % dataFile
                return
            imageName = os.path.join(workPath, '%s_density_' % self.workName)
            s = time.clock()
            reader = Crowd.GridFileSequenceReader(dataFile)
            try:
                colorMap = COLOR_MAPS[self.colorMapName]
            except KeyError:
                print '\tError loading color map: "%s", loading flame instead' % self.colorMapName
                colorMap = COLOR_MAPS['flame']
            print '\tCreating images'
            visualizeGFS(reader, colorMap, imageName, self.outImgType, 1.0, None)
            print '\t\tdone in %.2f seconds' % (time.clock() - s)
def getPrediction(self, x_set, y_set):
    '''Pass in the datasets and output the prediction set and RMSE.'''
    if self.method == 'basisfunc':
        method = BasisFunctions(self.x_train, self.model, self.lamb, self.M, self.degree)
        method.getWeight(self.x_train, self.y_train)
        self.w, self.phiMatrix = method.w, method.phiMatrix
        y_predicted = method.getPhiMatrix(x_set).dot(method.w)
    elif self.method == 'kernel':
        method = Kernels(self.M, self.model, self.lamb, self.theta, self.degree)
        method.getAlpha(self.x_train, self.y_train)
        self.alpha, self.K = method.alpha, method.K
        y_predicted = method.getGram(self.x_train, x_set).T.dot(method.alpha)
    rmse = np.sqrt(pow(np.array(y_predicted - y_set), 2).mean())
    return y_predicted, rmse
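The 'kernel' branch follows the standard kernel ridge regression algebra: fit alpha = (K + lambda*I)^(-1) y on the training Gram matrix, then predict with k(X_train, x*)^T alpha. A self-contained sketch of that algebra with illustrative names (presumably what getAlpha/getGram implement; the class's internals are not shown here):

import numpy as np

def rbf_gram(A, B, theta=1.0):
    # pairwise squared distances -> RBF Gram matrix
    d2 = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
    return np.exp(-d2 / (2.0 * theta ** 2))

def kernel_ridge_fit_predict(X_train, y_train, X_test, lamb=1e-2, theta=1.0):
    K = rbf_gram(X_train, X_train, theta)
    alpha = np.linalg.solve(K + lamb * np.eye(len(K)), y_train)
    return rbf_gram(X_train, X_test, theta).T.dot(alpha)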
def main():
    path = Path.cwd()
    path = os.path.join(path, 'test.png')
    new_im = Image.open(path).convert('L')
    dimker = 40
    # one can pass (size, anisotropy) / (size, std, anisotropy) in the case of k2
    kernel = Kernels.k2(dimker)
    arry = signal.fftconvolve(new_im, kernel, mode='same')
    fig, (normal, ker, convol) = plt.subplots(3, 1)
    normal.imshow(new_im, cmap='gray')
    normal.set_title('Original')
    normal.set_axis_off()
    ker.imshow(kernel, cmap='gray')
    ker.set_title('Kernel')
    ker.set_axis_off()
    convol.imshow(arry, cmap='gray')
    convol.set_title('Convolved')
    convol.set_axis_off()
    fig.show()
    return arry
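Since main() returns the raw float array from fftconvolve, a hedged way to persist it as an 8-bit image (the output file name is a placeholder):

import numpy as np
from PIL import Image

out = main()
out = np.clip(out, 0, None)                      # convolution can dip below 0
out = (255 * out / out.max()).astype(np.uint8)   # rescale to 8-bit
Image.fromarray(out).save('test_convolved.png')  # hypothetical output name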
def JSD(Dist, frame):
    """Jones

    Arguments:
        Dist: np.array
            The distribution to be analysed for a single frame.
        frame: int
            The frame number under consideration.

    Returns:
        J: The Jensen-Shannon distance, a symmetric form of the KL
            divergence above. I do not yet understand fully why this should
            be a superior function to KL, but it is another telling
            descriptor.

    A fun wikiquote quote because I was bored and felt like learning
    while coding...
    """
    J = Kernels.JSD(Dist[0], Dist[frame])
    #if frame + 1 == len(Dist):
    #    print(wikiquote.quotes(wikiquote.random_titles(max_titles=1)[0]))
    return J
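For reference, the Jensen-Shannon divergence of P and Q is the mean KL divergence to their mixture M = (P + Q)/2, and the Jensen-Shannon distance is its square root. A plain NumPy sketch of what Kernels.JSD presumably computes (an assumption; the actual implementation is not shown here):

import numpy as np

def jsd_reference(P, Q, eps=1e-12):
    P = np.asarray(P, dtype=float); P = P / P.sum()
    Q = np.asarray(Q, dtype=float); Q = Q / Q.sum()
    M = 0.5 * (P + Q)
    kl = lambda a, b: np.sum(a * np.log((a + eps) / (b + eps)))
    return 0.5 * kl(P, M) + 0.5 * kl(Q, M)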
def SetKernel(self, kernel='Gaussian'):
    if kernel == 'Gaussian':
        self.kernel = Kernels.Gaussian(self, self.D)
Nend = 1
names = ['Pinhole2']     # reconstruction for those names
iterations = 500
bFORCE_REAL = False      # set to True if the rspace is real
IM_HALFSIZE = 480        # half the image size to use; our data is limited to 960x960 (480)
PerfLevel = 3            # the error will be registered every X iterations; a higher number means better performance
bUsePinholeMask = False  # True = load the pinhole mask; False = use an msize x msize mask
bUseCircularMask = True
msize = 98               # size of the mask to be used
Radius = 49
Run = 0
beta = 0.9

Kernels.LoadKernel("Scripts/HIOKernels.cu", np.int32(4 * IM_HALFSIZE * IM_HALFSIZE))
ApplyDifPad = Kernels.GetFunction("ApplyDifPad")
HIOStep = Kernels.GetFunction("HIOStep")
Copy = Kernels.GetFunction("Copy")
Error = Kernels.GetFunction("Error1DifCF")

def SaveImage(fname, Image):
    np.save(fname + '_hio', Image)
    absol = np.absolute(Image).astype(np.float32)
    maxv = absol.max()
    Normalized = (255 * np.sqrt((1.0 / maxv) * absol)).astype(np.uint8)
    cv2.imwrite(fname + '_hio.png', Normalized)
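SaveImage stores the raw complex reconstruction as .npy and a square-root-stretched 8-bit preview as PNG. A hypothetical call (the array is random stand-in data, not a real reconstruction):

rspace = np.random.rand(2 * IM_HALFSIZE, 2 * IM_HALFSIZE).astype(np.complex64)
SaveImage('Pinhole2_run0', rspace)  # writes Pinhole2_run0_hio.npy and Pinhole2_run0_hio.png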
PATH = 'ktest'
if not os.path.exists(PATH):
    os.makedirs(PATH)

cMap = BlackBodyMap()
CELL_SIZE = 0.03125  # 0.05
smoothParam = 1.5
REFLECT = True
obst, bb = obstacles.readObstacles(
    '/projects/crowd/fund_diag/paper/pre_density/experiment/Inputs/Corridor_onewayDB/c240_obstacles.xml')
obstSet = ObstacleHandler.ObjectHandler(obst)
##kernel = Kernels.UniformKernel( smoothParam, CELL_SIZE, REFLECT )
##kernel = Kernels.TriangleKernel( smoothParam / 1.1, CELL_SIZE, REFLECT )
##kernel = Kernels.BiweightKernel( smoothParam / 1.2, CELL_SIZE, REFLECT )
kernel = Kernels.GaussianKernel(smoothParam / 3.0, CELL_SIZE, REFLECT)
##kernel = Kernels.Plaue11Kernel( smoothParam, CELL_SIZE, REFLECT, obstSet )

def syntheticPedestrians(SIZE):
    '''Produce a set of fake pedestrian trajectories.

    @param SIZE The size of the origin-centered simulation domain.
    @returns An Nx2xM numpy array with N pedestrians over M frames, and
        the number of steps in the trajectory.
    '''
    STEP_COUNT = 15
    traj = np.empty((3, 2, STEP_COUNT), dtype=np.float32)
    # ped 1 - moves bottom left to top center
    start = -SIZE * 0.5 + 0.1
    end = 0
    x = np.linspace(start, end, STEP_COUNT)
acc = (0.0 + sum(Y == pred1)) / len(Y)
print 'acc=', acc
print '--------------\n'

#np.random.seed(0)
#n = 6
#X = np.random.randn(n, 2)
#Y = np.random.randint(1, 4, n)
#X = np.array([(1, 2), (3, 4), (5, 6), (7, 8), (9, 0)])
#Y = np.array([4, 1, 2, 1, 4])

svm_solver = slv.FOSVM(X, Y, C)
#kernel = Linear()
kernel = ker.RBF()

t0 = time.clock()
svm_solver.init(kernel)
t1 = time.clock()
print '\nInit takes', t1 - t0

t0 = time.clock()
svm_solver.train()
t1 = time.clock()
print '\nTakes: ', t1 - t0

for k in xrange(len(svm_solver.models)):
def main(): """Test the functionality""" from math import pi, exp import os, sys import optparse parser = optparse.OptionParser() # analysis to perform parser.add_option( "-d", "--density", help="Evaluate density.", action="store_true", dest='density', default=False ) parser.add_option( "-s", "--speed", help="Evaluate speed.", action="store_true", dest='speed', default=False ) parser.add_option( "-o", "--omega", help="Evaluate omega.", action="store_true", dest='omega', default=False ) parser.add_option( "-p", "--progress", help="Evaluate progress.", action="store_true", dest='progress', default=False ) parser.add_option( "-k", "--koshak", help="Evaluate koshak regions.", action="store_true", dest='koshak', default=False ) parser.add_option( "-i", "--include", help="Include all states", action="store_true", dest='includeAll', default=False ) # analysis domain - start, frame count, frame step parser.add_option( "-r", "--range", help="A triple of numbers: start frame, max frame count, frame step", nargs=3, action="store", dest='domain', type="int", default=(0, -1, 1) ) options, args = parser.parse_args() # input source file srcFile = sys.argv[1] pygame.init() CELL_SIZE = 0.2 MAX_AGENTS = -1 MAX_FRAMES = -1 FRAME_STEP = 1 FRAME_WINDOW = 1 START_FRAME = 0 EXCLUDE_STATES = () START_FRAME, MAX_FRAMES, FRAME_STEP = options.domain print "Command line:", START_FRAME, MAX_FRAMES, FRAME_STEP if ( True ): #increase the color bar specifications ColorMap.BAR_HEIGHT = 300 ColorMap.BAR_WIDTH = 30 ColorMap.FONT_SIZE = 20 timeStep = 1.0 outPath = '.' if ( True ): # This size doesn't work for 25k ## size = Vector2( 175.0, 120.0 ) ## minPt = Vector2( -75.0, -60.0 ) # this size DOES work for 25k size = Vector2( 215.0, 160.0 ) minPt = Vector2( -95.0, -80.0 ) res = (int( size.x / CELL_SIZE ), int( size.y / CELL_SIZE ) ) size = Vector2( res[0] * CELL_SIZE, res[1] * CELL_SIZE ) timeStep = 0.05 outPath = os.path.join( '/projects','tawaf','sim','jul2011','results' ) path = os.path.join( outPath, '{0}.scb'.format( srcFile ) ) print "Reading", path outPath = os.path.join( outPath, srcFile ) if ( not options.includeAll ): EXCLUDE_STATES = (1, 2, 3, 4, 5, 6, 7, 8, 9) domain = AbstractGrid( minPt, size, res ) print "Size:", size print "minPt:", minPt print "res:", res timeStep *= FRAME_STEP frameSet = FrameSet( path, START_FRAME, MAX_FRAMES, MAX_AGENTS, FRAME_STEP ) print "Total frames:", frameSet.totalFrames() grids = GridFileSequence( os.path.join( outPath, 'junk' ), Vector2(0,3.2), Vector2(-6., 6.)) colorMap = FlameMap() # output the parameters used to create the data # todo: R = 2.0 R = 1.5 def distFunc( dispX, dispY, radiusSqd ): """Constant distance function""" # This is the local density function provided by Helbing # using Gaussian, delta(in the equation) = radiusSqd return np.exp( -(dispX * dispX + dispY * dispY) / (2.0 * radiusSqd ) ) / ( 2.0 * np.pi * radiusSqd ) dfunc = lambda x, y: distFunc( x, y, R * R ) if ( options.density ): if ( not os.path.exists( os.path.join( outPath, 'dense' ) ) ): os.makedirs( os.path.join( outPath, 'dense' ) ) print "\tComputing density with R = %f" % R s = time.clock() kernel = Kernels.GaussianKernel( R, CELL_SIZE, False ) signal = Signals.PedestrianSignal( domain ) # signal domain is the same as convolution domain grids.convolveSignal( domain, kernel, signal, frameSet ) print "\t\tTotal computation time: ", (time.clock() - s), "seconds" print "\tComputing density images", s = time.clock() imageName = os.path.join( outPath, 'dense', 'dense' ) reader = GridFileSequenceReader( 
grids.outFileName + ".density" ) visualizeGFS( reader, colorMap, imageName, 'png', 1.0, grids.obstacles ) print "Took", (time.clock() - s), "seconds" if ( options.speed ): if ( not os.path.exists( os.path.join( outPath, 'speed' ) ) ): os.makedirs( os.path.join( outPath, 'speed' ) ) print "\tComputing speeds", s = time.clock() stats = grids.computeSpeeds( minPt, size, res, R, frameSet, timeStep, EXCLUDE_STATES, GridFileSequence.BLIT_SPEED ) stats.write( os.path.join( outPath, 'speed', 'stat.txt' ) ) stats.savePlot( os.path.join( outPath, 'speed', 'stat.png' ), 'Average speed per step' ) print "Took", (time.clock() - s), "seconds" print "\tComputing speed images", s = time.clock() imageName = os.path.join( outPath, 'speed', 'speed' ) # the limit: 0.5 means the color map is saturated from from minVal to 50% of the range reader = GridFileSequenceReader( grids.outFileName + ".speed" ) visualizeGFS( reader, colorMap, imageName, 'png', 0.75, grids.obstacles ) print "Took", (time.clock() - s), "seconds" if ( options.omega ): if ( not os.path.exists( os.path.join( outPath, 'omega' ) ) ): os.makedirs( os.path.join( outPath, 'omega' ) ) print "\tComputing omega", s = time.clock() stats = grids.computeAngularSpeeds( minPt, size, res, R, frameSet, timeStep, EXCLUDE_STATES, GridFileSequence.BLIT_SPEED, FRAME_WINDOW ) stats.write( os.path.join( outPath, 'omega', 'stat.txt' ) ) stats.savePlot( os.path.join( outPath, 'omega', 'stat.png'), 'Average radial velocity per step' ) print "Took", (time.clock() - s), "seconds" print "\tComputing omega images", s = time.clock() imageName = os.path.join( outPath, 'omega', 'omega' ) colorMap = RedBlueMap() reader = GridFileSequenceReader( grids.outFileName + ".omega" ) visualizeGFS( reader, colorMap, imageName, 'png', 1.0, grids.obstacles ) print "Took", (time.clock() - s), "seconds" if ( options.progress ): if ( not os.path.exists( os.path.join( outPath, 'progress' ) ) ): os.makedirs( os.path.join( outPath, 'progress' ) ) print "\tComputing progress", s = time.clock() stats = grids.computeProgress( minPt, size, res, R, frameSet, timeStep, EXCLUDE_STATES, FRAME_WINDOW ) stats.write( os.path.join( outPath, 'progress', 'stat.txt' ) ) stats.savePlot( os.path.join( outPath, 'progress', 'stat.png'), 'Average progress around Kaabah' ) print "Took", (time.clock() - s), "seconds" print "\tComputing progress images", s = time.clock() imageName = os.path.join( outPath, 'progress', 'progress' ) colorMap = FlameMap( (0.0, 1.0) ) reader = GridFileSequenceReader( grids.outFileName + ".progress" ) visualizeGFS( reader, colorMap, imageName, 'png', 1.0, grids.obstacles ) print "Took", (time.clock() - s), "seconds" if ( False ): if ( not os.path.exists( os.path.join( outPath, 'advec' ) ) ): os.makedirs( os.path.join( outPath, 'advec' ) ) lines = [ Segment( Vector2(0.81592, 5.12050), Vector2( 0.96233, -5.27461) ) ] print "\tComputing advection", s = time.clock() grids.computeAdvecFlow( minPt, size, res, dfunc, 3.0, R, frameSet, lines ) print "Took", (time.clock() - s), "seconds" print "\tComputing advection images", s = time.clock() imageName = os.path.join( outPath, 'advec', 'advec' ) reader = GridFileSequenceReader( grids.outFileName + ".advec" ) visualizeGFS( reader, colorMap, imageName, 'png', 1.0, grids.obstacles ) print "Took", (time.clock() - s), "seconds" if ( options.koshak ): if ( not os.path.exists( os.path.join( outPath, 'regionSpeed' ) ) ): os.makedirs( os.path.join( outPath, 'regionSpeed' ) ) print "\tComputing region speeds" s = time.clock() vertices = ( Vector2( 
-0.551530, 0.792406 ), Vector2( 3.736435, -58.246524 ), Vector2( 42.376927, -56.160723 ), Vector2( 5.681046, -6.353232 ), Vector2( 92.823337, -4.904953 ), Vector2( 5.376837, 6.823865 ), Vector2( 92.526405, 9.199321 ), Vector2( 88.517822, -48.850902 ), Vector2( 6.416100, 53.293737 ), Vector2( -5.906582, 6.230001 ), Vector2( -6.203514, 53.739135 ), Vector2( 62.833196, 57.896184 ), Vector2( 93.268736, 43.643444 ), Vector2( -41.686899, -61.322050 ), Vector2( -74.794826, -25.838665 ), Vector2( -75.388691, 49.582085 ) ) vIDs = ( (0, 3, 4, 6, 5), (5, 6, 12, 11, 8), (5, 8, 10, 9, 0), (0, 9, 10, 15, 14, 13), (0, 13, 1), (0, 1, 2, 3), (3, 2, 7, 4) ) polygons = [] for ids in vIDs: p = Polygon() p.closed = True for id in ids: p.vertices.append( vertices[id] ) polygons.append( p ) grids.computeRegionSpeed( frameSet, polygons, timeStep, EXCLUDE_STATES ) print "Took", (time.clock() - s), "seconds" # output image imagePath = os.path.join( outPath, 'regionSpeed', 'region' ) colorMap = TwoToneHSVMap( (0, 0.63, 0.96), (100, 0.53, 0.75 ) ) regionSpeedImages( grids.outFileName + ".region", imagePath, polygons, colorMap, minPt, size, res ) if ( False ): # flow lines if ( not os.path.exists( os.path.join( outPath, 'flow' ) ) ): os.makedirs( os.path.join( outPath, 'flow' ) ) lines = ( Segment( Vector2( 4.56230, -7.71608 ), Vector2( 81.49586, -4.55443 ) ), Segment( Vector2( 5.08924, 5.72094 ), Vector2( 82.28628, 8.61913 ) ), Segment( Vector2( 3.50842, 8.09218 ), Vector2( 2.71800, 51.30145 ) ), Segment( Vector2( -5.97654, 5.72094 ), Vector2( -8.87472, 51.56492 ) ), Segment( Vector2( -6.50348, -7.18914 ), Vector2( -40.75473, -53.56005 ) ), Segment( Vector2( -1.23406, -6.92567 ), Vector2( 1.13718, -51.18881 ) ), Segment( Vector2( 3.50842, -7.45261 ), Vector2( 44.08297, -45.65592 ) ) ) flow = computeFlowLines( Vector2( 0, 0 ), lines, frameSet ) flowFile = os.path.join( outPath, 'flow', 'flow.txt' ) file = open( flowFile, 'w' ) flow.write( file ) file.close() if ( False ): # Traces print "Rendering traces" s = time.clock() grids.renderTraces( minPt, size, res, frameSet, 5, 5, 'data/trace11_' ) print "Took", (time.clock() - s), "seconds"
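The distFunc defined inside main() is a normalized 2-D Gaussian with sigma^2 = radiusSqd, so it should integrate to one over the plane. A quick sanity check via a crude Riemann sum (grid extents are arbitrary choices):

import numpy as np

R = 1.5
xs = np.linspace(-10, 10, 801)
dx = xs[1] - xs[0]
gx, gy = np.meshgrid(xs, xs)
density = np.exp(-(gx * gx + gy * gy) / (2.0 * R * R)) / (2.0 * np.pi * R * R)
print density.sum() * dx * dx  # ~1.0 when the grid comfortably covers +/-3 sigma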
count_cls = np.bincount(y_map).astype(np.int32)
start_cls = count_cls.cumsum()
start_cls = np.insert(start_cls, 0, 0).astype(np.int32)

i = start_cls[bin_cls[0]] + 1
j = start_cls[bin_cls[1]] + 1
print i, j
#---------------------
num_el, dim = X.shape
gamma = 0.5
threadsPerRow = 1
prefetch = 2

rbf = ker.RBF()
rbf.gamma = gamma
rbf.init(X, Y)

vecI = X[i, :].toarray()
vecJ = X[j, :].toarray()

import time
#t0 = time.clock()
t0 = time.time()
#ki = Y[i] * Y * rbf.K_vec(vecI).flatten()
#kj = Y[j] * Y * rbf.K_vec(vecJ).flatten()
max_steps = 50
if molecule == 'Ammonia':
    neb_method = 'improved'
    max_force = 1e-3
elif molecule == 'Ethane':
    neb_method = 'simple_improved'
    max_force = 2e-3
calc_idpp = True

# optimizer
delta_t = 3.5
opt_fire = NEB.Fire(delta_t, 2 * delta_t, trust_radius)
opt = NEB.Optimizer()

kernel = Kernels.RBF([0.8])
C1 = 1e6
C2 = 1e7
eps = 1e-5
restarts = 5
opt_steps = 1
optimize_parameters = True
norm_y = True
#ml_method = MLDerivative.IRWLS(kernel, C1=C1, C2=C2, epsilon=1e-5, epsilon_prime=1e-5, max_iter=1e4)
#ml_method = MLDerivative.RLS(kernel, C1=C1, C2=C2)
#ml_method = MLDerivative.GPR(kernel, opt_restarts=restarts, opt_parameter=optimize_parameters, noise_value=1./C1,
#                             noise_derivative=1./C2, normalize_y=norm_y)
ml_method = NNModel.NNModel(molecule, C1=C1,
from numpy.fft import fftn, ifftn
from common import printw
import pycuda.autoinit
from pycuda.autoinit import context
import pycuda.gpuarray as gpuarray
import skcuda.fft as cu_fft
import skcuda
from copy import deepcopy
from ptychoposcorrection import *
from time import time
import Kernels

# Load the CUDA kernel module; params: name, number of pixels, list of values to replace in the source code
Kernels.LoadKernel("Scripts/PIEKernels.cu", np.int32(imagApert[0] * imagApert[1]),
                   ["#define NumModes X", NumModes])

""" Get CUDA kernel functions """
# Basic ePIE kernels
ReplaceInObject = Kernels.GetFunction("ReplaceInObject")
ApplyDifPad = Kernels.GetFunction("ApplyDifPad")
ExitwaveAndBuffer = Kernels.GetFunction("ExitwaveAndBuffer")
ApertureAbs2 = Kernels.GetFunction("ApertureAbs2")
ObjectAbs2 = Kernels.GetFunction("ObjectAbs2")
CopyFromROI = Kernels.GetFunction("CopyFromROI")
CropObject = Kernels.GetFunction("CropObject")
UpdateProbeAndRspace = Kernels.GetFunction("UpdateProbeAndRspace")
# Only used if bPhaseShift = True
PhaseShiftFunc = Kernels.GetFunction("PhaseShift")
#
# xy = start_pos
# xx = xy[:, 0]
# yy = xy[:, 1]

c1 = 1.
c2 = 1.
gamma = 1.1
epsilon = 0.01

sv_val_2d = []
sv_test_2d = []
sk_test_2d = []
sk_val_2d = []

kernel_2d_gpr = Kernels.RBF([1., 1.]) * Kernels.ConstantKernel()
sv_test_2d.append(sv.GPR(Kernels.ConstantKernel() * kernel_2d_gpr))
sv_test_2d.append(sv.RLS(Kernels.RBF()))
sk_test_2d.append(
    gpr.GaussianProcessRegressor(gpr.kernels.RBF([1., 1.]) * gpr.kernels.ConstantKernel(),
                                 normalize_y=True))
sk_test_2d.append(svm.SVR(kernel='rbf', gamma=gamma, epsilon=epsilon))
##
for element in sk_test_2d:
    element.fit(xy, energy(xx, yy).reshape(-1))
    sk_val_2d.append((element.__class__.__name__, element.predict(xy_pred)))

grad_x, grad_y = gradient(xx, yy)
grad = np.concatenate([grad_x.reshape(-1, 1), grad_y.reshape(-1, 1)], axis=1)
def debugFieldConvolve():
    '''Test field convolution with a simple signal.'''
    global CELL_SIZE
    if ( False ):
        # synthetic
        SCALE = 10  # 30
        K_SIZE = 1.5
        R = False
        ## kernel = Kernels.UniformKernel( K_SIZE * SCALE * CELL_SIZE, CELL_SIZE, R )
        kernel = Kernels.TriangleKernel( K_SIZE * SCALE * CELL_SIZE / 1.1, CELL_SIZE, R )
        ## kernel = Kernels.BiweightKernel( K_SIZE / 1.2 * SCALE * CELL_SIZE, CELL_SIZE, R )
        ## kernel = Kernels.GaussianKernel( K_SIZE / 3.0 * SCALE * CELL_SIZE, CELL_SIZE, R )
        # synthetic data
        # define the domain
        W = 8 * SCALE
        H = 10 * SCALE
        minCorner = Vector2( -W / 2.0, -H / 2.0 )
        domainSize = Vector2( W * CELL_SIZE, H * CELL_SIZE )
        resolution = ( W, H )
        data = np.zeros( ( W, H ), dtype=np.float32 )
        print data.shape
        winset = W / 2 - 2 * SCALE
        hinset = W / 2 - 2 * SCALE
        data[ winset:-winset, hinset:-hinset ] = 1.0
        grid = Grid.DataGrid( minCorner, domainSize, resolution )
        sigGrid = Grid.DataGrid()
        sigGrid.copyDomain( grid )
        sigGrid.cells[ :, : ] = data
        signal = Signals.FieldSignal( sigGrid )
    else:
        # voronoi signal
        CELL_SIZE = 0.025
        K_SIZE = 1.0
        R = True
        kernel = Kernels.UniformKernel( K_SIZE, CELL_SIZE, R )
        ## kernel = Kernels.TriangleKernel( K_SIZE / 1.1, CELL_SIZE, R )
        ## kernel = Kernels.BiweightKernel( K_SIZE / 1.2, CELL_SIZE, R )
        ## kernel = Kernels.GaussianKernel( K_SIZE / 3.0, CELL_SIZE, R )
        minCorner = Vector2( 0.0, -4.0 )
        width = 2.4
        height = 8.0
        resolution = ( int( np.ceil( width / CELL_SIZE ) ), int( np.ceil( height / CELL_SIZE ) ) )
        domainSize = Vector2( resolution[0] * CELL_SIZE, resolution[1] * CELL_SIZE )
        sigGrid = Grid.DataGrid( minCorner, domainSize, resolution )
        computeVornoiField( sigGrid )
        signal = Signals.FieldSignal( sigGrid )
    # set up convolution grid
    corner = Vector2( 0.0, -3 )
    height = 6.0
    resolution = ( int( np.ceil( width / CELL_SIZE ) ), int( np.ceil( height / CELL_SIZE ) ) )
    domainSize = Vector2( resolution[0] * CELL_SIZE, resolution[1] * CELL_SIZE )
    grid = Grid.DataGrid( corner, domainSize, resolution )
    print "Input signal max:", sigGrid.cells.max()
    print "Input signal sum:", sigGrid.cells.sum()
    minVal = 0
    maxVal = sigGrid.cells.max()
    s = sigGrid.surface( cMap, minVal, maxVal )
    pygame.image.save( s, os.path.join( PATH, 'fieldBefore.png' ) )
    kernel.convolve( signal, grid )
    s = grid.surface( cMap, minVal, maxVal )
    print "Convolved signal max:", grid.cells.max()
    print "Convolved signal sum:", grid.cells.sum()
    pygame.image.save( s, os.path.join( PATH, 'fieldAfter.png' ) )
print(np.linalg.norm(Model.Ky - kski))
print(np.linalg.norm(Model.alpha - Model1.alpha))
'''
'''
with open('gp.npz', 'wb') as f1:
    np.savez(f1, x=Model.mu, y=Model.x, z=Model.y, w=Model.X)
with open('kissgp.npz', 'wb') as f2:
    np.savez(f2, x=Model1.mu, y=Model1.x, z=Model1.y, w=Model1.X)
'''

start = time.time()
g1, f1, r1 = Kernels.exact_Gaussian_grad2(Model1.W, Model1.grid.x, Model1.y,
                                          Model1.kernel.hyp, Model1.kernel.rank_fix)
end = time.time()
print(end - start)
print(' ')

start = time.time()
g2, f2, r2 = Kernels.D_Gaussian_Kron(Model1.W, Model1.grid.x, Model1.y,
                                     Model1.kernel.hyp, Model1.kernel.rank_fix, epsilon=1e-2)
end = time.time()
print(end - start)
print(' ')
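A quick hedged check that the two gradient routines agree, assuming g1/g2 are array-like gradients and f1/f2 are scalar objective values (an inference from the timing comparison above):

print(np.linalg.norm(np.asarray(g1) - np.asarray(g2)))  # gradient agreement
print(abs(f1 - f2))                                     # function-value agreement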