def test_add(self):
    gaussian_one = Gaussian(25, 3)
    gaussian_two = Gaussian(30, 4)
    gaussian_sum = gaussian_one + gaussian_two
    self.assertEqual(gaussian_sum.mean, 55)
    self.assertEqual(gaussian_sum.stdev, 5)
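# The test above implies Gaussian supports '+' with independent-variable semantics: means
# add and variances add (3**2 + 4**2 = 25, hence stdev 5). A minimal method sketch that
# would satisfy the test is shown below; the mean/stdev attribute names come from the
# test, everything else is an assumption about the class.
import math

def __add__(self, other):
    # hypothetical Gaussian.__add__ consistent with test_add
    result = Gaussian(self.mean + other.mean,
                      math.sqrt(self.stdev ** 2 + other.stdev ** 2))
    return result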
def main():
    """ Entry point of app """
    cap = cv2.VideoCapture("detectbuoy.avi")
    gaussian = Gaussian()
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    out = cv2.VideoWriter('output_part_2_3.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 15,
                          (frame_width, frame_height))
    did_run_once = False
    i = 0
    while cap.isOpened():
        i = i + 1
        ret, frame = cap.read()
        if frame is not None and not did_run_once:
            did_run_once = False  # the flag never flips, so every frame is processed and written
            frame = gaussian.detect_buoys(frame, i)
            cv2.imshow('Buoy Detection', frame)
            out.write(frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    out.release()  # finalize the output video
    cv2.destroyAllWindows()
def __init__(self, data, sigma_min=1, sigma_max=1, mix=.5):
    mu_min = min(data)
    mu_max = max(data)
    self.data = data
    self.G1 = Gaussian(uniform(mu_min, mu_max), uniform(sigma_min, sigma_max))
    self.G2 = Gaussian(uniform(mu_min, mu_max), uniform(sigma_min, sigma_max))
    self.Prior1 = 0.5
    self.Prior2 = 0.5
    self.mix_factor = mix
class GMM:
    "Gaussian Mixture Model"

    def __init__(self, data, sigma_min=1, sigma_max=1, mix=.5):
        mu_min = min(data)
        mu_max = max(data)
        self.data = data
        self.G1 = Gaussian(uniform(mu_min, mu_max), uniform(sigma_min, sigma_max))
        self.G2 = Gaussian(uniform(mu_min, mu_max), uniform(sigma_min, sigma_max))
        self.Prior1 = 0.5
        self.Prior2 = 0.5
        self.mix_factor = mix

    def E_step(self):
        "Expectation step"
        self.loglike = 0
        for datum in self.data:
            pdf1 = self.G1.pdf(float(datum)) * self.Prior1
            pdf2 = self.G2.pdf(float(datum)) * self.Prior2
            tot_prob = pdf1 + pdf2
            pdf1 /= tot_prob
            pdf2 /= tot_prob
            self.loglike += log(tot_prob)
            yield (pdf1, pdf2)

    def M_step(self, weights):
        "Maximization step"
        (left, right) = zip(*weights)
        sum_left = sum(left)
        sum_right = sum(right)
        self.G1.mu = sum(w * d for (w, d) in zip(left, self.data)) / sum_left
        self.G2.mu = sum(w * d for (w, d) in zip(right, self.data)) / sum_right
        self.G1.sigma = sqrt(sum(w * (d - self.G1.mu) ** 2
                                 for (w, d) in zip(left, self.data)) / sum_left)
        self.G2.sigma = sqrt(sum(w * (d - self.G2.mu) ** 2
                                 for (w, d) in zip(right, self.data)) / sum_right)
        self.Prior1 = sum_left / len(self.data)
        self.Prior2 = 1 - self.Prior1

    def iterate(self, N=1):
        "Iterate over N steps"
        for i in range(N):
            self.M_step(self.E_step())
        # Recompute the log-likelihood for the updated parameters; E_step is a
        # generator, so it must be exhausted for self.loglike to be accumulated.
        list(self.E_step())

    def pdf(self, x):
        return self.Prior1 * self.G1.pdf(x) + self.Prior2 * self.G2.pdf(x)
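# A minimal usage sketch for the GMM class above (not from the original source). It
# assumes a Gaussian helper class exposing mu, sigma and pdf(x), and that uniform, log
# and sqrt are imported at module level as the class requires.
from math import log, sqrt
from random import uniform, gauss

if __name__ == '__main__':
    # synthetic 1-D data drawn from two components
    data = [gauss(-2.0, 0.7) for _ in range(200)] + [gauss(3.0, 1.0) for _ in range(200)]
    best = None
    for restart in range(5):   # a few random restarts guard against a poor initialisation
        model = GMM(data, sigma_min=0.5, sigma_max=2.0)
        model.iterate(N=30)
        if best is None or model.loglike > best.loglike:
            best = model
    print(best.G1.mu, best.G1.sigma, best.Prior1)
    print(best.G2.mu, best.G2.sigma, best.Prior2)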
def _em(self, trans, states):
    '''
    Perform parameter estimation for a hidden Markov model (HMM).

    Perform parameter estimation for an HMM using multi-dimensional Gaussian
    states. The training observation sequences, signals, are available to the
    class, and states designates the initial allocation of emitting states to
    the signal time steps. The HMM parameters are estimated using Viterbi
    re-estimation.

    Note: It is possible that some states are never allocated any observations.
    Those states are then removed from the states table, effectively reducing
    the number of emitting states. In what follows, n_states is the original
    number of emitting states, while n_states' is the final number of emitting
    states, after those states to which no observations were assigned have been
    removed.

    Parameters
    ----------
    trans : (n_states+1, n_states+1) ndarray
        The left-to-right transition probability table. The rightmost column
        contains the probability of transitioning to the final state, and the
        last row the initial state's transition probabilities. Note that all
        the rows need to add to 1.
    states : (n_obs, n_states) ndarray
        Initial allocation of signal time-steps to states as a one-hot
        encoding. Thus 'states[:,j]' specifies the allocation of all the
        observations to state j.

    Return
    ------
    trans : (n_states'+1, n_states'+1) ndarray
        Updated transition probability table.
    dists : (n_states',) list
        Gaussian object of each component.
    newLL : float
        Log-likelihood of parameters at convergence.
    iters : int
        The number of iterations needed for convergence.
    '''
    # Initialize the covariances and means using the initial state allocation
    covs, means = self._updatecovs(states)
    dists = [Gaussian(mean=means[i], cov=covs[i]) for i in range(len(covs))]
    oldstates, trans, oldLL = self._calcstates(trans, dists)
    converged = False
    iters = 0
    while not converged and iters < self.maxiters:
        covs, means = self._updatecovs(oldstates)
        dists = [Gaussian(mean=means[i], cov=covs[i]) for i in range(len(covs))]
        newstates, trans, newLL = self._calcstates(trans, dists)
        if abs(newLL - oldLL) / abs(oldLL) < self.rtol:
            converged = True
        oldstates, oldLL = newstates, newLL
        iters += 1
    if iters >= self.maxiters:
        warn("Maximum number of iterations reached - HMM parameters may not have converged")
    return trans, dists, newLL, iters
class TestGaussianClass(unittest.TestCase):

    def setUp(self):
        self.gaussian = Gaussian(25, 2)

    def test_initialization(self):
        self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
        self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')

    def test_pdf(self):
        self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,
                         'pdf function does not give expected result')

    def test_meancalculation(self):
        self.gaussian.read_data_file('numbers.txt', True)
        self.assertEqual(self.gaussian.calculate_mean(),
                         sum(self.gaussian.data) / float(len(self.gaussian.data)),
                         'calculated mean not as expected')

    def test_stdevcalculation(self):
        self.gaussian.read_data_file('numbers.txt', True)
        self.assertEqual(round(self.gaussian.stdev, 2), 92.87,
                         'sample standard deviation incorrect')
        self.gaussian.read_data_file('numbers.txt', False)
        self.assertEqual(round(self.gaussian.stdev, 2), 88.55,
                         'population standard deviation incorrect')
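# A conventional runner so the test case above can be executed directly
# (e.g. python test_gaussian.py); the file name is an assumption, the unittest
# idiom itself is standard.
import unittest

if __name__ == '__main__':
    unittest.main()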
def test_gaussian():
    p = Gaussian([3, 2, 2])
    x = p.sample(10)
    assert x.shape[0] == 10
    assert x.shape[1] == 3
    assert x.shape[2] == 2
    assert x.shape[3] == 2
    t = torch.tensor([i for i in range(3 * 2 * 2)]).to(torch.float32).reshape(1, 3, 2, 2)
    logp = p.logProbability(t)
    assert_almost_equal(logp.item(), -264.0273, decimal=4)
def test_bijective():
    p = Gaussian([8])
    tList = [
        utils.SimpleMLP([4, 10, 4]),
        utils.SimpleMLP([4, 10, 4]),
        utils.SimpleMLP([4, 10, 4]),
        utils.SimpleMLP([4, 10, 4])
    ]
    #x = torch.randn(100,8)
    # Build your NICE net here, may take multiple lines.
    f = NICE(tList, prior=p)
    # import pdb
    # pdb.set_trace()
    x = f.sample(100)               # samples after the transform
    op = f.logProbability(x)        # log-probability after the transform
    y, pi = f.inverse(x)            # samples before the transform
    pp = f.prior.logProbability(y)  # log-probability before the transform
    yx, pf = f.forward(y)
    yxy, pfi = f.inverse(yx)
    assert_array_almost_equal(x.detach().numpy(), yx.detach().numpy())
    assert_array_almost_equal(y.detach().numpy(), yxy.detach().numpy())
    assert_array_almost_equal(pi.detach().numpy(), -pf.detach().numpy())
    assert_array_almost_equal(pf.detach().numpy(), -pfi.detach().numpy())
    assert_array_almost_equal((op + pi).detach().numpy(), pp.detach().numpy())
    assert_array_almost_equal((pp - pfi).detach().numpy(), op.detach().numpy())
def estimate(self, estimation_pt, sample, bandwidth):
    """
    Performs a GWR estimation at a given point.
    estimation_pt is a list, tuple or numpy array containing the coordinates
    of the regression point.
    sample is a list or array with a sample to compute a prediction at the
    same time, by applying the regression parameters found by the regression
    to this value.
    bandwidth is the kernel bandwidth.
    Returns a GWRResult object.
    """
    # Compute the weights of all known samples using the Euclidean
    # distance between the points, and a Gaussian kernel.
    if self.kernel:
        w = self.kernel(self.locations, estimation_pt, bandwidth)[0]
    else:
        w = Gaussian.kernel(self.locations, estimation_pt, bandwidth)[0]
    # Perform the weighted regression using Maximum likelihood
    if self.family is None:
        # Gaussian GWR
        res = sm.WLS(self.targets, self.samples, w)
        fres = res.fit()
    else:
        # Poisson GWR
        res = sm.GLM(endog=self.targets, exog=self.samples, family=self.family)
        fres = res.fit(data_weights=w)
    # Pack everything into a GWRResult object.
    # A GWRResult allows convenient inspection of the results.
    gwr_result = GWRResult(estimation_pt, sample, fres, self)
    del fres
    return gwr_result
def add_to_input(inp_str, **kwargs):
    from gaussian import Gaussian
    route = get_route(inp_str)
    command = Gaussian(**kwargs)._get_route().replace('#p', '').replace('#P', '')
    n_route = route + command
    return inp_str.replace(route, n_route)
def run(self, context):
    # Turn raw python array into ndarray for easier math
    if self.norm_device:
        data = np.array(context.getData(self.pos_device, self.sig_device, self.norm_device))
        x = data[0]
        y = data[1]
        n = data[2]
        print "x = ", x
        print "y = ", y
        print "n = ", n
        print "norm: ", self.norm_value
        y = y * float(self.norm_value) / n
        context.logData("normalized", y.nda)
    else:
        data = np.array(context.getData(self.pos_device, self.sig_device))
        x = data[0]
        y = data[1]
        print "x = ", x
        print "y = ", y

    # Compute fit
    g = Gaussian.fromCentroid(x, y)
    print g
    fit = g.values(x)

    # Log the 'fit' data for later comparison with raw data
    context.logData("fit", fit.nda)

    # Set PVs with result
    context.write(self.pv_pos, g.center)
    context.write(self.pv_height, g.height)
    context.write(self.pv_width, g.width)
def __call__(self, domain, field):
    """
    :param object domain: A Domain
    :param object field: Current field
    :return: Field after modification by Generator
    :rtype: Object
    """
    self.field = field
    for b, bit in enumerate(self.bit_stream):
        if bit["m"] > 0:
            self.shape = Gaussian(**bit())
        else:
            self.shape = Sech(**bit())
        if domain.channels > 1:
            self.field[self.channel] += self.shape.generate(domain.t)
        else:
            self.field += self.shape.generate(domain.t)
        # Alternative: Only affect field of the current bit,
        # not the entire field:
        #~spb = domain.samples_per_bit
        #~bit_range = (b * spb, (b + 1) * spb)
        #~if domain.channels > 1:
        #~    self.field[self.channel][bit_range[0]:bit_range[1]] += \
        #~        self.shape.generate(domain.t)[bit_range[0]:bit_range[1]]
        #~else:
        #~    self.field[bit_range[0]:bit_range[1]] += \
        #~        self.shape.generate(domain.t)[bit_range[0]:bit_range[1]]
    return self.field
def generate_initial_gaussians(amount, minvalue, maxvalue):
    gaussians = []
    for i in range(amount):
        gaussians.append(
            Gaussian(random.gauss((maxvalue - minvalue) / 2, 10),
                     max(1, random.gauss(3, 3)),
                     1.0 / amount))
    return gaussians
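# A hypothetical driver for the helper above; the three-argument
# Gaussian(mean, stdev, weight) constructor is inferred from the call,
# not confirmed by the original source.
import random

initial_mixture = generate_initial_gaussians(amount=4, minvalue=0, maxvalue=100)
for g in initial_mixture:
    print(g)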
def main():
    angle = ChangeValue()
    window = pyglet.window.Window(width=786, height=600, vsync=False)
    projection = Projection(0, 0, window.width, window.height, near=0.1, far=100)
    width, height = 128, 128
    ripples = Ripples(width, height)
    height_texture = Texture(width * 2, height * 2, format=GL_RGBA32F)
    processor = Processor(height_texture)
    gaussian = Gaussian(processor)
    heightmap = Heightmap(width * 2, height * 2, scale=1.2)
    sun = Sun()

    def rain(delta):
        x = random.randint(0, ripples.width)
        y = random.randint(0, ripples.height)
        size = random.random() * 0.5
        with nested(ripples.framebuffer, Color):
            glPointSize(size)
            glBegin(GL_POINTS)
            glColor4f(0.2, 0.2, 0.2, 1.0)
            glVertex3f(x, y, 0)
            glEnd()

    fps = pyglet.clock.ClockDisplay()
    pyglet.clock.schedule_interval(rain, 0.2)
    pyglet.clock.schedule(lambda delta: None)

    @window.event
    def on_draw():
        window.clear()
        ripples.step()
        processor.copy(ripples.result, height_texture)
        gaussian.filter(height_texture, 2)
        heightmap.update_from(height_texture)
        with nested(projection, Color, sun):
            glColor3f(7 / 256.0, 121 / 256.0, 208 / 256.0)
            glPushMatrix()
            glTranslatef(0, 0, -1)
            glRotatef(10, 1, 0, 0)
            glRotatef(angle, 0.0, 1.0, 0.0)
            glTranslatef(-0.5, 0, -0.5)
            heightmap.draw()
            glPopMatrix()
        fps.draw()
        ripples.result.draw()
        heightmap.vertex_texture.draw(2 * width, 0, scale=0.5)
        heightmap.normal_texture.draw(4 * width, 0, scale=0.5)

    glEnable(GL_POINT_SMOOTH)
    glEnable(GL_LINE_SMOOTH)
    glClampColorARB(GL_CLAMP_VERTEX_COLOR_ARB, GL_FALSE)
    glClampColorARB(GL_CLAMP_FRAGMENT_COLOR_ARB, GL_FALSE)
    glClampColorARB(GL_CLAMP_READ_COLOR_ARB, GL_FALSE)
    gl_init(light=False)
    pyglet.app.run()
def grid_update(grid, grid_size, live, dead):
    """Function to update the grid after evolution.

    Args:
        grid (array): input array
        grid_size (int): size of grid
        live (int): value assigned to live cells
        dead (int): value assigned to dead cells

    Returns:
        [array]: updated grid after calculating the neighbour sum
    """
    # copy the grid, then go line by line
    mean = 0
    std = 0.1
    newGrid = grid.copy()
    for i in range(grid_size):
        for j in range(grid_size):
            # compute the 8-neighbour sum, perturbing each neighbour with Gaussian noise
            neighbours_sum = (
                (Gaussian(mean, std).get_sample() + grid[i, (j - 1) % grid_size]) +
                (Gaussian(mean, std).get_sample() + grid[i, (j + 1) % grid_size]) +
                (Gaussian(mean, std).get_sample() + grid[(i - 1) % grid_size, j]) +
                (Gaussian(mean, std).get_sample() + grid[(i + 1) % grid_size, j]) +
                (Gaussian(mean, std).get_sample() + grid[(i - 1) % grid_size, (j - 1) % grid_size]) +
                (Gaussian(mean, std).get_sample() + grid[(i - 1) % grid_size, (j + 1) % grid_size]) +
                (Gaussian(mean, std).get_sample() + grid[(i + 1) % grid_size, (j - 1) % grid_size]) +
                (Gaussian(mean, std).get_sample() + grid[(i + 1) % grid_size, (j + 1) % grid_size]))
            # noise-free version of the same sum:
            # neighbours_sum = (grid[i, (j-1)%grid_size] + grid[i, (j+1)%grid_size] +
            #                   grid[(i-1)%grid_size, j] + grid[(i+1)%grid_size, j] +
            #                   grid[(i-1)%grid_size, (j-1)%grid_size] + grid[(i-1)%grid_size, (j+1)%grid_size] +
            #                   grid[(i+1)%grid_size, (j-1)%grid_size] + grid[(i+1)%grid_size, (j+1)%grid_size])
            # Conway's rules
            if grid[i, j] == live:
                if (neighbours_sum < 2) or (neighbours_sum > 3):
                    newGrid[i, j] = dead
                elif (neighbours_sum >= 2) and (neighbours_sum <= 3):
                    newGrid[i, j] = live
            else:
                if neighbours_sum == 3:
                    newGrid[i, j] = live
    return newGrid
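# A hypothetical driver for grid_update, assuming a Gaussian(mean, std) class with a
# get_sample() method as used above; the grid size, density and live/dead encoding are
# made up for illustration.
import numpy as np

if __name__ == '__main__':
    LIVE, DEAD = 1, 0
    grid_size = 16
    grid = np.random.choice([LIVE, DEAD], size=(grid_size, grid_size), p=[0.3, 0.7])
    for generation in range(10):
        grid = grid_update(grid, grid_size, LIVE, DEAD)
    print(grid.sum(), "cells alive after 10 noisy generations")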
class TestGaussianClass(unittest.TestCase):

    def setUp(self):
        self.gaussian = Gaussian(25, 2)

    # def test_initialization(self):
    #     self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
    #     self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')

    def test_pdf(self):
        self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,
                         'pdf function does not give expected result')
def main():
    """ Entry point of app """
    cap = cv2.VideoCapture("detectbuoy.avi")
    gaussian = Gaussian()
    did_run_once = False
    while cap.isOpened():
        ret, frame = cap.read()
        if frame is not None and not did_run_once:
            did_run_once = True
            frame = gaussian.detect_buoys(frame)
            cv2.imshow('Buoy Detection', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
def instrPSF(self, ghostpath):
    '''
    Grabs instrumental psf for negative planet injection.
    Intended to be expanded to include default gaussian.

    Initialized: William B. 9/22/2020
    '''
    if self.usegaussian is True:
        gauss = Gaussian(31, self.fwhm)
        psf = gauss.g
    else:
        psf = fits.getdata(ghostpath)
    return psf
def get_distributions(num_means, segment_length, difficulty, bound):
    noise_var = walk_noise_variance(difficulty, bound)
    attraction_coeff = necessary_attraction_coefficient(
        noise_var, walk_stationary_variance(difficulty, bound))
    means = walk_from_stationary_dist(noise_var, attraction_coeff, num_means)
    segment_var = iid_segment_variance(difficulty, bound)
    distributions = []
    for mean in means:
        for _step in range(segment_length):
            distributions.append(Gaussian(mean, segment_var))
    return distributions
def handle_gaussian_task():
    print "{0: >8}\t{1: <16} {2: <14} {3: <18}".format(
        "Nodes", "Result", "H", "Runge")
    nodes = 2
    prev_result = 0.0
    prev_runge = 0.0
    runge = 0.0
    while nodes <= TASK2.NODES_LIMIT:
        gauss = Gaussian(FUNCTION, TASK2.HIGH, TASK2.LOW, nodes)
        result = gauss.integrate()
        if prev_result != 0:
            runge = runge_error(prev_result, result, TASK2.PRECISION)
            output = "{0: >8}\t{1: <16} {2: <14} {3: <18} {4: <14}".format(
                nodes, result, gauss.h, runge, prev_runge * 1.0 / runge)
        else:
            output = "{0: >8}\t{1: <16} {2: <14}".format(
                nodes, result, gauss.h)
        print output
        prev_result = result
        prev_runge = runge
        nodes *= 2
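# runge_error is not defined in this snippet. One common Runge-rule estimate of the
# quadrature error when the node count is doubled is sketched below; treating the third
# argument as the order of accuracy p is an assumption, not something the original
# source confirms.
def runge_error(prev_result, result, p):
    # Richardson/Runge estimate: |I_2n - I_n| / (2**p - 1)
    return abs(result - prev_result) / (2 ** p - 1)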
def prob(self, gauss):
    """Returns the probability of drawing the provided Gaussian from this prior."""
    d = self.mu.shape[0]
    wishart = Wishart(d)
    gaussian = Gaussian(d)
    wishart.setDof(self.n)
    wishart.setScale(self.getLambda())
    gaussian.setMean(self.mu)
    gaussian.setPrecision(self.k * gauss.getPrecision())
    return wishart.prob(gauss.getPrecision()) * gaussian.prob(gauss.getMean())
def __init__(self, data, limits, num_bins, title, xlabel, ylabel, file_name,
             fig_num, units):
    self.data = data
    print(np.min(data))
    print(np.max(data))
    self.limits = limits
    self.num_bins = num_bins
    self.title = title
    self.xlabel = xlabel
    self.ylabel = ylabel
    self.file_name = file_name
    self.fig_num = fig_num
    self.n = []
    self.bins = []
    self.patches = []
    self.mean = float()
    self.resolution = float()
    self.FWHM = float()
    self.signal_limits = tuple()
    self.lower_sideband_limits = tuple()
    self.upper_sideband_limits = tuple()
    self.noise_fit = list()
    self.N = float()
    self.B = float()
    self.S = float()
    self.prob = float()
    self.bin_width = (np.max(self.data) - np.min(self.data)) / self.num_bins
    self.parameters_g = dict()
    self.parameters_2g = dict()
    self.parameters_cb = dict()
    self.single_gaussian = Gaussian()          # utilizes the imported gaussian module
    self.double_gaussian = Double_gaussian()   # utilizes the imported double gaussian module
    self.crystal_ball = Crystal_ball()         # utilizes the imported crystal ball module
    self.units = units
def oniom_comp_calcs(atoms_oniom, **kwargs):
    """extracts component calculations from an atoms_oniom object that has run
    an =OnlyInputFiles oniom calculation"""
    from gaussian import Gaussian
    # recursion means if we pass in [atoms1, atoms2, atoms3...] we return
    # [low_reals, high_models, low_models] where low_reals = [low_real1, low_real2, low_real3...] etc.
    if isinstance(atoms_oniom, list):
        return [
            list(c)
            for c in zip(*[oniom_comp_calcs(r, **kwargs) for r in atoms_oniom])
        ]

    proc, mem, ver = (atoms_oniom.calc.job_params['nodes'],
                      atoms_oniom.calc.job_params['memory'],
                      atoms_oniom.calc.job_params['version'])
    init_inp_strs = extract_oniom_inputs(atoms_oniom)
    inp_strs = [add_to_input(s, **kwargs) for s in init_inp_strs]
    old_label = atoms_oniom.calc.label
    components = []
    comp_strs = ['low_real', 'high_model', 'low_model']
    method_strs = get_oniom_calc_methods(
        atoms_oniom.calc.method.replace('=OnlyInputFiles', ''))

    for i in range(len(inp_strs)):
        atoms_comp = copy.deepcopy(atoms_oniom)
        new_label = old_label + '_' + comp_strs[i]
        try:
            method, basis = method_strs[i].split('/')
        except ValueError:
            method, basis = method_strs[i], ''
        # the method/basis variables are not used as we are defining raw_input,
        # but it is useful to be able to read them later on
        atoms_comp.set_calculator(
            Gaussian(label=new_label, raw_input=inp_strs[i], method=method, basis=basis))
        atoms_comp.calc.set_job(nodes=proc, memory=mem, version=ver)
        # the input files gaussian produces have no link information so we have to add that manually
        atoms_comp.calc.initialize(atoms_comp)
        atoms_comp.calc.extra_params.update({'initial_raw_input': inp_strs[i]})
        atoms_comp.calc.extra_params['raw_input'] = atoms_comp.calc._get_link0() + inp_strs[i]
        components.append(atoms_comp)
    return components
def main():
    # mean, variance
    np.random.seed(0)
    gaussian = Gaussian()
    green, red, yellow = gaussian.getBuoys()
    cou = 1
    time = 1
    data_r = np.zeros((1, 19))
    data_g = np.zeros((1, 19))
    data_b = np.zeros((1, 19))
    no_of_clusters = 3
    for image in green:
        if cou < 3:
            img = cv2.imread(image)
            # note: cv2.imread returns BGR, so index 0 is actually the blue channel
            redChannel = img[:, :, 0]
            # print('red: ', redChannel.shape)
            greenChannel = img[:, :, 1]
            blueChannel = img[:, :, 2]
            for (row1, row2, row3) in zip(redChannel, greenChannel, blueChannel):
                if time == 1:
                    data_r = row1
                    data_g = row2
                    data_b = row3
                    time = 3
                else:
                    data_r = np.append(data_r, row1, axis=0)
                    data_g = np.append(data_g, row2, axis=0)
                    data_b = np.append(data_b, row3, axis=0)
            cou = 2
    data_r = np.array(data_r).flatten()
    data_b = np.array(data_b).flatten()
    data_g = np.array(data_g).flatten()
    b = np.vstack((data_g, data_b))
    data = np.vstack((data_r, b))
    print(data[0].shape)
    compute_EM(no_of_clusters, data)
def runPlotLoop(robot: cozmo.robot.Robot):
    global particles

    # create plot
    plt.ion()
    plt.show()
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(1, 1, 1, aspect=1)
    ax.set_xlim(m.grid.minX(), m.grid.maxX())
    ax.set_ylim(m.grid.minY(), m.grid.maxY())
    plotMap(ax, m)
    particlesXYA = np.zeros([numParticles, 3])
    for i in range(0, numParticles):
        particlesXYA[i, :] = particles[i].toXYA()
    particlePlot = plt.scatter(particlesXYA[:, 0], particlesXYA[:, 1],
                               color="red", zorder=3, s=10, alpha=0.5)
    empiricalG = Gaussian.fromData(particlesXYA[:, 0:2])
    gaussianPlot = plotGaussian(empiricalG, color="red")

    # main loop
    t = 0
    while True:
        # update plot
        for i in range(0, numParticles):
            particlesXYA[i, :] = particles[i].toXYA()
        particlePlot.set_offsets(particlesXYA[:, 0:2])
        empiricalG = Gaussian.fromData(particlesXYA[:, 0:2])
        plotGaussian(empiricalG, color="red", existingPlot=gaussianPlot)
        plt.show(block=False)
        plt.pause(0.001)
        time.sleep(0.01)
def run(self, context):
    # Turn raw python array into ndarray for easier math
    data = array(context.getData(self.pos_device, self.sig_device))
    x = data[0]
    y = data[1]

    # Compute fit
    g = Gaussian.fromCentroid(x, y)
    print g
    fit = g.values(x)

    # Log the 'fit' data for later comparison with raw data
    context.logData("fit", fit.nda)

    # Set PVs with result
    context.write(self.pos_device, g.center)
def __init__(self, data, n_mix, sigma_min=0.1, sigma_max=1.0):
    self.data = data
    mu_min = min(data)
    mu_max = max(data)
    # create the n gaussians that comprise the gaussian mixture model
    self.gaussians = {}
    # mixing probability of each gaussian
    self.mix = {}
    # number of gaussians
    self.n_mix = n_mix
    # initialize the gaussians with uniform mixing weights
    for i in range(0, n_mix):
        self.gaussians[i] = Gaussian(uniform(mu_min, mu_max),
                                     uniform(sigma_min, sigma_max))
        self.mix[i] = 1 / n_mix  # note: under Python 2 this is integer division; use 1.0 / n_mix there
def test_bijective():
    p = Gaussian([10])
    tList = [SimpleMLP([5, 10, 5], [nn.ELU(), nn.Tanh()]) for _ in range(2)]
    sList = [SimpleMLP([5, 10, 5], [nn.ELU(), nn.Tanh()]) for _ in range(2)]
    #tList = [utils.SimpleMLP([28*28/2, 28*28, 28*28/2]) for _ in range(4)]
    #sList = [utils.SimpleMLP([28*28/2, 28*28, 28*28/2]) for _ in range(4)]
    maskList = []
    for i in range(len(tList) // 2):
        b = torch.zeros(1, 10).byte()
        i = torch.randperm(b.numel()).narrow(0, 0, b.numel() // 2)
        b.zero_()[:, i] = 1
        b_ = 1 - b
        maskList.append(b)
        maskList.append(b_)
    '''
    b = torch.zeros(1,28*28).byte()
    b[:,:28*28//2] = 1
    for i in range(len(tList)):
        maskList.append(b)
        b = 1-b
    '''
    maskList = torch.cat(maskList, 0)
    #x = torch.randn(1,8)
    # Build your NICE net here, may take multiple lines.
    # import pdb
    # pdb.set_trace()
    f = Realnvp(sList, tList, p, maskList)
    x = f.sample(10)          # print: 10 rows, 1*8 matrix
    op = f.logProbability(x)  # print: 1*10 matrix
    y, pi = f.inverse(x)
    pp = f.prior.logProbability(y)
    yx, pf = f.forward(y)
    yxy, pfi = f.inverse(yx)
    assert_array_almost_equal(x.detach().numpy(), yx.detach().numpy(), decimal=5)
    assert_array_almost_equal(y.detach().numpy(), yxy.detach().numpy(), decimal=5)
    assert_array_almost_equal(pi.detach().numpy(), -pf.detach().numpy(), decimal=5)
    assert_array_almost_equal(pf.detach().numpy(), -pfi.detach().numpy(), decimal=5)
    assert_array_almost_equal((op + pi).detach().numpy(), pp.detach().numpy(), decimal=5)
    assert_array_almost_equal((pp - pfi).detach().numpy(), op.detach().numpy(), decimal=5)
def parse(filename):
    lines = (line.rstrip('\n') for line in open(filename, 'r'))
    evidence = []
    gaussians = []
    mustlink = []
    cannotlink = []
    for line in lines:
        parts = line.split(' ')
        if parts[0] == 'g':
            gaussians.append(Gaussian(float(parts[1]), float(parts[2]), float(parts[3])))
        elif parts[0] == 'e':
            evidence.append(Evidence(float(parts[1])))
        elif parts[0] == 'c':
            cannotlink.append(CannotLinkConstraint(evidence[int(parts[1])], evidence[int(parts[2])]))
        elif parts[0] == 'm':
            mustlink.append(MustLinkConstraint(evidence[int(parts[1])], evidence[int(parts[2])]))
    return (evidence, gaussians, mustlink, cannotlink)
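# The file format parse() expects can be read off the branches above: 'g' lines carry
# three floats for a Gaussian, 'e' lines a single evidence float, and 'c'/'m' lines two
# integer indices into the evidence list. A hypothetical input illustrating that layout
# (the numeric values are made up):
#
#   g 0.5 0.0 1.0
#   g 0.5 4.0 1.5
#   e 0.2
#   e 3.8
#   m 0 1
#   c 0 1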
def construct_inst_PSF(self, outside=None):
    '''
    Constructs the instrumental psf to use as the forwarded model, either from
    an instrumental ghost or a gaussian. FWHM of gaussian can be input or taken
    from header of files in filelist.
    '''
    if self.fwhm is None:
        fwhm = self.head["0PCTFWHM"]
    else:
        fwhm = self.fwhm

    if outside is not None:
        psf = outside
    else:
        ghostdata, moffat, gauss, gaufwhm, newfwhm, smallermof = ghost.ghostIsolation(
            self.filepaths, 380, 220, 10, fwhm, 1, fwhm=fwhm)
        self.fwhm = newfwhm
        fwhm = newfwhm
        if self.ePSF == 'doGaussian':
            gauss = Gaussian(31, newfwhm)
            psf = gauss.g
            # psf = gaufwhm
        elif self.ePSF == 'doGaussFit':
            psf = gauss
        elif self.ePSF == 'doMoffat':
            psf = smallermof  # moffat
        elif self.ePSF == 'doGhost':
            psf = ghostdata
        else:
            psf = fits.getdata(self.ePSF)

    # shape instrumental psf how pyklipFM wants
    psf2 = np.zeros((1, psf.shape[0], psf.shape[1]))
    psf2[0] = psf
    self.psf2 = psf2
    from pyklip.klip import high_pass_filter
    filtersize = psf2.shape[0] / self.hpf
    self.psf2 = high_pass_filter(self.psf2, filtersize)
def setUp(self):
    self.gaussian = Gaussian(25, 2)
window = pyglet.window.Window()
projection = Projection(0, 0, window.width, window.height)
noise = ShaderProgram(
    FragmentShader.open('shaders/noise.frag'),
    seed=0.0,
)
width, height = 64, 64
noise_texture = Texture(width, height, format=GL_RGBA32F)
noise_processor = Processor(noise_texture)
texture = Texture(64, 64, format=GL_RGBA32F)
processor = Processor(texture)
noise_processor.filter(noise_texture, noise)
processor.copy(noise_texture, texture)
gaussian = Gaussian(processor)
gaussian.filter(texture, 2)
rotation = ChangeValue()

@window.event
def on_draw():
    window.clear()
    with nested(projection, texture):
        glPushMatrix()
        glTranslatef(0, 0, -3)
        glRotatef(-45, 1, 0, 0)
        glRotatef(rotation, 0.0, 0.0, 1.0)
        quad(-1, 1, 1, -1)
def common_flow(self, nodes, accuracy):
    gauss = Gaussian(self.function, self.high, self.low, nodes)
    result = gauss.integrate()
    self.assertAlmostEquals(result, self.accurate_result, accuracy)
def sample(self):
    """Returns a Gaussian, drawn from this prior."""
    d = self.mu.shape[0]
    wishart = Wishart(d)
    gaussian = Gaussian(d)
    ret = Gaussian(d)
    wishart.setDof(self.n)
    wishart.setScale(self.getLambda())
    ret.setPrecision(wishart.sample())
    gaussian.setPrecision(self.k * ret.getPrecision())
    gaussian.setMean(self.mu)
    ret.setMean(gaussian.sample())
    return ret
class Gaussian_variable(Variable):  # Inherits Variable class

    # Method to initialize the class parameters to be used later for analysis
    def __init__(self, data, limits, num_bins, title, xlabel, ylabel, file_name,
                 fig_num, units):
        self.data = data
        print(np.min(data))
        print(np.max(data))
        self.limits = limits
        self.num_bins = num_bins
        self.title = title
        self.xlabel = xlabel
        self.ylabel = ylabel
        self.file_name = file_name
        self.fig_num = fig_num
        self.n = []
        self.bins = []
        self.patches = []
        self.mean = float()
        self.resolution = float()
        self.FWHM = float()
        self.signal_limits = tuple()
        self.lower_sideband_limits = tuple()
        self.upper_sideband_limits = tuple()
        self.noise_fit = list()
        self.N = float()
        self.B = float()
        self.S = float()
        self.prob = float()
        self.bin_width = (np.max(self.data) - np.min(self.data)) / self.num_bins
        self.parameters_g = dict()
        self.parameters_2g = dict()
        self.parameters_cb = dict()
        self.single_gaussian = Gaussian()          # utilizes the imported gaussian module
        self.double_gaussian = Double_gaussian()   # utilizes the imported double gaussian module
        self.crystal_ball = Crystal_ball()         # utilizes the imported crystal ball module
        self.units = units

    # method that finds the mean of a gaussian by finding the maximum bin in a gaussian histogram
    def find_mean(self):
        largest_bin = np.amax(self.n)
        i = np.where(self.n == largest_bin)[0][0]
        self.mean = self.bins[i]
        return self.mean

    # method to set the Full Width Half Maximum after visual inspection of the histogram
    def set_FWHM(self, x):
        self.FWHM = x
        return self.FWHM

    # method to obtain an estimate of the resolution of the histogram after finding the mean and setting the FWHM
    def find_resolution(self):
        self.resolution = self.mean / self.FWHM
        return self.resolution

    # method that finds the minimum and maximum indices that satisfy a limit within the self.bins parameter
    def find_limits_index(self, limits):
        i_min = 0
        i_max = 0
        for i in range(0, len(self.bins)):
            if self.bins[i] <= limits[0]:
                i_min = i
            elif self.bins[i] >= limits[0] and self.bins[i] <= limits[1]:
                i_max = i
        return (i_min, i_max)

    # method to obtain the minimum and maximum indices that characterize the signal in the self.bins parameter.
    # The signal is the part of the curve that is most characterized by a gaussian curve
    def find_signal_limits(self):
        limits = (self.mean - 30., self.mean + 30)
        i_limits = self.find_limits_index(limits)
        self.signal_limits = i_limits
        return i_limits

    # method to find the number of events within the signal limits
    def num_events_signal_width(self):
        total = 0
        for j in range(self.signal_limits[0], self.signal_limits[1]):
            total += self.n[j]
        self.N = float(total)
        return total

    # method that finds the minimum and maximum indices of the limits for both sidebands
    # on either side of the self.bins parameter
    def find_sideband_limits(self):
        lower_limits = (self.bins[0], self.bins[0] + 30)
        i_lower_limits = self.find_limits_index(lower_limits)
        self.lower_sideband_limits = i_lower_limits
        upper_limits = (self.bins[self.num_bins] - 30, self.bins[self.num_bins])
        i_upper_limits = self.find_limits_index(upper_limits)
        self.upper_sideband_limits = i_upper_limits
        return i_lower_limits, i_upper_limits

    # method that does a least squares fit to the sideband data to obtain a straight line's parameters
    # (the gradient and y-intercept) that characterize the background of the data
    def lstsqr_fit_noise(self):
        temp_x = self.bins[self.upper_sideband_limits[0]:self.upper_sideband_limits[1]]
        x_data = np.append(
            self.bins[self.lower_sideband_limits[0]:self.lower_sideband_limits[1]], temp_x)
        temp_y = self.n[self.upper_sideband_limits[0]:self.upper_sideband_limits[1]]
        y_data = np.append(
            self.n[self.lower_sideband_limits[0]:self.lower_sideband_limits[1]], temp_y)
        A = np.vstack([x_data, np.ones(len(x_data))]).T
        # calls the least squares fit function within numpy.linalg
        m, c = np.linalg.lstsq(A, y_data, rcond=None)[0]
        self.noise_fit = [m, c]
        return m, c

    # method to obtain the number of events that characterize the background of the data from the
    # parameters previously found for the least squares fit of the straight line
    def signal_num_noise(self):
        m = self.noise_fit[0]
        c = self.noise_fit[1]
        self.B = np.sum(m * (self.bins[self.signal_limits[0]:self.signal_limits[1]]) + c)
        return self.B

    # method to find the actual number of events that are believed to have been due to
    # Jpsi meson production and not background
    def actual_signal_events(self):
        self.S = self.N - self.B
        return self.S, self.N

    ## Single Gaussian
    # method that returns the output of a gaussian with an exponential decay background from parameters given to it
    def gauss(self, x, F, a, mu, st):
        y = (1 - F) * self.single_gaussian.single_gaussian(x, mu, st) + F * self.exponential(x, a)
        return y

    # method that returns the output of the negative natural logarithm of the maximum likelihood of a
    # single gaussian with an exponential decay for background from parameters given to it
    def NLOGL(self, F, a, mu, st):
        a = np.longdouble(a)
        L = -1 * np.sum(np.log(self.gauss(self.data, F, a, mu, st)))
        return L

    ## Double Gaussian
    # method that returns the output of a double gaussian with an exponential decay background
    # from parameters given to it
    def gauss_two(self, x, F, a, mu_1, st_1, st_2, Q):
        y = (1 - F) * self.double_gaussian.double_gaussian(x, mu_1, st_1, st_2, Q) + F * self.exponential(x, a)
        return y

    # method that returns the output of the negative natural logarithm of the maximum likelihood of a
    # double gaussian with an exponential decay for background from parameters given to it
    def NLOGL_2(self, F, a, mu_1, st_1, st_2, Q):
        a = np.longdouble(a)
        Q = np.longdouble(Q)
        L = -1 * np.sum(np.log(self.gauss_two(self.data, F, a, mu_1, st_1, st_2, Q)))
        return L

    ## Crystal Ball
    # method that returns the output of the negative natural logarithm of the maximum likelihood of a
    # crystal ball function with an exponential decay for background from parameters given to it
    def NLOGL_crystalball(self, n, a, mu, st, F, w):
        n = (n)
        a = np.longdouble(a)
        mu = np.longdouble(mu)
        st = np.longdouble(st)
        F = np.longdouble(F)
        w = np.longdouble(w)
        e = (self.data - mu) / st
        x_1 = self.data[(e > -a)]
        x_2 = self.data[(e <= -a)]
        L_1 = float()
        L_2 = float()
        if len(x_1) > 0:  # passes data that satisfies the first condition in the crystal ball function
            L_1 = np.sum(
                np.log((1 - F) * self.crystal_ball.crystal_ball_1(x_1, n, a, mu, st) +
                       F * self.exponential(x_1, w)))
        if len(x_2) > 0:  # passes data that satisfies the second condition in the crystal ball function
            L_2 = np.sum(
                np.log((1 - F) * self.crystal_ball.crystal_ball_2(x_2, n, a, mu, st) +
                       F * self.exponential(x_2, w)))
        L = -1 * (L_1 + L_2)
        return L

    # method that plots a histogram of the probability density function with a crystal ball function
    # fitted to it from parameters found using a maximum likelihood fit
    def plot_crystal_ball(self, fig_num):
        n = self.parameters_cb[0]
        a = self.parameters_cb[1]
        mu = self.parameters_cb[2]
        st = self.parameters_cb[3]
        F = self.parameters_cb[4]
        w = self.parameters_cb[5]
        exp_y = np.array([])
        crystal_y = np.array([])
        plt.figure(fig_num)
        ax1 = plt.subplot(211)
        # Finds y for the model to be fitted and just for the crystal ball part
        for i in range(0, len(self.bins) - 1):
            x = self.bins[i]
            if (x - mu) / st > -a:
                y = (1 - F) * self.crystal_ball.crystal_ball_1(x, n, a, mu, st) + F * self.exponential(x, w)
                exp_y = np.append(exp_y, np.array([y]))
                c_y = (1 - F) * self.crystal_ball.crystal_ball_1(x, n, a, mu, st)
                crystal_y = np.append(crystal_y, np.array([c_y]))
            elif (x - mu) / st <= -a:
                y = (1 - F) * self.crystal_ball.crystal_ball_2(x, n, a, mu, st) + F * self.exponential(x, w)
                exp_y = np.append(exp_y, np.array([y]))
                c_y = (1 - F) * self.crystal_ball.crystal_ball_2(x, n, a, mu, st)
                crystal_y = np.append(crystal_y, np.array([c_y]))
        self.expy_cb = exp_y
        expo_y = F * self.exponential(self.bins[0:self.num_bins], w)  # finds y for the background exponential decay
        plt.xlabel(self.xlabel)
        plt.ylabel("Probability Density per " + str(round_digits(self.bin_width, 2)) + " " + self.units)
        plt.title("Probability Density Function with Crystal Ball Function Fit:\n " + self.title)
        # Note that density and stacked being true ensures the histogram is normalized
        # and thus a PDF (Probability Density Function)
        u, bins, patches = plt.hist(self.data, self.num_bins, self.limits, density=True, stacked=True)
        ax1.plot(self.bins[0:self.num_bins], self.expy_cb, label="Full Crystal Ball Model Fit")
        ax1.plot(self.bins[0:self.num_bins], crystal_y, label="Crystal Ball Function Part")
        ax1.plot(self.bins[0:self.num_bins], expo_y, label="Background Exponential Decay Fit")
        ax1.legend()
        return self.expy_cb

    # method that plots a histogram of the probability density function with a single gaussian fitted
    # to it from parameters found using a maximum likelihood fit
    def plot_single_gaussian(self, fig_num):
        F = self.parameters_g[0]
        a = self.parameters_g[1]
        mu = self.parameters_g[2]
        st = self.parameters_g[3]
        plt.figure(fig_num)
        ax1 = plt.subplot(211)
        exp_y = self.gauss(self.bins[0:self.num_bins], F, a, mu, st)  # finds y for the full model
        gauss_y = (1 - F) * self.single_gaussian.single_gaussian(
            self.bins[0:self.num_bins], mu, st)  # finds y for just the gaussian part
        expo_y = F * self.exponential(
            self.bins[0:self.num_bins], a)  # finds y for the exponential background decay
        self.expy_single = exp_y
        plt.xlabel(self.xlabel)
        plt.ylabel("Probability Density per " + str(round_digits(self.bin_width, 2)) + " " + self.units)
        plt.title("Probability Density Function with Single Gaussian Fit:\n " + self.title)
        self.prob, bins, patches = plt.hist(self.data, self.num_bins, self.limits, density=True, stacked=True)
        ax1.plot(self.bins[0:self.num_bins], self.expy_single, label="Full Single Gaussian Model Fit")
        ax1.plot(self.bins[0:self.num_bins], gauss_y, label="Single Gaussian Part")
        ax1.plot(self.bins[0:self.num_bins], expo_y, label="Exponential Background Decay")
        ax1.legend()
        return self.expy_single

    # method that plots a histogram of the probability density function (PDF) with a double gaussian
    # fitted to it from parameters found using a maximum likelihood fit
    def plot_double_gaussian(self, fig_num):
        F = self.parameters_2g[0]
        a = self.parameters_2g[1]
        mu_1 = self.parameters_2g[2]
        st_1 = self.parameters_2g[3]
        st_2 = self.parameters_2g[4]
        Q = self.parameters_2g[5]
        plt.figure(fig_num)
        ax1 = plt.subplot(211)
        print(self.bin_width)
        self.expy_double = self.gauss_two(
            self.bins[0:self.num_bins], F, a, mu_1, st_1, st_2, Q)  # finds y for the full double gaussian model
        gauss1_y = (1 - F) * Q * self.single_gaussian.single_gaussian(
            self.bins[0:self.num_bins], mu_1, st_1)  # finds y for the narrow gaussian
        gauss2_y = (1 - F) * (1 - Q) * self.single_gaussian.single_gaussian(
            self.bins[0:self.num_bins], mu_1, st_2)  # finds y for the wide gaussian
        expo_y = F * self.exponential(
            self.bins[0:self.num_bins], a)  # finds y for the exponential background decay
        plt.xlabel(self.xlabel)
        plt.ylabel("Probability Density per " + str(round_digits(self.bin_width, 2)) + " " + self.units)
        plt.title("Probability Density Function with Double Gaussian Fit:\n " + self.title)
        n, bins, patches = plt.hist(self.data, self.num_bins, self.limits, density=True, stacked=True)
        ax1.plot(self.bins[0:self.num_bins], self.expy_double, label="Full Double Gaussian Model Fit")
        ax1.plot(self.bins[0:self.num_bins], gauss1_y, label="Narrow Gaussian Fit")
        ax1.plot(self.bins[0:self.num_bins], gauss2_y, label="Wide Gaussian Fit")
        ax1.plot(self.bins[0:self.num_bins], expo_y, label="Exponential Background Decay")
        ax1.legend()
        return self.expy_double

    # method that plots the residuals between the PDF and a given y set of data
    def plot_residuals(self, y):
        ax2 = plt.subplot(212)
        residuals = self.prob - y
        plt.xlabel(self.xlabel)
        plt.ylabel(str("Residuals of " + self.ylabel))
        plt.title(str("Plot of the Residuals versus " + self.xlabel))
        ax2.scatter(self.bins[0:self.num_bins], residuals)
        plt.tight_layout()
def fetch(self):
    """Returns the Gaussian distribution calculated so far."""
    ret = Gaussian(self.mean.shape[0])
    ret.setMean(self.mean)
    ret.setCovariance(self.scatter / float(self.n))
    return ret
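# fetch() above implies an incremental estimator that keeps a running mean, a scatter
# matrix and a sample count. A self-contained sketch of that pattern is given below;
# the class name and add() method are assumptions for illustration, and only the
# mean/scatter/n attributes mirror the snippet above.
import numpy as np

class RunningGaussianSketch:
    def __init__(self, dims):
        self.n = 0
        self.mean = np.zeros(dims)
        self.scatter = np.zeros((dims, dims))   # sum of outer products of deviations

    def add(self, x):
        # Welford-style update: keeps the mean and scatter numerically stable,
        # so that covariance = scatter / n as used by fetch().
        self.n += 1
        delta = x - self.mean
        self.mean += delta / self.n
        self.scatter += np.outer(delta, x - self.mean)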