def __init__(s, centroid, stddev):
    """Build an object's probability map.

    Probability map, centroid and stddev are all in N-E coordinates
    from the origin at the lower right.
    """
    # Uniform starting map; update() then reweights every cell by a
    # Gaussian of its range from the centroid.
    s.pmap = n.ndarray((ARRAY_SIZE, ARRAY_SIZE), buffer=n.ones(ARRAY_SIZE**2))
    s.centroid = centroid
    s.stddev = stddev
    # Lambda parameters named north/east so they do not shadow the
    # numpy alias `n` used above.
    s.update(lambda north, east: gaussian(0, s.stddev)(
        toPolar(north - s.centroid[0], east - s.centroid[1])[0]))
def execute(s):
    """Shift the object's probability map by s.deltapos.

    Cells whose source position falls outside the old map are estimated
    as a Gaussian of their range from the shifted centroid; the whole
    map is then renormalised to sum to 1.

    NOTE: a full Gaussian redistribution over all cell pairs would be
    ARRAY_SIZE**4 iterations -- far too slow in pure Python.  This hacky
    version just shifts the array by the rounded delta instead.
    """
    tpmap = n.ndarray((ARRAY_SIZE, ARRAY_SIZE), buffer=n.zeros(ARRAY_SIZE**2))
    ncentroid = (s.obj.centroid[0] + s.deltapos[0],
                 s.obj.centroid[1] + s.deltapos[1])
    tpmaps = 0.
    for r in range(ARRAY_SIZE):
        for c in range(ARRAY_SIZE):
            # Source cell, shifted by the delta expressed in array units.
            ar = int(round(r - s.deltapos[0] / ARRAY_SCALE))
            ac = int(round(c - s.deltapos[1] / ARRAY_SCALE))
            # Explicit bounds check: the previous bare try/except let a
            # negative index silently wrap to the far edge of pmap
            # instead of falling back to the Gaussian estimate.
            if 0 <= ar < ARRAY_SIZE and 0 <= ac < ARRAY_SIZE:
                tpmap[r][c] = s.obj.pmap[ar][ac]
            else:
                tpmap[r][c] = gaussian(0, s.obj.stddev)(toPolar(
                    ncentroid[0] - r * ARRAY_SCALE,
                    ncentroid[1] - c * ARRAY_SCALE)[0])
            tpmaps += tpmap[r][c]
    tpmap /= tpmaps  # renormalise to a probability distribution
    s.obj.pmap = tpmap
    s.obj.update(lambda n, e: 1.)
def execute(s):
    """Shift the object's probability map by s.deltapos.

    Cells whose source position falls outside the old map are estimated
    as a Gaussian of their range from the shifted centroid; the whole
    map is then renormalised to sum to 1.

    NOTE: a full Gaussian redistribution over all cell pairs would be
    ARRAY_SIZE**4 iterations -- far too slow in pure Python.  This hacky
    version just shifts the array by the rounded delta instead.
    """
    tpmap = n.ndarray((ARRAY_SIZE, ARRAY_SIZE), buffer=n.zeros(ARRAY_SIZE**2))
    ncentroid = (s.obj.centroid[0] + s.deltapos[0],
                 s.obj.centroid[1] + s.deltapos[1])
    tpmaps = 0.
    for r in range(ARRAY_SIZE):
        for c in range(ARRAY_SIZE):
            # Source cell, shifted by the delta expressed in array units.
            ar = int(round(r - s.deltapos[0] / ARRAY_SCALE))
            ac = int(round(c - s.deltapos[1] / ARRAY_SCALE))
            # Explicit bounds check: the previous bare try/except let a
            # negative index silently wrap to the far edge of pmap
            # instead of falling back to the Gaussian estimate.
            if 0 <= ar < ARRAY_SIZE and 0 <= ac < ARRAY_SIZE:
                tpmap[r][c] = s.obj.pmap[ar][ac]
            else:
                tpmap[r][c] = gaussian(0, s.obj.stddev)(toPolar(
                    ncentroid[0] - r * ARRAY_SCALE,
                    ncentroid[1] - c * ARRAY_SCALE)[0])
            tpmaps += tpmap[r][c]
    tpmap /= tpmaps  # renormalise to a probability distribution
    s.obj.pmap = tpmap
    s.obj.update(lambda n, e: 1.)
def __init__(self, vertex_shader, fragment_shader):
    """Compile the blur program and upload normalised Gaussian weights.

    A 5-tap Gaussian (sigma^2 = 4.0) is evaluated at offsets 0..4 and
    normalised so that centre + 2 * each side tap sums to 1, then
    written to the u_weights[] uniform array.
    """
    super(GaussianBlurProgram, self).__init__(vertex_shader, fragment_shader)
    sigma2 = 4.0
    weights = np.zeros(5)
    weights[0] = gaussian(0, 0, sigma2)
    sum_weights = weights[0]
    for offset in range(1, 5):
        weights[offset] = gaussian(offset, 0, sigma2)
        # Side taps count twice: the kernel is mirrored about the centre.
        sum_weights += 2 * weights[offset]
    normalised = [w / sum_weights for w in weights]
    with self:
        for idx, weight in enumerate(normalised):
            location = glGetUniformLocation(self.program,
                                            'u_weights[%d]' % idx)
            glUniform1f(location, weight)
def getWeighted(self,mag,pitches):
    """
    Creates the mask with the shape of \"mag\" and the pitches in \"pitches\"
    and having gaussians centered in the frequency bands

    For every instrument, pitch contour and time frame with a positive
    pitch, frequency-bin slices for the harmonics are computed and a
    min-max-normalised Gaussian bump, scaled by the stored harmonic
    amplitude, is added into that slice.  The per-instrument maps are
    peak-normalised per frame and concatenated side by side.

    # NOTE(review): assumes pitches indexes as a 3-D array
    # [instrument, contour, frame] -- confirm against callers.
    """
    filtered = np.zeros((self.ninst,mag.shape[0],mag.shape[1]))
    for j in range(self.ninst): #for all the inputed instrument pitches
        for p in range(len(pitches[j])): #for each pitch contour
            for t in range(len(pitches[j,p])):
                if pitches[j,p,t] > 0:
                    # Frequency-bin slices for each harmonic of this pitch.
                    slices_y = util.slicefft_slices(pitches[j,p,t],size=(mag.shape[-1]-1)*2,interval=self.interval,tuning_freq=self.tuning_freq,nharmonics=self.nharmonics,fmin=self.fmin,fmax=self.fmax,iscale=self.iscale,sampleRate=self.sampleRate)
                    # One Gaussian bump per harmonic slice, spanning its width.
                    gss = [util.gaussian(np.linspace(-1,1,slices_y[k].stop-slices_y[k].start), 1/(slices_y[k].stop-slices_y[k].start), (slices_y[k].stop-slices_y[k].start)) for k in range(len(slices_y))]
                    for k in range(len(slices_y)):
                        # Min-max normalise the bump, scale by the harmonic
                        # amplitude for this instrument/pitch, accumulate.
                        filtered[j,t,slices_y[k]] = filtered[j,t,slices_y[k]] + (gss[k]-min(gss[k]))/(max(gss[k])-min(gss[k]))*self.harmonics[j,int(pitches[j,p,t]),k]
                    # Release the per-frame temporaries.
                    gss = None
                    slices_y = None
    # Peak-normalise each instrument's mask per time frame (1e-18 floor
    # guards against division by zero on empty frames).
    filtered /= np.expand_dims(np.maximum(1e-18,filtered.max(axis=2)),axis=2)
    mask = np.zeros((mag.shape[0],self.ninst*mag.shape[1]))
    for j in range(self.ninst): #for all the inputed instrument pitches
        mask[:,j*mag.shape[1]:(j+1)*mag.shape[1]] = filtered[j,:,:]
    # Drop references to help the garbage collector on large inputs.
    filtered = None
    j=None
    p=None
    t=None
    k=None
    return mask
def __add_in_distrurbance(self, t, tau, pose):
    """Adds distrubance to end effector.

    A time-dependent Gaussian force pulse along the local x axis is
    rotated by gamma and mapped through the Jacobian (transposed,
    evaluated at *pose*) into joint torques added to *tau*.
    """
    angle = self.gamma
    origin = np.asmatrix([[0], [0]])
    pulse = gaussian(t, self.a, self.t0, self.sigma)
    fd = np.asmatrix([[pulse], [0]])
    fd = rotate(origin, fd, angle)
    return tau + self.JtT.subs(pose) * fd
def lidar_weight(self, measurements):
    """
    Determines the likelihood of the particle being close to the robot
    based on the robot's lidar measurements.
    """
    weight = 1.0
    for idx, expected in enumerate(self.lidar_measurements):
        # Each ray must have been precomputed for this particle.
        if expected is None:
            raise RuntimeError("Must compute lidar measurement before calling lidar_weight")
        # Product of per-ray Gaussian likelihoods.
        weight *= util.gaussian(expected, self.sensor_noise, measurements[idx])
    return weight
def run():
    """Train the GAN: alternate discriminator and generator updates.

    Relies on module-level state: netG/netD, trainloader, optimD/optimG,
    schedulerD/schedulerG, loss fns, device, sigma, lamb, real/fake
    targets, epochs, dataset and log_dir.  Saves checkpoints per epoch.
    """
    netG.train()
    netD.train()
    for epoch in range(epochs):
        for i, data in enumerate(trainloader):
            input = Variable(data).to(device)
            input_z = util.gaussian(input, 1, 0, sigma**2) # add noise to input
            # == update Discriminator ==
            netD.zero_grad()
            Gz = netG(input_z) # G(z)
            D_Gz = netD(Gz.detach()) # D(G(z)), detach to avoid inplace error
            Dx = netD(input) # D(x)
            D_fake_loss = adver_loss_fn(torch.squeeze(D_Gz), fake)
            D_real_loss = adver_loss_fn(torch.squeeze(Dx), real)
            D_loss = (D_real_loss + D_fake_loss) * 0.5
            # retain_graph: Gz's graph is reused below for the G update.
            D_loss.backward(retain_graph=True)
            optimD.step()
            # == update Generator ==
            netG.zero_grad()
            D_Gz = netD(Gz) # ! DO NOT REUSE netD(Gz.detach())
            recon_loss = recon_loss_fn(Gz, input)
            G_adver_loss = adver_loss_fn(torch.squeeze(D_Gz), real)
            # lamb trades off reconstruction vs adversarial objectives.
            G_loss = (1 - lamb) * recon_loss + lamb * G_adver_loss
            G_loss.backward()
            optimG.step()
            if i % 10 == 9:
                print('{}: Epoch {}/{} Batch {}/{}'.\
                    format(dataset, epoch+1, epochs, i+1, len(trainloader)),
                    'Recon.: {:.6f}'.format(recon_loss.item()),
                    'D.: {:.6f}'.format(D_loss.item()),
                    'G.: {:.6f}'.format(G_loss.item()))
        schedulerD.step()
        schedulerG.step()
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (log_dir, epoch + 1))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (log_dir, epoch + 1))
    print('Training done')
def __init__(self, kernel_size=5, sigma=2, pad=None, device='cuda'):
    """Precompute a 3-D Gaussian kernel and its padding.

    kernel_size may be a scalar (expanded to all three dims) or a
    3-sequence.  The kernel is reshaped to (1, 1, D, H, W) -- presumably
    a conv3d weight layout; confirm against the forward pass.
    """
    super(GaussBlur3d, self).__init__()
    # EAFP: a scalar size is not subscriptable, so expand it to 3 dims.
    try:
        kernel_size[0]
    except TypeError:
        kernel_size = (kernel_size, kernel_size, kernel_size)
    kernel = util.gaussian(kernel_size, sigma, device=device)
    self.weights = kernel.view(1, 1, *kernel_size)
    if pad is None:
        pad = util.pad(kernel_size)
    # Reverse the padding order -- NOTE(review): presumably to match
    # F.pad's last-dim-first convention; confirm.
    self.pad = pad[::-1]
def __init__(self, position, flux, sigma, shape):
    """A simulated observation of a star.

    Parameters
    ----------
    position : tuple
        The pixel coordinates of the star.
    flux : float
        Some measure of the brightness of the star in the image.
    sigma : float
        Some measure of the spread of the star -- seeing + PSF.
    shape : tuple
        Shape of the generated image array.
    """
    x0, y0 = position
    self.x0 = x0
    self.y0 = y0
    self.flux = flux
    self.sigma = sigma
    # Render the star as a Gaussian profile over the full image shape.
    self.data = util.gaussian(flux=flux, position=(x0, y0),
                              sigma=sigma, shape=shape)
    logger.debug("Created new DCStar : {}".format(self))
def bDistance(s, n, e):
    """Belief weight for point (n, e) from a measured distance.

    Gaussian likelihood of the point's range from the source object's
    centroid, floored by UPDATE_FLOOR so no cell is ever zeroed out.
    """
    rng = toPolar(n - s.objfrom.centroid[0], e - s.objfrom.centroid[1])[0]
    return gaussian(s.dist, s.distunc)(rng) + UPDATE_FLOOR
def bHeading(s, n, e):
    """Belief weight for point (n, e) from a measured heading.

    Gaussian likelihood of the point's bearing from the source object's
    centroid, floored by UPDATE_FLOOR so no cell is ever zeroed out.
    """
    bearing = toPolar(n - s.objfrom.centroid[0], e - s.objfrom.centroid[1])[1]
    return gaussian(s.heading, s.headingunc)(bearing) + UPDATE_FLOOR
def bDistance(s, n, e):
    """Belief weight for point (n, e) from a measured distance.

    Gaussian likelihood of the point's range from the source object's
    centroid, floored by UPDATE_FLOOR so no cell is ever zeroed out.
    """
    rng = toPolar(n - s.objfrom.centroid[0], e - s.objfrom.centroid[1])[0]
    return gaussian(s.dist, s.distunc)(rng) + UPDATE_FLOOR
def bHeading(s, n, e):
    """Belief weight for point (n, e) from a measured heading.

    Gaussian likelihood of the point's bearing from the source object's
    centroid, floored by UPDATE_FLOOR so no cell is ever zeroed out.
    """
    bearing = toPolar(n - s.objfrom.centroid[0], e - s.objfrom.centroid[1])[1]
    return gaussian(s.heading, s.headingunc)(bearing) + UPDATE_FLOOR
def __init__(s, centroid, stddev):
    """Build an object's probability map.

    Probability map, centroid and stddev are all in N-E coordinates
    from the origin at the lower right.
    """
    # Uniform starting map; update() then reweights every cell by a
    # Gaussian of its range from the centroid.
    s.pmap = n.ndarray((ARRAY_SIZE, ARRAY_SIZE), buffer=n.ones(ARRAY_SIZE**2))
    s.centroid = centroid
    s.stddev = stddev
    # Lambda parameters named north/east so they do not shadow the
    # numpy alias `n` used above.
    s.update(lambda north, east: gaussian(0, s.stddev)(
        toPolar(north - s.centroid[0], east - s.centroid[1])[0]))