def calcProfilV(self, xy):
    """Return the velocity values along a cross-section."""
    vxvy = self.getMfVitesse()
    grd = self.parent.aquifere.getFullGrid()
    x0, y0, dx, dy, nx, ny = (grd['x0'], grd['y0'], grd['dx'], grd['dy'],
                              grd['nx'], grd['ny'])
    x, y = zip(*xy)
    xl0, xl1 = x[:2]
    yl0, yl1 = y[:2]
    dxp, dyp = xl1 - xl0, yl1 - yl0
    # Sample the segment with a step no coarser than one grid cell.
    ld = int(max(ceil(abs(dxp / dx)), ceil(abs(dyp / dy)))) + 1
    ddx = dxp / ld
    ddy = dyp / ld
    xp2 = xl0 + arange(ld + 1) * ddx
    yp2 = yl0 + arange(ld + 1) * ddy
    # Clip the cell indices to the grid bounds.
    ix = clip(floor((xp2 - x0) / dx).astype(int), 0, nx - 1)
    iy = clip(floor((yp2 - y0) / dy).astype(int), 0, ny - 1)
    vx = take(ravel(vxvy[0]), iy * nx + ix)
    vy = take(ravel(vxvy[1]), iy * nx + ix)
    V = sqrt(vx**2 + vy**2)
    # Curvilinear abscissa along the section.
    cu = sqrt((xp2 - xp2[0])**2 + (yp2 - yp2[0])**2)
    return [cu, V]
def insert_sphere(im, c, r):
    r"""
    Inserts a sphere of a specified radius into a given image

    Parameters
    ----------
    im : array_like
        Image into which the sphere should be inserted
    c : array_like
        The [x, y, z] coordinate indicating the center of the sphere
    r : int
        The radius of sphere to insert

    Returns
    -------
    image : ND-array
        The original image with a sphere inserted at the specified location
    """
    c = sp.array(c, dtype=int)
    if c.size != im.ndim:
        raise Exception('Coordinates do not match dimensionality of image')
    # Bounding box of the sphere, clipped to the image boundaries.
    bbox = []
    for i in range(im.ndim):
        bbox.append(sp.clip(c[i] - r, 0, im.shape[i]))
    for i in range(im.ndim):
        bbox.append(sp.clip(c[i] + r, 0, im.shape[i]))
    bbox = sp.ravel(bbox)
    s = bbox_to_slices(bbox)
    temp = im[s]
    # Mark the center, then threshold the distance transform to get a ball.
    blank = sp.ones_like(temp)
    blank[tuple(c - bbox[0:im.ndim])] = 0
    blank = spim.distance_transform_edt(blank) < r
    im[s] = blank
    return im
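# Minimal usage sketch (illustrative, not part of the original source):
# assumes scipy is imported as sp and that bbox_to_slices and
# scipy.ndimage (spim) are available in this module's scope.
im = sp.zeros([50, 50, 50], dtype=bool)
im = insert_sphere(im, c=[25, 25, 25], r=10)
print(im.sum())  # number of voxels covered by the inserted sphere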
def __call__(self, gradient, error):
    products = self.previous_gradient * gradient
    signs = sign(gradient)

    # For positive gradient parts.  Note clip() returns a new array, so the
    # result must be reassigned (the original calls discarded it).
    positive = (products > 0).astype('int8')
    pos_step = self.step * self.upfactor * positive
    pos_step = clip(pos_step, -self.bound, self.bound)
    pos_update = self.values - signs * pos_step

    # For negative gradient parts.
    negative = (products < 0).astype('int8')
    neg_step = self.step * self.downfactor * negative
    neg_step = clip(neg_step, -self.bound, self.bound)
    if error <= self.previous_error:
        # If the error has decreased, do nothing.
        neg_update = zeros(gradient.shape)
    else:
        # If it has increased, move back 2 steps.
        neg_update = self.more_prev_values
    # Set all negative gradients to zero for the next step.
    gradient *= positive

    # Bookkeeping.
    self.previous_gradient = gradient
    self.more_prev_values = self.prev_values
    self.prev_values = self.values.copy()
    self.previous_error = error

    # Updates.
    self.step[:] = pos_step + neg_step
    self.values[:] = positive * pos_update + negative * neg_update
    return self.values
def getBeamFluxSpline(beam, plasma, t, lim1, lim2, points=1000):
    """ generates a spline off of the beampath.  Assumes that the change
    in flux is MONOTONIC"""
    lim = beam.norm.s
    beam.norm.s = scipy.linspace(0, lim[-1], points)
    h = time.time()
    psi = plasma.eq.rz2rmid(beam.r()[0], beam.r()[2], t)  # evaluates all psi's at once
    print(time.time() - h)
    outspline = len(t) * [0]
    inspline = len(t) * [0]
    for i in range(t.size):
        temp = lim1
        mask = scipy.logical_and(scipy.isfinite(psi[i]), psi[i] < lim2 + .02)
        try:
            minpos = scipy.argmin(psi[i][mask])
            test = psi[i][mask][minpos]
        except ValueError:
            test = lim2 + .03
        #plt.plot(beam.x()[0][mask],psi[i][mask])
        #plt.show()
        sizer = psi[i][mask].size
        if not test > lim2:
            #plt.plot(beam.x()[0][mask][0:minpos],psi[i][mask][0:minpos],beam.x()[0][mask][minpos:],psi[i][mask][minpos:])
            #plt.show()
            #limout = scipy.insert(lim,(2,2),(beam.norm.s[mask][minpos],beam.norm.s[mask][minpos]))  # add minimum flux s for bound testing
            if lim1 < test:
                temp = test
            try:
                temp1 = scipy.clip(
                    scipy.digitize((lim1, lim2), psi[i][mask][minpos::-1]),
                    0, minpos)
                outspline[i] = beam.norm.s[mask][minpos::-1][temp1]
            except ValueError:
                tempmask = (psi[i][mask] < lim2)[0]
                outspline[i] = scipy.array(
                    [beam.norm.s[mask][minpos], beam.norm.s[mask][tempmask]])
            try:
                temp2 = scipy.clip(
                    scipy.digitize((lim1, lim2), psi[i][mask][minpos:]),
                    0, sizer - minpos - 1)
                inspline[i] = beam.norm.s[mask][minpos:][temp2]
            except ValueError:
                inspline[i] = scipy.array(
                    [beam.norm.s[mask][minpos], beam.norm.s[mask][-1]])
        else:
            outspline[i] = scipy.array([[], []])
            inspline[i] = scipy.array([[], []])
    return (outspline, inspline)
def normalize(self, sensors):
    """
    limits is a list of 2-tuples, one tuple per parameter, giving min and
    max for that parameter.  The function scales the parameters to be
    between -1 and 1. e.g. [(-pi, pi), (0, 1), (-0.001, 0.001)]
    """
    assert len(self.sensor_limits) == len(sensors)
    result = []
    for l, s in zip(self.sensor_limits, sensors):
        if not l:
            result.append(s)
        else:
            result.append((s - l[0]) / (l[1] - l[0]) * 2 - 1.0)
    if self.clipping:
        # clip() returns a new array; the original call discarded its result.
        result = clip(result, -1, 1)
    return asarray(result)
def __init__(self, gridObj, dt, nParticles=1.e11, tBunchSpacing=25.e-9):
    self.gridObj = gridObj
    self.nx = gridObj.getNxExt()
    self.ny = gridObj.getNyExt()
    self.np = gridObj.getNpExt()
    self.lx = gridObj.getLxExt()
    self.ly = gridObj.getLyExt()
    self.dx = gridObj.getDx()
    self.dy = gridObj.getDy()
    self.dt = dt

    self.nParticles = nParticles
    self.charge = spc.elementary_charge
    self.beamVelocity = spc.c
    self.circumference = 6900
    self.radiusSigma = 0.002
    self.radiusLimitSigma = 5
    self.xBeamCenter = 0.
    self.yBeamCenter = 0.
    self.tBunchSpacing = tBunchSpacing
    self.bunchLengthSigma = 0.1
    self.tBunchLengthSigma = self.bunchLengthSigma / self.beamVelocity
    self.bunchLengthLimitSigma = 5

    # Transverse charge profile: integrate a truncated Gaussian over each
    # grid cell (CDF difference in x times CDF difference in y).
    self.qTransversalProfile = sp.zeros(self.np)
    xMesh = self.gridObj.getXMesh()
    yMesh = self.gridObj.getYMesh()
    xCoords = sp.tile(xMesh, self.ny)
    yCoords = sp.reshape(sp.tile(yMesh, (self.nx, 1)).transpose(), self.np)
    # Cells within radiusLimitSigma sigmas (plus one cell) of the beam axis.
    beamPoints = ((self.radiusLimitSigma * self.radiusSigma
                   + max([self.dx, self.dy]))**2
                  - xCoords**2 - yCoords**2) > 0
    self.qTransversalProfile[beamPoints] = (
        (sps.norm.cdf(sp.clip(
            (xCoords - self.xBeamCenter + self.dx / 2.) / self.radiusSigma,
            -self.radiusLimitSigma, self.radiusLimitSigma))
         - sps.norm.cdf(sp.clip(
             (xCoords - self.xBeamCenter - self.dx / 2.) / self.radiusSigma,
             -self.radiusLimitSigma, self.radiusLimitSigma)))
        * (sps.norm.cdf(sp.clip(
            (yCoords - self.yBeamCenter + self.dy / 2.) / self.radiusSigma,
            -self.radiusLimitSigma, self.radiusLimitSigma))
           - sps.norm.cdf(sp.clip(
               (yCoords - self.yBeamCenter - self.dy / 2.) / self.radiusSigma,
               -self.radiusLimitSigma, self.radiusLimitSigma))))[beamPoints]
    # Normalize so the transverse profile sums to one.
    self.qTransversalProfile /= sp.sum(self.qTransversalProfile)
def keyPressEvent(self, event):  # reimplementation
    if event.key() == 16777234:  # Qt.Key_Left
        self.Data_Display.Frame_Visualizer.frame -= 1
        self.Data_Display.Frame_Visualizer.frame = sp.clip(
            self.Data_Display.Frame_Visualizer.frame, 0,
            self.Main.Data.nFrames - 1)

    if event.key() == 16777236:  # Qt.Key_Right
        self.Data_Display.Frame_Visualizer.frame += 1
        self.Data_Display.Frame_Visualizer.frame = sp.clip(
            self.Data_Display.Frame_Visualizer.frame, 0,
            self.Main.Data.nFrames - 1)

    self.Data_Display.Frame_Visualizer.update_frame()
    # One call is enough because this one calls the other as well.
    self.Data_Display.Traces_Visualizer.update_vline(
        self.Data_Display.Frame_Visualizer.frame)
def corrCoords(self, lcooI):
    '''Move coordinates back inside the domain if they fall outside it.'''
    g = self.core.dicaddin['Grid']
    # Small margin (1e-5 of the domain size) keeps points strictly inside.
    ex = (float(g['x1']) - float(g['x0'])) / 1e5
    ey = (float(g['y1']) - float(g['y0'])) / 1e5
    lcoord = []
    for b in lcooI:
        try:
            x = clip(float(b[0]), float(g['x0']) + ex, float(g['x1']) - ex)
        except ValueError:
            continue
        try:
            y = clip(float(b[1]), float(g['y0']) + ey, float(g['y1']) - ey)
        except ValueError:
            continue
        if len(b) == 2:
            lcoord.append((x, y))
        else:
            lcoord.append((x, y, float(b[2])))
    return lcoord
def getReward(self):
    # Calculate and return the reward: the height of the head.
    reward = self.env.getSensorByName('headPos')[1] / float(self.epiLen)
    # To prevent jumping, the reward can't exceed the head position while
    # standing absolutely upright.
    reward = clip(reward, -14.0, 4.0)
    return reward
def ojf(x, s, d, override=False):
    # print("called ojf: " + str(x))
    try:
        x = x.flatten()
    except AttributeError:
        pass
    # Map x from [-1, 1]^2 onto the search box, then to log-space
    # hyperparameters.
    xlow = [-2., -2.]
    xupp = [2., 2.]
    xthis = [xlow[i] + 0.5 * (xin + 1) * (xupp[i] - xlow[i])
             for i, xin in enumerate(x)]
    hyp = [10**i for i in xthis]
    print(hyp)
    t0 = time.perf_counter()
    llk = sp.clip(
        GPdc.GP_LKonly(X, Y, S, D,
                       GPdc.kernel(GPdc.MAT52, 1, sp.array(hyp))).plk(pm, ps),
        -1e60, 1e60)
    t1 = time.perf_counter()
    if llk < -1.:
        out = sp.log(-llk) + 1.
    else:
        out = -llk
    print("--->llk: {0} {1} t: {2}".format(llk, out, t1 - t0))
    return [out, t1 - t0]
def two_channel_to_color(im):
    """Converts a two-channel microarray image to a color image, as
    described in the paper associated with this codebase"""
    # Robust intensity range: 5th to 98th percentile.
    lower = sp.percentile(im, 5)
    upper = sp.percentile(im, 98)
    channel_0 = sp.clip((im[:, :, 0] - lower) / (upper - lower), 0, 1)
    channel_2 = sp.clip((im[:, :, 1] - lower) / (upper - lower), 0, 1)
    channel_1 = (channel_0 + channel_2) / 2.
    im = sp.array((channel_0, channel_1, channel_2))
    im = sp.rollaxis(im, 0, 3)
    im = (255 * im).astype(sp.uint8)
    return im
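# Illustrative call (not part of the original source), using the legacy
# scipy-as-sp namespace that the function itself relies on; the random
# array stands in for a real two-channel microarray scan.
im = sp.rand(64, 64, 2)
rgb = two_channel_to_color(im)
print(rgb.shape, rgb.dtype)  # (64, 64, 3) uint8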
def __call__(self, x_new):
    """Find linearly interpolated y_new = <name>(x_new).

    Inputs:
        x_new -- New independent variables.

    Outputs:
        y_new -- Linearly interpolated values corresponding to x_new.
    """
    # 1. Handle values in x_new that are outside of x.  Throw error,
    #    or return a list of mask array indicating the out-of-bounds values.
    #    The behavior is set by the bounds_error variable.
    ## RHC -- was x_new = atleast_1d(x_new)
    x_new_1d = atleast_1d(x_new)
    out_of_bounds = self._check_bounds(x_new_1d)
    # 2. Find where in the original data the values to interpolate
    #    would be inserted.
    #    Note: If x_new[n] == x[m], then m is returned by searchsorted.
    x_new_indices = searchsorted(self.x, x_new_1d)
    # 3. Clip x_new_indices so that they are within the range of
    #    self.x indices and at least 1.  Removes mis-interpolation
    #    of x_new[n] = x[0].
    # RHC -- changed Int to Numeric_Int to avoid name clash with numarray
    x_new_indices = clip(x_new_indices, 1, len(self.x) - 1).astype(Numeric_Int)
    # 4. Calculate the slope of regions that each x_new value falls in.
    lo = x_new_indices - 1
    hi = x_new_indices
    # !! take() should default to the last axis (IMHO) and remove
    # !! the extra argument.
    x_lo = take(self.x, lo, axis=self.interp_axis)
    x_hi = take(self.x, hi, axis=self.interp_axis)
    y_lo = take(self.y, lo, axis=self.interp_axis)
    y_hi = take(self.y, hi, axis=self.interp_axis)
    slope = (y_hi - y_lo) / (x_hi - x_lo)
    # 5. Calculate the actual value for each entry in x_new.
    y_new = slope * (x_new_1d - x_lo) + y_lo
    # 6. Fill any values that were out of bounds with NaN.
    # !! Need to think about how to do this efficiently for
    # !! multi-dimensional cases.
    yshape = y_new.shape
    y_new = y_new.flat
    new_shape = list(yshape)
    new_shape[self.interp_axis] = 1
    sec_shape = [1] * len(new_shape)
    sec_shape[self.interp_axis] = len(out_of_bounds)
    out_of_bounds.shape = sec_shape
    new_out = ones(new_shape) * out_of_bounds
    putmask(y_new, new_out.flat, self.fill_value)
    y_new.shape = yshape
    # Rotate the values of y_new back so that they correspond to the
    # correct x_new values.
    result = swapaxes(y_new, self.interp_axis, self.axis)
    # Scalar input yields a scalar result.
    try:
        len(x_new)
        return result
    except TypeError:
        return result[0]
def find_dt_artifacts(dt):
    r"""
    Finds points in a distance transform that are closer to wall than solid.

    These points could *potentially* be erroneously high since their distance
    values do not reflect the possibility that solid may have been present
    beyond the border of the image but lost by trimming.

    Parameters
    ----------
    dt : ND-array
        The distance transform of the phase of interest

    Returns
    -------
    image : ND-array
        An ND-array the same shape as ``dt`` with numerical values indicating
        the maximum amount of error in each voxel, which is found by
        subtracting the distance to the nearest edge of the image from the
        distance transform value.  In other words, this is the error that
        would be found if there were a solid voxel lurking just beyond the
        nearest edge of the image.  Obviously, voxels with a value of zero
        have no error.
    """
    temp = sp.ones(shape=dt.shape) * sp.inf
    for ax in range(dt.ndim):
        dt_lin = distance_transform_lin(sp.ones_like(temp, dtype=bool),
                                        axis=ax, mode='both')
        temp = sp.minimum(temp, dt_lin)
    result = sp.clip(dt - temp, a_min=0, a_max=sp.inf)
    return result
def plotHeatmap(fwrap, aclass, algoparams, trials, maxsteps):
    """ Visualizing performance across trials and across time
    (iterations in powers of 2) """
    psteps = int(log2(maxsteps)) + 1
    storesteps = [0] + [2 ** x for x in range(psteps)]
    ls = lossTraces(fwrap, aclass, dim=trials, maxsteps=maxsteps,
                    storesteps=storesteps, algoparams=algoparams,
                    minLoss=1e-10)

    initv = mean(ls[0])
    maxgain = exp(fwrap.stochfun.maxLogGain(maxsteps) + 1)
    maxneggain = sqrt(maxgain)

    M = zeros((psteps, trials))
    for sid in range(psteps):
        # skip the initial values
        winfactors = clip(initv / ls[sid + 1], 1. / maxneggain, maxgain)
        winfactors[isnan(winfactors)] = 1. / maxneggain
        M[sid, :] = log10(sorted(winfactors))

    pylab.imshow(M.T, interpolation='nearest', cmap=cm.RdBu,  #@UndefinedVariable
                 aspect=psteps / float(trials),
                 vmin=-log10(maxgain), vmax=log10(maxgain),
                 )
    pylab.xticks([])
    pylab.yticks([])
    return ls
def getReward(self):
    # A shaped reward is paid out at each third of the episode; in between,
    # only the pain penalty applies.
    if self.epiStep in (self.epiLen / 3, 2 * self.epiLen / 3, self.epiLen):
        self.reward[0] = clip(160.0 * (1.0 - self.rawReward), 0.0,
                              160.0) - self.getPain()
    else:
        self.reward[0] = -self.getPain()
    return self.reward[0]
def normalize_inputs(inputs, names, clip=False):
    normf = dict(phi=(0.0, s.pi),
                 umu=(0.5, 1.0),
                 aod=(0.0, 0.5),
                 h2o=(0.0, 2.5),
                 albedo=(0.0, 1.0))
    assert inputs.shape[1] == len(names)
    outputs = inputs.copy()
    for i, namev in enumerate(names):
        name = namev.lower()
        if name in normf:
            vmin, vmax = normf[name]
        elif name.startswith('ao'):
            vmin, vmax = normf['aod']
        elif 'h2o' in name:
            vmin, vmax = normf['h2o']
        elif name.startswith('alb'):
            vmin, vmax = normf['albedo']
        else:
            raise Exception('Unknown input: ' + namev)
        outputs[:, i] = (outputs[:, i] - vmin) / (vmax - vmin)
    if outputs.min() < 0 or outputs.max() > 1:
        if clip:
            warn('clipping normalized inputs outside the [0,1] range')
            outputs = s.clip(outputs, 0.0, 1.0)
        else:
            warn('normalized inputs exist that are outside [0,1] range')
    return outputs
def pso(func, nswarm, lbound, ubound, vmax, args=(), maxiter=1000,
        cp=2.0, cg=2.0):
    ndim = len(lbound)
    lbound = sp.asarray(lbound)
    ubound = sp.asarray(ubound)
    vmax = sp.asarray(vmax)

    # initialize the swarm
    swarm = lbound + sp.rand(nswarm, ndim) * (ubound - lbound)

    # initialize the "personal best" values
    pbestv = sp.zeros(nswarm, float)
    for i in sp.arange(nswarm):
        pbestv[i] = func(swarm[i])
    pbest = sp.array(swarm)

    # initialize the "global best" values
    gbesti = sp.argmin(pbestv)
    gbestv = pbestv[gbesti]
    gbest = pbest[gbesti]

    # initialize velocities
    velocities = 2 * vmax * sp.randn(nswarm, ndim) - vmax

    for i in sp.arange(maxiter):
        values = sp.zeros(nswarm, float)
        for j in sp.arange(nswarm):
            values[j] = func(swarm[j])

        mask = values < pbestv
        mask2d = sp.repeat(mask, ndim)
        mask2d.shape = (nswarm, ndim)
        pbestv = sp.where(mask, values, pbestv)
        pbest = sp.where(mask2d, swarm, pbest)

        if sp.minimum.reduce(pbestv) < gbestv:
            gbesti = sp.argmin(pbestv)
            gbestv = pbestv[gbesti]
            gbest = pbest[gbesti]

        velocities += (cp * sp.rand() * (pbest - swarm) +
                       cg * sp.rand() * (gbest - swarm))
        # Keep velocities and positions inside their admissible boxes.
        velocities = sp.clip(velocities, -vmax, vmax)
        swarm += velocities
        swarm = sp.clip(swarm, lbound, ubound)

        yield gbest
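# Usage sketch (not part of the original source): pso is a generator that
# yields the best-known position after every iteration.  The sphere
# objective and the bounds below are illustrative.
def sphere(x):
    return sp.sum(x**2)

best = None
for best in pso(sphere, nswarm=20, lbound=[-5., -5.], ubound=[5., 5.],
                vmax=[1., 1.], maxiter=50):
    pass  # consume the generator; `best` ends up as the final gbest
print(best)  # close to [0, 0] once the swarm has converged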
def getReward(self):
    # Calculate and return the reward.
    if self.count < 800:
        return 0.0
    else:
        # Reward is the height of the head.
        reward = self.env.getSensorByName(
            'SpecificBodyPositionSensor8')[1] / float(self.epiLen - 800)
        # To prevent jumping, the reward can't exceed the head position
        # while standing absolutely upright.
        reward = clip(reward, -14.0, 4.0)
        return reward
def performAction(self, action):
    """ a filtered mapping towards performAction of the underlying
    environment. """
    # scaling
    self.incStep()
    action = (action + 1.0) / 2.0 * self.dif \
        + self.env.fraktMin * self.env.dists[0]
    # Clipping the maximal change in actions (max force clipping)
    action = clip(action, self.action - self.maxSpeed,
                  self.action + self.maxSpeed)
    EpisodicTask.performAction(self, action)
    self.action = action.copy()
def _boltzmannProbs(qvalues, temperature=1.):
    if temperature == 0:
        # Zero temperature: greedy, all probability mass on the argmax.
        tmp = zeros(len(qvalues))
        tmp[r_argmax(qvalues)] = 1.
    else:
        tmp = qvalues / temperature
        # Subtract the max and clip the exponent for numerical stability.
        tmp -= max(tmp)
        tmp = exp(clip(tmp, -20, 0))
    return tmp / sum(tmp)
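# Quick check (not part of the original source), assuming the module-level
# scipy imports (array, exp, clip, zeros) that the function itself uses:
qvalues = array([1.0, 2.0, 3.0])
print(_boltzmannProbs(qvalues))                    # soft preference for the best action
print(_boltzmannProbs(qvalues, temperature=0.01))  # almost greedy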
def late_filling(target, pressure='pore.pressure', Pc_star='pore.pc_star',
                 Swp_star=0.2, eta=3):
    r"""
    Calculates the fraction of a pore or throat filled with invading fluid
    based on the capillary pressure in the invading phase.  The invading
    phase volume is calculated from:

    .. math::
        S_{nwp} = 1 - S_{wp}^{*} (P^{*}/P_{c})^{\eta}

    Parameters
    ----------
    pressure : string
        The capillary pressure in the non-wetting phase (Pc > 0).

    Pc_star : string
        The minimum pressure required to create an interface within the pore
        body or throat.  Typically this would be calculated using the
        Washburn equation.

    Swp_star : float
        The residual wetting phase in an invaded pore or throat at a pressure
        of ``pc_star``.

    eta : float
        Exponent controlling the rate at which wetting phase is displaced
        with increasing pressure.

    Returns
    -------
    An array containing the fraction of each pore or throat that would be
    filled with non-wetting phase at the given phase pressure.  This does not
    account for whether or not the element is actually invaded, which
    requires a percolation algorithm of some sort.
    """
    element = pressure.split('.')[0]
    network = target.project.network
    phase = target.project.find_phase(target)
    pc_star = phase[Pc_star]
    Pc = phase[pressure]
    # Remove any 0's from the Pc array to prevent numpy div by 0 warning
    Pc = sp.maximum(Pc, 1e-9)
    Swp = Swp_star * ((pc_star / Pc)**eta)
    values = sp.clip(1 - Swp, 0.0, 1.0)
    # Now map element onto target object
    if element == 'throat':
        Ts = network.map_throats(throats=target.Ts, origin=target)
        values = values[Ts]
    else:
        Ps = network.map_pores(pores=target.Ps, origin=target)
        values = values[Ps]
    return values
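# The saturation relation above can be checked in isolation, without any
# OpenPNM objects; the parameter values here are illustrative only.
Pc = sp.linspace(1e3, 1e5, 5)        # capillary pressures
Pc_star_v, Swp_star_v, eta_v = 1e3, 0.2, 3
Swp_v = Swp_star_v * (Pc_star_v / sp.maximum(Pc, 1e-9))**eta_v
print(sp.clip(1 - Swp_v, 0.0, 1.0))  # S_nwp -> 1 as Pc grows past Pc_star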
def polydisperse_spheres(shape: List[int], porosity: float, dist,
                         nbins: int = 5, r_min: int = 5):
    r"""
    Create an image of randomly placed, overlapping spheres with a
    distribution of radii.

    Parameters
    ----------
    shape : list
        The size of the image to generate in [Nx, Ny, Nz] where Ni is the
        number of voxels in each direction.  If shape is only 2D, then an
        image of polydisperse disks is returned instead.

    porosity : scalar
        The porosity of the image, defined as the number of void voxels
        divided by the number of voxels in the image.  The specified value
        is only matched approximately, so it's suggested to check this value
        after the image is generated.

    dist : scipy.stats distribution object
        This should be an initialized distribution chosen from the large
        number of options in the ``scipy.stats`` submodule.  For instance, a
        normal distribution with a mean of 20 and a standard deviation of 10
        can be obtained with ``dist = scipy.stats.norm(loc=20, scale=10)``

    nbins : scalar
        The number of discrete sphere sizes that will be used to generate
        the image.  This function generates ``nbins`` images of monodisperse
        spheres that span 0.05 and 0.95 of the possible values produced by
        the provided distribution, then overlays them to get
        polydispersivity.

    Returns
    -------
    image : ND-array
        A boolean array with ``True`` values denoting the pore space
    """
    shape = sp.array(shape)
    if sp.size(shape) == 1:
        shape = sp.full((3, ), int(shape))
    Rs = dist.interval(sp.linspace(0.05, 0.95, nbins))
    Rs = sp.vstack(Rs).T
    Rs = (Rs[:-1] + Rs[1:]) / 2
    Rs = sp.clip(Rs.flatten(), a_min=r_min, a_max=None)
    phi_desired = 1 - (1 - porosity) / (len(Rs))
    im = sp.ones(shape, dtype=bool)
    for r in Rs:
        phi_im = im.sum() / sp.prod(shape)
        phi_corrected = 1 - (1 - phi_desired) / phi_im
        temp = overlapping_spheres(shape=shape, radius=r,
                                   porosity=phi_corrected)
        im = im * temp
    return im
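# Example call (not part of the original source), using the distribution
# from the docstring; a 2D shape yields polydisperse disks.  Assumes the
# overlapping_spheres helper used above is importable.
import scipy.stats
dist = scipy.stats.norm(loc=20, scale=10)
im = polydisperse_spheres(shape=[200, 200], porosity=0.5, dist=dist)
print(im.sum() / im.size)  # achieved porosity, roughly 0.5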
def getCharge(self, t):
    # Nonzero charge only within bunchLengthLimitSigma sigmas of a bunch.
    conditions = sp.mod(t, self.tBunchSpacing) \
        < 2 * self.bunchLengthLimitSigma * self.tBunchLengthSigma
    if conditions:
        tRed = sp.mod(t, self.tBunchSpacing)
        # Charge in this time step: CDF difference of the truncated Gaussian.
        temp = (sps.norm.cdf(sp.clip(
                    (tRed + self.dt / 2.) / self.tBunchLengthSigma
                    - self.bunchLengthLimitSigma,
                    -self.bunchLengthLimitSigma, self.bunchLengthLimitSigma))
                - sps.norm.cdf(sp.clip(
                    (tRed - self.dt / 2.) / self.tBunchLengthSigma
                    - self.bunchLengthLimitSigma,
                    -self.bunchLengthLimitSigma, self.bunchLengthLimitSigma)))
        return (temp * self.nParticles / self.dt / self.beamVelocity
                * self.charge * self.qTransversalProfile)
    else:
        return 0
def prob(self, position, clip=True):
    if self.distance is not None:
        z, ra, dec = position
    logpdf = self.kernel.score_samples(self.get_position(position))
    toret = self.norm * scipy.exp(logpdf)
    if clip:
        toret = scipy.clip(toret, 0., 1.)
    if self.distance is not None and self.zrange is not None:
        mask = (z >= self.zrange[0]) & (z <= self.zrange[-1])
        toret[~mask] = 0.
    return toret
def set_interp(self):
    prob = scipy.clip(self.norm * self.nbar, 0., 1.)
    self.interp = interpolate.RectBivariateSpline(
        self.z, self.other, prob,
        bbox=[None, None, None, None], kx=3, ky=3, s=0)
def getReward(self):
    if self.epiStep < self.epiLen:
        if self.rawReward < 0.5:
            self.reward[0] = (0.5 - self.rawReward) * 1.0 - self.getPain()
        else:
            self.reward[0] = -self.getPain()
    else:
        self.reward[0] = clip(160.0 * (1.0 - self.rawReward), 0.0,
                              160.0) - self.getPain()
    return self.reward[0]
def _compose_multi(imgs):
    # Tile each batch horizontally, stack the resulting rows vertically,
    # and clip pixel values into [0, 1].
    _imgs = []
    for i in range(len(imgs)):
        _imgs.append([])
        for j in range(imgs[i].shape[0]):
            _imgs[i].append(imgs[i][j])
        _imgs[i] = sp.concatenate(_imgs[i], 1)
    _rv = sp.concatenate(_imgs, 0)
    _rv = sp.clip(_rv, 0, 1)
    return _rv
def _compose(orig, recon):
    # Put the originals in the top row and the reconstructions below them.
    _imgo = [orig[i] for i in range(orig.shape[0])]
    _imgr = [recon[i] for i in range(orig.shape[0])]
    _imgo = sp.concatenate(_imgo, 1)
    _imgr = sp.concatenate(_imgr, 1)
    _rv = sp.concatenate([_imgo, _imgr], 0)
    _rv = sp.clip(_rv, 0, 1)
    return _rv
def pointchargePot(x, y, charge=1, scale=1):
    from scipy import sqrt, pi, clip
    size_x = x.max()
    size_y = y.max()
    Vbottom = 0
    x = x - size_x / 2
    # Potentials of two point charges, one at y = 0 and one at y = size_y.
    left_charge = 1 / sqrt(x**2 + y**2)
    right_charge = 1 / sqrt(x**2 + (y - size_y)**2)
    V = Vbottom + p.q * charge / (p.a * 4 * pi * p.eps0 * p.epsr) \
        * (left_charge + right_charge)
    V = clip(V, 0, scale)
    return V
def scroll_event(self, event):
    """ changes width of slice """
    if event.button == 'up':
        self.width += 2
    if event.button == 'down':
        self.width -= 2
    self.width = sp.clip(self.width, 1, self.nPlaces)
    self.xs = self.calc_x(self.pos, self.width)
    self.Rect.set_xy((self.xs[0], 0))
    self.Rect.set_width(self.width)
    self.update()
def fitFunc(positions, A, x0, sigmax, y0, sigmay, B, clipValue):
    """data is a 2 row array of positions e.g.
    0 1 2 3 4 5 0 1 2 3 4 5...
    0 0 0 0 0 0 1 1 1 1 1 1...
    so data[0] is x, data[1] is y
    """
    # integration limits +- 1000 pixels
    return scipy.clip(
        vectorisedIntegral(positions[0], positions[1], A, x0, sigmax, y0,
                           sigmay), 0, clipValue)
def denormalize(self, actors):
    """
    limits is a list of 2-tuples, one tuple per parameter, giving min and
    max for that parameter.  The function scales the parameters from -1 and
    1 to the given interval (min, max) for each actor.
    """
    assert len(self.actor_limits) == len(actors)
    result = []
    for l, a in zip(self.actor_limits, actors):
        if not l:
            result.append(a)
        else:
            r = (a + 1.0) / 2 * (l[1] - l[0]) + l[0]
            if self.clipping:
                r = clip(r, l[0], l[1])
            result.append(r)
    return result
def get_color(listing, listings, f, cm):
    price = f(listing)
    prices = [f(l) for l in listings]
    lower = sp.percentile(prices, 10)
    upper = sp.percentile(prices, 90)
    relative_price = (price - lower) / (upper - lower)
    color = cm(sp.clip(relative_price, 0, 1))
    is_dark = sum(color[:3]) / 4 < 0.4
    background_color = tuple([int(255 * c) for c in color[:3]])
    text_color = (230, 230, 230) if is_dark else (50, 50, 50)
    return background_color, text_color
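# Illustrative use (not part of the original source): listings modeled as
# dicts with a 'price' field, colored along a matplotlib colormap.
import matplotlib.cm
listings = [{'price': p} for p in (100, 150, 200, 250, 300)]
bg, fg = get_color(listings[0], listings, f=lambda l: l['price'],
                   cm=matplotlib.cm.viridis)
print(bg, fg)  # dark background for the cheapest listing, light text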
def compute_utilities(self, fitnesses):
    n_fitnesses = fitnesses.shape[0]
    ranks = scipy.zeros_like(fitnesses)
    l = sorted(enumerate(fitnesses), key=lambda x: x[1])
    for i, (j, _) in enumerate(l):
        ranks[j] = i
    # smooth reshaping
    # If we do not cast to float64 here explicitly, scipy will at random
    # points crash with a weird AttributeError.
    utilities = -scipy.log((n_fitnesses - ranks).astype("float64"))
    utilities += scipy.log(n_fitnesses / 2.0 + 1.0)
    utilities = scipy.clip(utilities, 0, float("inf"))
    utilities /= utilities.sum()  # make the utilities sum to 1
    utilities -= 1.0 / n_fitnesses  # baseline
    return utilities
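# Standalone check of the rank-based utility transform (mirrors the method
# body above; not part of the original source):
fitnesses = scipy.array([3.0, 1.0, 2.0, 5.0])
n = fitnesses.shape[0]
ranks = scipy.zeros_like(fitnesses)
for i, (j, _) in enumerate(sorted(enumerate(fitnesses), key=lambda x: x[1])):
    ranks[j] = i
u = scipy.clip(-scipy.log((n - ranks).astype('float64'))
               + scipy.log(n / 2.0 + 1.0), 0, float('inf'))
u = u / u.sum() - 1.0 / n
print(u, u.sum())  # sums to ~0; the largest fitness gets the largest utility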
def percolating_continua(target, phi_crit, tau,
                         volume_fraction='pore.volume_fraction',
                         bulk_property='pore.intrinsic_conductivity'):
    r'''
    Calculates the effective property of a continua using percolation theory

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated.  This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.

    volume_fraction : string
        The dictionary key in the Phase object containing the volume
        fraction of the conducting component

    bulk_property : string
        The dictionary key in the Phase object containing the intrinsic
        property of the conducting component

    phi_crit : float
        The volume fraction below which percolation does NOT occur

    tau : float
        The exponent of the percolation relationship

    Returns
    -------
    sigma_eff : NumPy ndarray
        Array containing effective electrical conductivity values.

    Notes
    -----
    This model uses the following standard percolation relationship:

    .. math::
        \sigma_{effective}=\sigma_{bulk}(\phi - \phi_{critical})^{\tau}
    '''
    sigma = target[bulk_property]
    phi = target[volume_fraction]
    diff_phi = _sp.clip(phi - phi_crit, a_min=0, a_max=_sp.inf)
    sigma_eff = sigma * (diff_phi)**tau
    return sigma_eff
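# The percolation relationship in isolation (illustrative values, no
# OpenPNM objects required):
phi = _sp.linspace(0.0, 0.5, 6)     # conducting-phase volume fractions
sigma_bulk, phi_c, tau_v = 1.0, 0.2, 2.0
diff = _sp.clip(phi - phi_c, a_min=0, a_max=_sp.inf)
print(sigma_bulk * diff**tau_v)     # identically zero below phi_c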
def _find_blocks(self, array, trim_edges=False):
    array = sp.clip(array, a_min=0, a_max=1)
    temp = sp.pad(array, pad_width=1, mode='constant', constant_values=0)
    end_pts = sp.where(sp.ediff1d(temp) == -1)[0]  # Find 1->0 transitions
    end_pts -= 1  # To adjust for 0 padding
    seg_len = sp.cumsum(array)[end_pts]
    seg_len[1:] = seg_len[1:] - seg_len[:-1]
    start_pts = end_pts - seg_len + 1
    a = dict()
    a['start'] = start_pts
    a['end'] = end_pts
    a['length'] = seg_len
    if trim_edges:
        # Drop blocks that touch either edge of the array.
        if (a['start'].size > 0) and (a['start'][0] == 0):
            for item in a:
                a[item] = a[item][1:]
        if (a['end'].size > 0) and (a['end'][-1] == sp.size(array) - 1):
            for item in a:
                a[item] = a[item][:-1]
    return a
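# Hand-checked example (not part of the original source): `self` is unused,
# so the method can be exercised directly.
blocks = _find_blocks(None, sp.array([0, 1, 1, 0, 0, 1, 1, 1, 0, 1]))
print(blocks['start'], blocks['end'], blocks['length'])
# -> [1 5 9] [2 7 9] [2 3 1]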
def sigmoid(z):
    """ Sigmoid Function """
    try:
        xdim, ydim = z.shape
    except ValueError:  # for array
        z = z.reshape(1, z.size)
    except AttributeError:  # for scalar value
        z = sp.array([z]).reshape(1, 1)
    finally:
        xdim, ydim = z.shape

    g = sp.zeros(z.shape).view(sp.matrix)
    for i in range(xdim):
        for j in range(ydim):
            g[i, j] = (1 + sp.e ** (-z[i, j])) ** (-1)
    # Keep the output strictly inside (0, 1) so that log(g) and log(1 - g)
    # stay finite downstream.
    g = sp.clip(g, a_min=0.0000000001, a_max=0.999999999)
    return g
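# Why the clip matters (not part of the original source): without it,
# saturated inputs would make log(g) or log(1 - g) hit -inf.  An overflow
# warning from the large exponent is expected and harmless here.
z = sp.matrix([[-1000.0, 0.0, 1000.0]])
g = sigmoid(z)
print(g)          # values strictly inside (0, 1)
print(sp.log(g))  # finite everywhere thanks to the clipping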
def hac(vecs, metric, linkage_method, threshold, weights):
    """
    Hierarchical Agglomerative Clustering.

    `scipy.spatial.distance.pdist` seemed to perform quite a bit faster than
    `sklearn.metrics.pairwise.pairwise_distances` when they both operated as
    a single process.  `pdist`, however, does not accept sparse matrices as
    inputs (as of scipy v0.15.2), so there can be quite a bit of memory cost
    when using it.

    `pairwise_distances` does accept sparse matrices and has built-in support
    for parallelization (with the `n_jobs` param).  Because it can have a
    significantly lower memory footprint, it seems better to use that as a
    multicore job.
    """
    vecs = weight_vectors(vecs, weights)

    if platform == 'darwin':
        # This breaks on OSX 10.9.4, py3.3+, with large arrays:
        # https://stackoverflow.com/questions/11662960/ioerror-errno-22-invalid-argument-when-reading-writing-large-bytestring
        # https://github.com/numpy/numpy/issues/3858
        # So for OSX ('darwin'), just running it as a single job.
        distance_matrix = pairwise_distances(vecs, metric=metric, n_jobs=1)
    else:
        # n_jobs=-1 to use all cores, n_jobs=-2 to use all cores except 1, etc.
        distance_matrix = pairwise_distances(vecs, metric=metric, n_jobs=-2)

    # `pairwise_distances` returns the distance matrix in squareform;
    # `squareform()` converts it to the condensed form that `linkage()`
    # accepts.
    distance_matrix = squareform(distance_matrix, checks=False)

    linkage_matrix = linkage(distance_matrix, method=linkage_method,
                             metric=metric)

    # Floating point errors with the cosine metric occasionally lead to
    # negative values.  Round them to 0.
    linkage_matrix = clip(linkage_matrix, 0, np.amax(linkage_matrix))

    labels = fcluster(linkage_matrix, threshold, criterion='distance')
    return labels
def getScalingMatrix(self):
    # Here we add one to avoid division by zero:
    # rho = 1.0 / (self.hessiansamplenumber + 1)
    if self.hessiansamplenumber < self.minHessianSampleNumber:
        rho = 0
    else:
        if self.enableDamptingRatio:
            # Get the scale weight to reduce noise.
            rr = self.rewardRange
            rho = (self.alpha - rr[0]) / (rr[1] - rr[0])
            rho = scipy.clip(rho, 0, 1)
        else:
            # FOR TEST: damping ratio disabled.
            rho = 1
    # Blend the Hessian estimate with the identity; fall back to the
    # identity if the blend is singular.
    I = scipy.eye(self.paramdim)
    mat = rho * self.H + (1 - rho) * I
    try:
        scaleMatrix = inv(mat)
    except Exception:
        scaleMatrix = I
    return scaleMatrix
## Load data from multiple files
print("Got %d files to plot %s..." % (len(sys.argv), quantity))
x, y, z, z2 = [np.array([]) for _ in range(4)]  ## four empty arrays
filenames = sys.argv[1:]
#filenames.sort(key=lambda name: float(name.split('radius=')[1].split('_')[0]))  # sort (optional)
for datafile_name in filenames:
    ## Getting 1D data
    (freq, s11_ampli, s11p, s12_ampli, s12p, Nre, Nim, Zre, Zim,
     eps_r, eps_i, mu_r, mu_i) = \
        np.loadtxt(datafile_name, usecols=range(13), unpack=True)

    if quantity == 'reflection':
        znew = s11_ampli
    elif quantity == 'transmission':
        znew = s12_ampli
    elif quantity == 'loss':
        znew = np.log(1 - s11_ampli**2 - s12_ampli**2)
    elif quantity == 'absNimag':
        znew = np.clip(abs(Nim), 0, 10)
        #znew = np.log10(np.clip(abs(Nim), 0, 300))
    elif quantity == 'absNre':
        znew = abs(np.arcsin(np.sin(np.real(Nre * freq * 100e-6 / c) * np.pi)) / np.pi)
        znew2 = np.clip(abs(Nim), 0, 10)
    elif quantity == 'Nre':
        znew = Nre  # np.real(Nre*freq*100e-6/c)
    elif quantity == 'eps':
        znew = eps_r
    elif quantity == 'epsangle':
        znew = np.angle(eps_r + 1j * eps_i)
    elif quantity == 'mu':
        znew = mu_r
    else:
        print('Error: select a known quantity to plot!')

    ## Truncate the data ranges
    truncated = np.logical_and(freq > minf, freq < maxf)
    (freq, znew) = [arr[truncated] for arr in (freq, znew)]
    #(znew2,) = [arr[truncated] for arr in (znew2,)]
    ## XXX
def normAct(self, s):
    return clip(s, self.minAkt, self.maxAkt)
def safeExp(x):
    "Bounded range for the exponential function (won't produce inf or NaN)."
    return exp(clip(x, -500, 500))
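# Quick check (not part of the original source), assuming exp and clip come
# from the scipy/numpy namespace as in the function above:
print(safeExp(1000.))   # exp(500): huge but finite, not inf
print(safeExp(-1000.))  # exp(-500): tiny but nonzero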