def fgrad_y(self, y, psi, return_precalc=False):
    """
    gradient of f w.r.t to y ([N x 1])

    returns: Nx1 vector of derivatives, unless return_precalc is true,
    then it also returns the precomputed stuff
    """
    mpsi = psi.copy()
    mpsi[:, 0:2] = SP.exp(mpsi[:, 0:2])

    s = SP.zeros((len(psi), y.shape[0], y.shape[1]))
    r = SP.zeros((len(psi), y.shape[0], y.shape[1]))
    d = SP.zeros((len(psi), y.shape[0], y.shape[1]))

    grad = 1
    for i in range(len(mpsi)):
        a, b, c = mpsi[i]
        s[i] = b * (y + c)
        r[i] = SP.tanh(s[i])
        d[i] = 1 - r[i]**2
        grad += a * b * d[i]

    # vectorized version
    S = (mpsi[:, 1] * (y + mpsi[:, 2])).T
    R = SP.tanh(S)
    D = 1 - R**2
    GRAD = (1 + (mpsi[:, 0:1] * mpsi[:, 1:2] * D).sum(axis=0))[:, SP.newaxis]

    if return_precalc:
        return GRAD, S, R, D
        # return grad, s, r, d

    return grad
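# --- A standalone sketch (not from the original sources): fgrad_y above
# computes f'(y) = 1 + sum_i a_i*b_i*(1 - tanh(b_i*(y + c_i))**2) for the
# warp f(y) = y + sum_i a_i*tanh(b_i*(y + c_i)). A finite-difference check
# with made-up (a, b, c) rows (already positive; fgrad_y exponentiates the
# raw parameters first), using numpy for the ufuncs:
import numpy as np

psi = np.array([[0.5, 1.2, -0.3],   # rows of (a, b, c)
                [0.1, 0.7, 0.4]])
y, eps = 0.25, 1e-6
f = lambda y: y + sum(a * np.tanh(b * (y + c)) for a, b, c in psi)
fd = (f(y + eps) - f(y - eps)) / (2 * eps)
an = 1 + sum(a * b * (1 - np.tanh(b * (y + c))**2) for a, b, c in psi)
print(abs(fd - an) < 1e-8)  # expect True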
def forward_pass(self, xL, xR):
    bs = sp.ones((1, xL.shape[1]), dtype=float)
    # First Layer
    xLb = sp.vstack([xL, bs])
    xRb = sp.vstack([xR, bs])
    a1L = sp.dot(self.w1l, xLb)
    a1R = sp.dot(self.w1r, xRb)
    z1L = sp.tanh(a1L)
    z1R = sp.tanh(a1R)
    # Second Layer
    z1Lb = sp.vstack([z1L, bs])
    z1LRb = sp.vstack([z1L, z1R, bs])
    z1Rb = sp.vstack([z1R, bs])
    a2L = sp.dot(self.w2l, z1Lb)
    a2LR = sp.dot(self.w2lr, z1LRb)
    a2R = sp.dot(self.w2r, z1Rb)
    z2 = a2LR * self.sigmoid(a2L) * self.sigmoid(a2R)
    # Third Layer
    z2b = sp.vstack([z2, bs])
    a3 = sp.dot(self.w3, z2b)
    return a1L, a1R, a2L, a2LR, a2R, a3, z1Lb, z1LRb, z1Rb, z2b, xLb, xRb
def hyperbolic_tangent(a, b, prime_offset=0.0, threshold=float('inf')):
    """Return an element-wise hyperbolic tangent with range 'a' and slope 'b',
    i.e. f(x) = a * tanh(b * x).

    Arguments:
    a            -- Output range: [-a, +a]. (real)
    b            -- Slope parameter. (real)
    prime_offset -- Constant added to the derivative, preventing it from
                    getting stuck at zero for large inputs. (real)
    threshold    -- When the output's magnitude exceeds the threshold,
                    return 'a' (or '-a'). (real)

    Return:
    An ActivationFunction object.
    """
    thr_fun = lambda X: ((X < -threshold) * -a + (X > threshold) * a +
                         ((X < -threshold) + (X > threshold) == 0) * X)
    fun = lambda X: thr_fun(a * scipy.tanh(X * b))
    # der = lambda X: scipy.ones(X.shape) - scipy.tanh(X)**2
    ab = a * b
    der = lambda X: (ab * (scipy.ones(X.shape) - scipy.tanh(X * b)**2) +
                     scipy.ones(X.shape) * prime_offset)
    inv = lambda X: scipy.arctanh(X / a) / b
    descr = "hyperbolic_tangent(%f, %f, %f, %f)" % (a, b, prime_offset, threshold)
    return ActivationFunction(fun, inv, der, descr)
def DP_lib(compuesto, T):
    """Dohrn, R.; Prausnitz, J.M. A simple perturbation term for the
    Carnahan-Starling equation of state. Fluid Phase Eq. 1990, 61, 53."""
    Tr = T / compuesto.Tc
    if Tr >= 1:
        m = 1
    else:
        m = 0
    a1 = 0.367845 + 0.055966 * compuesto.f_acent
    # (-1)**m parenthesised: without the parentheses, "-1**m" always
    # evaluates to -1 because ** binds tighter than unary minus
    a2 = (-1) ** m * (0.604709 + 0.008477 * compuesto.f_acent)
    ac = 0.550408 * R_atml ** 2 * compuesto.Tc ** 2 / compuesto.Pc.atm
    a = ac * (a1 * tanh(a2 * abs(Tr - 1) ** 0.7) + 1)
    b1 = 0.356983 - 0.190003 * compuesto.f_acent
    b2 = (-1) ** m * (1.37 - 1.898981 * compuesto.f_acent)
    bc = 0.187276 * R_atml * compuesto.Tc / compuesto.Pc.atm
    b = bc * (b1 * tanh(b2 * abs(log(Tr)) ** 0.8) + 1)
    sigma = (3 * b / 2 / pi / Avogadro) ** (1.0 / 3)
    return a, b, sigma

def DP_Z(self, T, P):
    """Compressibility factor from the Dohrn-Prausnitz equation of state"""
    V = self.DP_V(T, P) * self.peso_molecular
    return P * V / R_atml / T

def DP_V(self, T, P):
    """Volume from the Dohrn-Prausnitz model"""
    # NOTE: DP_lib above is defined as DP_lib(compuesto, T); this call
    # passes (T, P) as in the original source
    a, b, sigma = self.DP_lib(T, P)
    D = sigma
    E = sigma ** 2
    F = sigma ** 3
    if self.Fase(T, P) == "gas":
        V = 25
    else:
        V = 0.5

    def Vm(V):
        nu = b / 4 / V
        Zref = (1 + (3 * D * E / F - 2) * nu
                + (3 * E ** 3 / F ** 2 - 3 * D * E / F + 1) * nu ** 2
                - E ** 3 / F ** 2 * nu ** 3) / (1 - nu) ** 3
        Zpert = a / R_atml / T / V * (1 - 1.41 * b / V / 4 + 5.07 * (b / V / 4) ** 2)
        return V - (Zref + Zpert) * R_atml * T / P

    v = fsolve(Vm, V)
    return unidades.SpecificVolume(v / self.peso_molecular)

def DP_RhoG(self, T, P):
    """Gas density from the Dohrn-Prausnitz equation of state"""
    z = self.DP_Z(T, P)
    return unidades.SpecificVolume(P / z / R_atml / T * self.peso_molecular)
def encode(self, graph, input_label):
    """Perform the encoding of a given graph. Return the internal state
    representation x \in (|V(g)| x Nr) as a list of arrays.

    How it works:
    - Start from x = 0.
    - For each vertex v compute:
        x_t(v) = tanh( W_in u(v) + \sum_{w \in N(v)} W x_{t-1}(w) ).
    - Stop when ||x_t - x_{t-1}|| < epsilon for each vertex or after a
      fixed number of iterations.

    Note: no bias is needed in the input label. A bias term (+1) is added
    during the encoding process.

    Arguments:
    graph       -- Input Graph.
    input_label -- Name of the attribute storing the input label. The input
                   label must be a scipy.array shaped (Nu,). (string)

    Return:
    Internal representation of the graph (scipy.array (|V(g)|,Nr)).

    Exceptions:
    Raise an error if the encoding doesn't converge (i.e. after 'maxit'
    iterations).
    """
    key = self._extract_key(graph, input_label)
    if key in self._cache_encode:
        return self._cache_encode[key]
    else:
        Wu = self._compute_Wu(graph, input_label)        # W_in u(v), (|V|,Nr)
        x = scipy.zeros((len(graph.vertices), self.Nr))  # (|V|,Nr)
        done = False
        it = 0  # iteration counter
        while (not done) and it < self.maxit:
            if it == 0:
                # x_0(v) = 0 = Wx_0 for each vertex
                next_x = scipy.tanh(Wu)
            else:
                Wx = self._compute_Wx(graph, x)          # (|V|,Nr)
                next_x = scipy.tanh(Wx + Wu)
            # check for convergence
            done = self._check_convergence(x, next_x)
            x = next_x  # update current encoding
            it += 1
        if it == self.maxit:
            raise Exception("Unable to reach reservoir convergence")
        assert x.shape == (len(graph.vertices), self.Nr), \
            "Invalid shape x(g): %s" % str(x.shape)
        if self.memo_enc:
            self._cache_encode[key] = x  # save result
        return x
def double_tanh_warp(x, n, lcore, lmid, ledge, la, lb, xa, xb):
    r"""Implements a sum-of-tanh warping function and its derivative.

    .. math::

        l = a\tanh\frac{x-x_a}{l_a} + b\tanh\frac{x-x_b}{l_b}

    Parameters
    ----------
    x : float or array of float
        Locations to evaluate the function at.
    n : int
        Derivative order to take. Used for ALL of the points.
    lcore : float
        Core length scale.
    lmid : float
        Intermediate length scale.
    ledge : float
        Edge length scale.
    la : positive float
        Transition of first tanh.
    lb : positive float
        Transition of second tanh.
    xa : float
        Transition of first tanh.
    xb : float
        Transition of second tanh.

    Returns
    -------
    l : float or array
        Warped length scale at the given locations.

    Raises
    ------
    NotImplementedError
        If `n` > 1.
    """
    a, b, c = scipy.dot([[-0.5, 0, 0.5], [0, 0.5, -0.5], [0.5, 0.5, 0]],
                        [[lcore], [ledge], [lmid]])
    a = a[0]
    b = b[0]
    c = c[0]
    if n == 0:
        return a * scipy.tanh((x - xa) / la) + b * scipy.tanh((x - xb) / lb) + c
    elif n == 1:
        return (a / la * (scipy.cosh((x - xa) / la))**(-2.0) +
                b / lb * (scipy.cosh((x - xb) / lb))**(-2.0))
    else:
        raise NotImplementedError("Only derivatives up to order 1 are supported!")
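# --- A minimal check (not from the original source) that the analytic
# derivative (n=1) of double_tanh_warp matches a central finite difference
# of n=0, with made-up parameters; assumes double_tanh_warp and its scipy
# import are in scope (scipy.tanh/scipy.cosh need SciPy < 1.6).
x, h = 0.3, 1e-6
args = (1.0, 2.0, 0.5, 0.2, 0.3, 0.1, 0.8)  # lcore, lmid, ledge, la, lb, xa, xb
fd = (double_tanh_warp(x + h, 0, *args) - double_tanh_warp(x - h, 0, *args)) / (2 * h)
print(abs(fd - double_tanh_warp(x, 1, *args)) < 1e-6)  # expect True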
def RegularLightSimple(t, Intensity=150.0, wakeUp=8.0, workday=16.0):
    """Define a basic light schedule with a given intensity of the light,
    wake-up time and length of the active (non-sleeping) period.

    This schedule automatically repeats on a daily basis, so each day will
    be the same.
    """
    s = fmod(t, 24.0) - wakeUp
    if s < 0:
        s += 24.0
    val = 0.5 * sp.tanh(100 * s) - 0.5 * sp.tanh(100 * (s - workday))
    return Intensity * val
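# --- Usage sketch (assumptions: RegularLightSimple in scope, `sp` exposing
# a tanh ufunc, and fmod imported from math): with the defaults, the
# schedule outputs ~150 lux from t = 8 h to t = 24 h of each day and ~0
# lux otherwise.
for t in (6.0, 12.0, 30.0, 36.0):
    print(t, RegularLightSimple(t))  # ~0, ~150, ~0, ~150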
def encode(self, graph, input_label):
    """Override the GraphESN.encode method. The new encode also takes
    auxiliary reservoir inputs into account, possibly coming from other
    PluggableGraphESN instances. The encoding step is modified as follows:

        x_t(v) = tanh( W_in u(v) + \sum_{w \in N(v)} W x_{t-1}(w) + W_aux z_aux(g) )

    It's worth mentioning that z_aux(g) is the concatenation of values all
    referring to the whole graph instead of the current vertex. This allows
    the local encoding process to exploit contextual information.

    Arguments:
    graph       -- Input Graph.
    input_label -- Name of the attribute storing the input label. The input
                   label must be a scipy.array shaped (Nu,). (string)

    Return:
    Internal representation of the graph (scipy.array (|V(g)|,Nr)).

    Exceptions:
    Raise an error if the encoding doesn't converge (i.e. after 'maxit'
    iterations).
    """
    key = self._extract_key(graph, input_label)
    if key in self._cache_encode:
        return self._cache_encode[key]
    else:
        Wu = self._compute_Wu(graph, input_label)  # W_in u(v), (|V|,Nr)
        Wz = self._compute_Wz(graph, input_label)  # W_aux z_aux(g), (Nr,)
        Wuz = Wu + Wz  # W_in u(v) + W_aux z_aux(g) for all v, (|V|,Nr)
        x = scipy.zeros((len(graph.vertices), self.Nr))  # (|V|,Nr)
        done = False
        it = 0  # iteration counter
        while (not done) and it < self.maxit:
            if it == 0:
                # x_0(v) = 0 = Wx_0 for each vertex
                next_x = scipy.tanh(Wu)
            else:
                Wx = self._compute_Wx(graph, x)  # (|V|,Nr)
                next_x = scipy.tanh(Wx + Wuz)
            # check for convergence
            done = self._check_convergence(x, next_x)
            x = next_x  # update current encoding
            it += 1
        if it == self.maxit:
            raise Exception("Unable to reach reservoir convergence")
        assert x.shape == (len(graph.vertices), self.Nr), \
            "Invalid shape x(g): %s" % str(x.shape)
        if self.memo_enc:
            self._cache_encode[key] = x  # save result
        return x
def generateTS(self):
    dt = 1.0 / 60.0  # minute-long bins
    self.time = np.arange(0.0, 24.0 * 1000 + dt, dt)
    self.lightVals = []
    period = 24.0  # period of the days
    AP = 16.0  # activity period (this is allowed to vary day to day)
    PP = 12.0  # photoperiod (hours where you can get outdoor sunlight; this is fixed)
    IntLast = 0.0
    # LightOnset controls when the AP starts (defaults to dawn).
    # Can't wake up earlier than dawn as of yet.
    LightOnset = 0.0
    for t in self.time:
        day = t / 24.0
        if day.is_integer():
            AP = np.random.normal(loc=self.ap_loc, scale=self.ap_sd)
            if AP > 22.0:
                AP = 22.0
            if PP < 12.0:
                AP = 12.0
            # LightOnset = np.random.uniform(0.0, 2.0)
        # Sleep modulation of light input
        val = 0.5 * sp.tanh(10 * (fmod(t, period) - LightOnset)) \
            - 0.5 * sp.tanh(10 * (fmod(t, period) - (AP + LightOnset)))
        if fmod(t, period) <= PP:
            # Random light intensity, allowing for natural light exposures
            Int = self.background_ll + 10**np.random.normal(0.0, self.nl_sd)
        else:
            # Random light intensity, indoor ranges only
            Int = self.background_ll + np.random.normal(loc=0.0, scale=self.al_sd)
        if Int > 10000:
            Int = 10000.0
        if Int < 0.0:
            Int = 0.0
        self.lightVals.append(Int * val)
        IntLast = Int
    self.lightVals = np.array(self.lightVals)
    # Do some smoothing of the data
    LightTS = pd.Series(self.lightVals, self.time)
    b = LightTS.rolling(window=10, center=False).mean()
    self.lightVals[10:] = b.values[10:]
    self.LightFunc = interpolate.interp1d(self.time, self.lightVals)
def calculate_dwf(self):
    """
    Calculates Debye-Waller factor according to
    Sears and Shelley Acta Cryst. A 47, 441 (1991)
    """
    run = self.vanaws.getRun()
    nhist = self.vanaws.getNumberHistograms()
    thetasort = np.zeros(nhist)  # theta in radians, not 2Theta
    instrument = self.vanaws.getInstrument()
    detID_offset = self.get_detID_offset()
    for i in range(nhist):
        det = instrument.getDetector(i + detID_offset)
        thetasort[i] = 0.5 * np.sign(np.cos(det.getPhi())) * self.vanaws.detectorTwoTheta(det)
        # thetasort[i] = 0.5*self.vanaws.detectorSignedTwoTheta(det)
        # gives opposite sign for detectors 0-24
    temperature = self.get_temperature()  # T in K
    wlength = float(run.getLogData('wavelength').value)  # Wavelength, Angstrom
    mass_vana = 0.001 * self.Mvan / sp.constants.N_A  # Vanadium mass, kg
    temp_ratio = temperature / self.DebyeT
    if temp_ratio < 1.e-3:
        integral = 0.5
    else:
        integral = integrate.quad(lambda x: x / sp.tanh(0.5 * x / temp_ratio), 0, 1)[0]
    msd = 3. * sp.constants.hbar**2 / (2. * mass_vana * sp.constants.k * self.DebyeT) * integral * 1.e20
    return np.exp(-msd * (4. * sp.pi * sp.sin(thetasort) / wlength)**2)
def plot_neural_net(X, y, clf, segment=False):
    values = X
    pca_plot(X, y, save="00_conn.png", segment=segment)
    counter = 1
    for i, layer in enumerate(clf.nn.modulesSorted):
        name = layer.__class__.__name__
        if name == "BiasUnit":
            continue
        try:
            conn = clf.nn.connections[layer][0]
        except IndexError:
            continue
        if "Linear" not in name:
            if "Sigmoid" in name:
                add = "sigmoid"
                values = sigmoid(values)
            elif "Tanh" in name:
                add = "tanh"
                values = tanh(values)
            pca_plot(values, y, save="%02d_conn_%s.png" % (counter, add),
                     segment=segment)
            counter += 1
        shape = (conn.outdim, conn.indim)
        temp = numpy.dot(numpy.reshape(conn.params, shape), values.T)
        pca_plot(temp.T, y, save="%02d_conn.png" % counter, segment=segment)
        counter += 1
        values = temp.T
def calculateELBO(self):
    # Compute the Evidence Lower Bound using the lower bound to the likelihood
    Z = self.markov_blanket["Z"].getExpectation()
    Wtmp = self.markov_blanket["W"].getExpectations()
    Ztmp = self.markov_blanket["Z"].getExpectations()
    zeta = self.params["zeta"]
    SW, SWW = Wtmp["E"], Wtmp["E2"]
    Z, ZZ = Ztmp["E"], Ztmp["E2"]
    mask = self.getMask()

    # calculate E(Z)E(W)
    ZW = Z.dot(SW.T)
    ZW[mask] = 0.

    # Calculate E[(ZW_nd)^2], which equals
    # E[\sum_{k != k'} z_k w_k z_k' w_k'] + E[\sum_{k} z_k^2 w_k^2]
    tmp1 = s.square(ZW) - s.dot(s.square(Z), s.square(SW).T)  # terms in k != k'
    tmp2 = ZZ.dot(SWW.T)  # terms in k = k'
    EZZWW = tmp1 + tmp2

    # calculate elbo terms
    term1 = 0.5 * ((2. * self.obs - 1.) * ZW - zeta)
    term2 = -s.log(1 + s.exp(-zeta))
    term3 = -1 / (4 * zeta) * s.tanh(zeta / 2.) * (EZZWW - zeta**2)

    lb = term1 + term2 + term3
    lb[mask] = 0.
    return lb.sum()
def draw_eccentricities(self, eccentricity='flat', emin=0., emax='tidal'):
    """Draw a new eccentricity distribution.

    Either provide an array (with the same length as the number of binaries)
    or choose from:
    - 'flat': Flat distribution between `emin` and `emax`.
    - 'thermal': f(e) ~ e^2 between `emin` and `emax`.

    If `emax` is set to 'tidal' it will depend on the period of the binary
    to mimic tidal circularization through:

        emax = max(emin, 0.5 * (0.95 + tanh(0.6 * log_10(period in days) - 1.7)))

    If this setting is chosen the eccentricities will have to be redrawn if
    the periods are redrawn.

    Arguments:
    - `eccentricity`: New eccentricity distribution (array or name of
      distribution).
    - `emin`: Minimum eccentricity (default: 0.)
    - `emax`: Maximum eccentricity (default: set by tidal circularization)
    """
    nbinaries = self.size
    if emax == 'tidal':
        emax = .5 * (0.95 + sp.tanh(0.6 * sp.log10(self['period'] * 365.25) - 1.7))
        emax[emax < emin] = emin
    if eccentricity == 'flat':
        self['eccentricity'] = sp.random.rand(nbinaries) * (emax - emin) + emin
    elif eccentricity == 'thermal':
        self['eccentricity'] = sp.sqrt(sp.random.rand(nbinaries) * (emax**2. - emin**2.) + emin**2.)
    elif isinstance(eccentricity, basestring):
        raise ValueError("Eccentricity distribution '%s' not found" % eccentricity)
    else:
        self['eccentricity'] = eccentricity
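# --- A quick look (not from the original source) at the tidal ceiling used
# above, with hypothetical periods in days (numpy for the ufuncs):
import numpy as np

P_days = np.array([1., 10., 100., 1000.])
print(0.5 * (0.95 + np.tanh(0.6 * np.log10(P_days) - 1.7)))
# ~ [0.01, 0.07, 0.24, 0.52]: short-period binaries stay nearly circular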
def calculate_dwf(self):
    """
    Calculates Debye-Waller factor according to
    Sears and Shelley Acta Cryst. A 47, 441 (1991)
    """
    run = self.vanaws.getRun()
    nhist = self.vanaws.getNumberHistograms()
    thetasort = np.empty(nhist)  # half of the scattering angle, in radians
    for i in range(nhist):
        det = self.vanaws.getDetector(i)
        thetasort[i] = 0.5 * self.vanaws.detectorTwoTheta(det)
    temperature = self.get_temperature()  # T in K
    wlength = float(run.getLogData('wavelength').value)  # Wavelength, Angstrom
    mass_vana = 0.001 * self.Mvan / sp.constants.N_A  # Vanadium mass, kg
    temp_ratio = temperature / self.DebyeT
    if temp_ratio < 1.e-3:
        integral = 0.5
    else:
        integral = \
            integrate.quad(lambda x: x / sp.tanh(0.5 * x / temp_ratio), 0, 1)[0]
    msd = 3. * sp.constants.hbar**2 / \
        (2. * mass_vana * sp.constants.k * self.DebyeT) * integral * 1.e20
    return np.exp(-msd * (4. * sp.pi * sp.sin(thetasort) / wlength)**2)
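# --- A standalone sketch (not from the original sources) of the
# Sears-Shelley integral used in calculate_dwf, with hypothetical sample
# temperatures and the approximate Debye temperature of vanadium:
import numpy as np
from scipy import integrate

DebyeT = 390.0           # Debye temperature of vanadium, K (approximate)
for T in (1.0, 300.0):   # hypothetical sample temperatures, K
    ratio = T / DebyeT
    print(T, integrate.quad(lambda x: x / np.tanh(0.5 * x / ratio), 0, 1)[0])
    # tends to 0.5 as T -> 0, which is the branch hard-coded above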
def ne(rho, ncore, nped):
    # horrible function from /u/dlbo/w_diag/fit_function_ne_profile.pro,
    # used by the GIW shotfile generator w_diag
    temp = (_func(rho) - _func(.97)) / (_func(0.) - _func(.97))
    temp[rho > .97] = 0.
    return temp * (ncore - nped) * scipy.exp(-2 * pow(rho, 2)) \
        + nped * .5 * (1 + scipy.tanh((.99 - rho) * 80))
def astroOut(self):
    for astro in self.astros:
        astro.act = tanh(astro.act)
        for syn in astro.syns:
            i, j = syn
            self.astroOuts[i][j] = astro.act * self.syn_wi[i][j]
    return self.astroOuts.copy()
def tanh_warp_arb(X, l1, l2, lw, x0):
    r"""Warps the `X` coordinate with the tanh model

    .. math::

        l = \frac{l_1 + l_2}{2} - \frac{l_1 - l_2}{2}\tanh\frac{x-x_0}{l_w}

    Parameters
    ----------
    X : :py:class:`Array`, (`M`,) or scalar float
        `M` locations to evaluate length scale at.
    l1 : positive float
        Small-`X` saturation value of the length scale.
    l2 : positive float
        Large-`X` saturation value of the length scale.
    lw : positive float
        Length scale of the transition between the two length scales.
    x0 : float
        Location of the center of the transition between the two length scales.

    Returns
    -------
    l : :py:class:`Array`, (`M`,) or scalar float
        The value of the length scale at the specified point.
    """
    if isinstance(X, scipy.ndarray):
        if isinstance(X, scipy.matrix):
            X = scipy.asarray(X, dtype=float)
        return 0.5 * ((l1 + l2) - (l1 - l2) * scipy.tanh((X - x0) / lw))
    else:
        return 0.5 * ((l1 + l2) - (l1 - l2) * mpmath.tanh((X - x0) / lw))
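# --- Usage sketch with made-up values (assumes tanh_warp_arb and its
# scipy/mpmath imports are in scope; scipy.tanh needs SciPy < 1.6): far
# below x0 the warp saturates at l1, far above at l2.
import numpy as np

X = np.array([-100.0, 0.0, 100.0])
print(tanh_warp_arb(X, l1=2.0, l2=0.5, lw=1.0, x0=0.0))
# -> approximately [2.0, 1.25, 0.5]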
def EstimateCC_NewHighRes(cutoff, d0, r, Ey):
    '''Estimate a new high resolution cutoff based on CC1/2 = value.
    Ey is used to check whether the experimental data contain a
    CC1/2 < threshold; if not, the function is stopped.
    '''
    from math import sqrt
    from scipy import tanh
    from numpy import linspace

    # Check if the calculation is sensible or not
    if cutoff <= min(Ey):
        print "WARNING: data don't reach this cutoff value... skipping CC1/2 analysis"
        return

    cutoff = float(cutoff)
    d0 = float(d0)
    r = float(r)
    HighRes = None
    CC_calc, delta = [], []
    # Search for the CC1/2 resolution by minimizing CC1/2 - cutoff
    x = linspace(0., 1., 1001).tolist()
    for val in x:
        CC_calc.append(0.5 * (1 - tanh((val - d0) / r)))
    for val in CC_calc:
        delta.append(abs(val - cutoff))
    HighRes = round(sqrt(1 / x[delta.index(min(delta))]), 2)
    CalculatedCutoff = CC_calc[delta.index(min(delta))]
    print '''%s
   -> Suggested New High Resolution Limit: %.2f A for CC1/2= %.2f <-
%s''' % ('=' * 66, HighRes, CalculatedCutoff, '=' * 66)
    return HighRes
def calculateCPandRT(stim_strengths, k, A, t1_nondt, t2_nondt):
    """
    Given a diffusion to bound model with flat bounds, compute the following
    as a function of stimulus strength:
    1) Probability of t1 choice (correct choice for positive strengths)
    2) Mean reaction times for t1 and t2 choices

    Parameters
    ----------
    stim_strengths : array of unique stimulus strengths
    k : float, proportionality constant between stim_strength and drift rate
    A : float, bound
    t1_nondt : float, non-decision time for making t1 choice
    t2_nondt : float, non-decision time for making t2 choice

    Returns
    -------
    out : dictionary with keys stim_strengths, p_t1, t1_meanrt, t2_meanrt
    """
    # compute probability of t1 response given k and A
    p = 1 / (1 + np.exp(-2 * A * k * stim_strengths))

    # compute mean response times for t1 and t2
    t1_meanrt = np.zeros(len(stim_strengths))
    t2_meanrt = np.zeros(len(stim_strengths))
    for i in range(len(stim_strengths)):
        if stim_strengths[i] == 0:
            t1_meanrt[i] = A ** 2 + t1_nondt
            t2_meanrt[i] = A ** 2 + t2_nondt
        else:
            t1_meanrt[i] = A / (k * stim_strengths[i]) * \
                sp.tanh(A * k * stim_strengths[i]) + t1_nondt
            t2_meanrt[i] = A / (k * stim_strengths[i]) * \
                sp.tanh(A * k * stim_strengths[i]) + t2_nondt

    out = {}
    out['stim_strengths'] = stim_strengths
    out['p_t1'] = p
    out['t1_meanrt'] = t1_meanrt
    out['t2_meanrt'] = t2_meanrt
    return out
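# --- Sanity sketch (not from the original source) for the zero-strength
# branch above: as C -> 0, (A/(k*C)) * tanh(A*k*C) -> A**2, the value
# hard-coded for stim_strengths[i] == 0 (numpy for tanh).
import numpy as np

A, k, C = 1.3, 0.4, 1e-8
print((A / (k * C)) * np.tanh(A * k * C), A**2)  # both ~1.69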
def activateHid(self):
    # self.ah[:] = tanh(sum(self.wi.T * self.ai, axis=1))
    for j in range(self.hiddim):
        s = 0
        for i in range(self.indim):
            s += sum(self.wi[i][j] * (self.ai + self.astroOuts[i][j]))
        # print self.astroOuts
        self.ah[j] = tanh(s)
def TempProfile(z, T0=1000., z0=100.):
    """This function creates a temperature profile for test purposes."""
    zall = (z - z0) * 2. * sp.exp(1) / 400. - sp.exp(1)
    atanshp = (sp.tanh(zall) + 1.) / 2
    Te = 1700 * atanshp + T0
    Ti = 500 * atanshp + T0
    return (Te, Ti)
def func(x, d0, r):
    '''x and y are lists of the same size;
    x is 1/d**2, y is CC1/2.
    d0 is 1/d**2 at half decrease,
    r is the steepness of the falloff.
    '''
    from scipy import tanh
    return 0.5 * (1 - tanh((x - d0) / r))
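# --- A minimal fit sketch with synthetic data (not from the original
# source): func's (x, d0, r) signature is what scipy.optimize.curve_fit
# expects; func itself needs SciPy < 1.6 for its `from scipy import tanh`.
import numpy as np
from scipy.optimize import curve_fit

x = np.linspace(0.0, 0.5, 50)              # 1/d**2
y = 0.5 * (1 - np.tanh((x - 0.2) / 0.05))  # synthetic CC1/2 with d0=0.2, r=0.05
popt, _ = curve_fit(func, x, y, p0=(0.1, 0.1))
print(popt)  # ~ [0.2, 0.05]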
def get_rotation_angle(t, f, amp, u, k, phase=0.0):
    """
    Rotation angle function w/ J. Wang's shape parameter. The control input
    shifts the mean rotation angle.

    Arguments:
        t     = array of time points
        f     = flapping frequency
        amp   = rotation angle amplitude
        u     = control input, scalar or array
        k     = shape parameter, k near 0 implies sinewave,
                k large implies square wave.
    """
    alpha = amp / scipy.tanh(k) * scipy.tanh(k * scipy.sin(2 * PI * f * t + phase)) + u
    return alpha
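# --- A quick sketch of the shape parameter (made-up values; assumes
# get_rotation_angle, PI and an old-SciPy `scipy` in scope): at a phase
# where sin(.) ~ 0.707, small k gives a sine-like value and large k a
# square-like one.
t = 0.125  # sin(2*pi*t) ~ 0.707 for f = 1
print(get_rotation_angle(t, f=1.0, amp=1.0, u=0.0, k=0.01))  # ~0.71 (sine-like)
print(get_rotation_angle(t, f=1.0, amp=1.0, u=0.0, k=10.0))  # ~1.0  (square-like)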
def grate(k, L):
    l2 = (np.pi**2) / (4 * (L**2))
    mu = np.sqrt(k**2 + l2)
    th = sp.tanh(mu / 2.)  # tanh(mu/2)
    cth = 1. / th          # coth(mu/2)
    sigma = (k / mu) * np.sqrt((mu / 2. - cth) * (th - mu / 2.))
    # NaNs arise where the product under the sqrt is negative
    # (no growth); zero them out
    sigma[np.isnan(sigma)] = 0.
    return sigma
def step(x, break_points):
    """
    Step-like scaling function.

    Parameters
    ----------
    x: ndarray
        An input array, for which to compute the scalings.
    break_points: tuple
        A list of the break points. Each entry should be a tuple of
        (break_position, break_width).

    Returns
    -------
    ndarray
        Array of computed scales in the [-1; 1] range.
    """
    # Applying the first break point
    break_point = break_points[0]
    break_x = break_point[0]
    break_width = break_point[1]
    res = scipy.tanh((x - break_x) / break_width)
    sign = 1
    # If there are more break points given, applying them as well
    for break_point in break_points[1:]:
        # First recalling the previous break point position
        break_x_old = break_x
        # New break point data
        break_x = break_point[0]
        break_width = break_point[1]
        # Will fill only points above the transition position
        trans_x = (break_x + break_x_old) / 2.0
        above_trans_x = scipy.where(x >= trans_x)
        # Flip the sign - above the transition position function behaviour is reversed
        sign *= -1
        res[above_trans_x] = sign * scipy.tanh((x[above_trans_x] - break_x) / break_width)
    return res
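# --- Usage sketch (made-up break points; assumes step and an old-SciPy
# `scipy` in scope): a rise at x = 2 followed by a fall at x = 6, each
# with width 0.5.
import numpy as np

x = np.linspace(0., 8., 9)
print(step(x, [(2.0, 0.5), (6.0, 0.5)]))
# ~ -1 below x = 2, ~ +1 between the breaks, back toward -1 after x = 6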
def performAction(self, action):
    # Filtered mapping towards performAction of the underlying environment.
    # The standard Johnnie task uses a PID controller to control angles
    # directly instead of forces; this makes most tasks much simpler to learn.
    isJoints = self.env.getSensorByName('JointSensor')  # the joint angles
    isSpeeds = self.env.getSensorByName('JointVelocitySensor')  # the joint angular velocities
    # norm output to action interval
    act = (action + 1.0) / 2.0 * (self.env.cHighList - self.env.cLowList) + self.env.cLowList
    # simple PID
    action = tanh((act - isJoints - isSpeeds) * 16.0) * self.maxPower * self.env.tourqueList
    EpisodicTask.performAction(self, action)
def fun(t, z):
    i = 1
    Fr = sp.zeros(40)
    while i < 20:
        Fr[i + 20] = -(CAP[i]) * invM[i, i] * sp.tanh((z[20 + i]) - z[20 + i - 1])
        i += 1
    return sp.matmul(A, z) + Fr
def te(rho, tcore, tped):
    # horrible function from /u/dlbo/w_diag/fit_function_te_profile_new.pro,
    # used by the GIW shotfile generator w_diag
    # corr = scipy.exp(-3.*pow(.97-rho,2))
    # temp = ((1-corr)+.97*6*scipy.exp(-3*pow(.97,2))*(rho*scipy.exp(-8*pow(rho,2))-.97*scipy.exp(-8*pow(.97,2))))/0.940374146
    temp = (_func(rho) - _func(.97)) / (_func(0.) - _func(.97))
    temp[rho > .97] = 0.
    return temp * (tcore - tped) * scipy.exp(-2 * pow(rho, 2)) \
        + tped * .5 * (1 + scipy.tanh((.985 - rho) * 60))
def avgxHomogeneous(h, J, N):
    Zfunc = lambda m: scipy.exp(-FHomogeneous(h, J, N, m))
    Z = scipy.integrate.quad(Zfunc, -scipy.inf, scipy.inf)[0]
    Jbar = N * J
    xFunc = lambda m: scipy.tanh(2. * Jbar * m + h) * scipy.exp(-FHomogeneous(h, J, N, m))
    avgx = scipy.integrate.quad(xFunc, -scipy.inf, scipy.inf)[0] / Z
    return avgx
def update_reservoir(self, u, n, Y):
    # u is the input at a specific time
    # u has shape (N_u (3 for L63))
    # See page 16 eqtn 18 of Lukosevicius PracticalESN for feedback info.
    x_n_tilde = sp.tanh(sp.matmul(self.W, self.x[n])
                        + sp.matmul(self.W_in, sp.hstack((sp.array([1]), u)))
                        + sp.matmul(self.W_fb, Y))
    # TODO: Add derivative term?
    self.x[n+1] = sp.multiply((1 - self.alpha_matrix), self.x[n]) \
        + sp.multiply(self.alpha_matrix, x_n_tilde)
def main():
    a = array([1, 1])
    b = array([0, 0])
    x = array([-0.5, 1])
    y = tanh(a * x + b)
    dbma = [-(-0.46 * 2.79), -(0.76 * 2.42)]
    print("should be:", dbma[0], dbma[1])
    print("is:", ipgauss(x, y, a, b)[0])
def dNeurons(self, statevec, t):
    # Extract relevant parameters from the state vector
    train = training > 0 and t > training and t < training + train_dur
    x_i = statevec[0:Ng]
    w_i = statevec[Ng:2 * Ng]

    # generate noise patterns
    exp_noise_amp = 0.1
    if train:
        zeta = np.random.uniform(-exp_noise_amp, exp_noise_amp, 1)
    else:
        zeta = 0.0

    # Compute firing rates and feedback signals
    r_i = sp.tanh(x_i) + zeta_state(t)
    z_i = np.dot(w_i, r_i) + zeta

    # Compute the next timestep, depending on whether we are training or not
    if train:
        dxidt = (-x_i + lambd * np.dot(W_rec, r_i)
                 + np.dot(W_in_left, self.uleft[t])
                 + np.dot(W_in_right, self.uright[t])
                 + z_i * W_fb) / tau
        x_new = x_i + dxidt * dt
        P = -1.0 * sp.power(z_i - self.f_target[t], 2)
        M = 1.0 * (P > self.P_avg)
        dwdt = eta(t) * (z_i - self.z_avg) * M * r_i
        w_new = w_i + dwdt * dt
        self.P_avg = (1 - (dt / tau_avg)) * self.P_avg + (dt / tau_avg) * P
        self.z_avg = (1 - (dt / tau_avg)) * self.z_avg + (dt / tau_avg) * z_i
    else:
        dxidt = (-x_i + lambd * np.dot(W_rec, r_i)
                 + np.dot(W_in_left, self.uleft[t])
                 + np.dot(W_in_right, self.uright[t])
                 + z_i * W_fb) / tau
        x_new = x_i + dxidt * dt
        dwdt = np.zeros(np.shape(w_i))
        w_new = w_i

    # weight change magnitude:
    dwmag = LA.norm(dwdt)
    rmag = LA.norm(r_i)
    if t % 10000 == 0:
        print(str(t) + ' ' + str(z_i) + ' ' + str(train))
    self.zsave[t] = z_i
    self.rsave[t] = rmag
    return np.concatenate((x_new, w_new))
def fun(t, z):
    # ---- This only reports which time we have reached
    if t > fun.tnextreport:
        print " {} at t = {}".format(fun.solver, fun.tnextreport)
        fun.tnextreport += 1
    if fun.solver != "Euler":
        z = np.squeeze(z)
    Famort = sp.zeros(40)  # frictional damping force vector
    Famort[0] = -(CAP[0] * (1. / M[0, 0]) * sp.tanh(z[20] / vr))
    for i in range(1, 20):
        Famort[i] = -(1. / M[i, i]) * CAP[i] * sp.tanh((z[i + 20] - z[i - 1 + 20]) / vr)
    Ft = sp.zeros(40)
    if t < 226.81:
        Ft[20:] = inte(t) * 9.8
    return sp.matmul(A, z) + Famort + Ft
def showFor(self, segment, multiplier=1.0):
    nhi = self.takeNumber(self.getSymbol("hi"))
    nlo = self.takeNumber(self.getSymbol("lo"))
    hi = max(nlo + 1.0, nhi)
    lo = min(nhi - 1.0, nlo)
    self.modSymbol("hi", self.makeNumber(hi))
    self.modSymbol("lo", self.makeNumber(lo))
    duration = abs(hi + lo) / 2.0
    duration += (abs(hi - lo) / 2.0) * scipy.tanh(self.tparam)
    print segment + "\r",
    sys.stdout.flush()
    time.sleep(duration * multiplier)
def fun(t, z):
    # --- Timestep progress report
    if t > fun.tnextreport:
        print " {} at t = {}".format(fun.solver, fun.tnextreport)
        fun.tnextreport += 1
    # --- Computation
    Famort = sp.zeros(40)  # frictional damping force vector
    Famort[0] = -(Cap[0] * (1. / M[0, 0]) * sp.tanh(z[20] / vr))
    Ft = sp.zeros(40)
    if t < 226.81:
        Ft[20:] = f0(t) * 9.8
    return sp.matmul(A, z) + Famort + Ft
def ungray(self):
    self.R = self.source[:, :, 0]
    self.G = self.source[:, :, 1]
    self.B = self.source[:, :, 2]
    self.R = filter2D(self.R, -1, self.kernel)
    self.G = filter2D(self.G, -1, self.kernel)
    self.B = filter2D(self.B, -1, self.kernel)
    slope = self.control[ord('s')].val
    self.R = (1.0 + tanh(slope * self.R)) / 2.0
    self.G = (1.0 + tanh(slope * self.G)) / 2.0
    self.B = (1.0 + tanh(slope * self.B)) / 2.0
    self.R = filter2D(self.R, -1, self.gauss)
    self.G = filter2D(self.G, -1, self.gauss)
    self.B = filter2D(self.B, -1, self.gauss)
    self.target[:, :, 0] = self.R * self.scale
    self.target[:, :, 1] = self.G * self.scale
    self.target[:, :, 2] = self.B * self.scale
def performAction(self, action):
    # Filtered mapping towards performAction of the underlying environment.
    # The standard CCRL task uses a PID controller to control angles directly
    # instead of forces; this makes most tasks much simpler to learn.
    self.oldAction = action
    # Grasping as a reflex depending on the distance to target
    # - comment in for easier grasping
    if abs(abs(self.dist[:3]).sum()) < 2.0:
        action[15] = 1.0
        # self.grepRew = action[15] * .01
    else:
        action[15] = -1.0
        # self.grepRew = action[15] * -.03
    isJoints = array(self.env.getSensorByName('JointSensor'))  # the joint angles
    isSpeeds = array(self.env.getSensorByName('JointVelocitySensor'))  # the joint angular velocities
    # norm output to action interval
    act = (action + 1.0) / 2.0 * (self.env.cHighList - self.env.cLowList) + self.env.cLowList
    # simple PID
    action = tanh((act - isJoints - 0.9 * isSpeeds * self.env.tourqueList) * 16.0) * self.maxPower * self.env.tourqueList
    EpisodicTask.performAction(self, action)
def performAction(self, action):
    # Filtered mapping towards performAction of the underlying environment.
    # The standard CCRL task uses a PID controller to control angles directly
    # instead of forces; this makes most tasks much simpler to learn.
    self.oldAction = action
    # Grasping as a reflex depending on the distance to target
    # - comment in for easier grasping
    # if abs(self.dist[2]) < 2.0: action[15] = (1.0 + 2.0*action[15])*.3333
    # self.grepRew = action[15]*.01
    # else: action[15] = (-1.0 + 2.0*action[15])*.3333
    # self.grepRew = action[15]*-.03
    isJoints = array(self.env.getSensorByName('JointSensor'))  # the joint angles
    isSpeeds = array(self.env.getSensorByName('JointVelocitySensor'))  # the joint angular velocities
    # norm output to action interval
    act = (action + 1.0) / 2.0 * (self.env.cHighList - self.env.cLowList) + self.env.cLowList
    # simple PID
    action = tanh((act - isJoints - 0.9 * isSpeeds * self.env.tourqueList) * 16.0) * self.maxPower * self.env.tourqueList
    EpisodicTask.performAction(self, action)
def Calculate_Sigma(self, data):
    for temperature in xrange(self.n_temp):
        # quantum expression: coth(E / 2kT) thermal weighting
        tmp = 1 / (sp.tanh(data.energy / (2 * K_B * (temperature + 1))))
        self.IP_cluster_qu[temperature] = np.sqrt(np.sum((data.IP_cluster_a1 * data.IP_cluster_a1) / 2 * tmp))
        self.EA_cluster_qu[temperature] = np.sqrt(np.sum((data.EA_cluster_a1 * data.EA_cluster_a1) / 2 * tmp))
        self.IP_alone_qu[temperature] = np.sqrt(np.sum((data.IP_alone_a1 * data.IP_alone_a1) / 2 * tmp))
        self.EA_alone_qu[temperature] = np.sqrt(np.sum((data.EA_alone_a1 * data.EA_alone_a1) / 2 * tmp))
        self.P_plus_qu[temperature] = np.sqrt(np.sum((data.P_plus_a1 * data.P_plus_a1) / 2 * tmp))
        self.P_minus_qu[temperature] = np.sqrt(np.sum((data.P_minus_a1 * data.P_minus_a1) / 2 * tmp))
        if temperature + 1 == 300:
            self.IP_cluster_qu_300K = 100 * ((data.IP_cluster_a1 * data.IP_cluster_a1) / 2 * tmp) / (self.IP_cluster_qu[temperature] * self.IP_cluster_qu[temperature])
            self.EA_cluster_qu_300K = 100 * ((data.EA_cluster_a1 * data.EA_cluster_a1) / 2 * tmp) / (self.EA_cluster_qu[temperature] * self.EA_cluster_qu[temperature])
            self.IP_alone_qu_300K = 100 * ((data.IP_alone_a1 * data.IP_alone_a1) / 2 * tmp) / (self.IP_alone_qu[temperature] * self.IP_alone_qu[temperature])
            self.EA_alone_qu_300K = 100 * ((data.EA_alone_a1 * data.EA_alone_a1) / 2 * tmp) / (self.EA_alone_qu[temperature] * self.EA_alone_qu[temperature])
            self.P_plus_qu_300K = 100 * ((data.P_plus_a1 * data.P_plus_a1) / 2 * tmp) / (self.P_plus_qu[temperature] * self.P_plus_qu[temperature])
            self.P_minus_qu_300K = 100 * ((data.P_minus_a1 * data.P_minus_a1) / 2 * tmp) / (self.P_minus_qu[temperature] * self.P_minus_qu[temperature])
        # classical (high-temperature) limit of the same weighting
        tmp = 2 * K_B * (temperature + 1) / data.energy
        self.IP_cluster_cl[temperature] = np.sqrt(np.sum((data.IP_cluster_a1 * data.IP_cluster_a1) / 2 * tmp))
        self.EA_cluster_cl[temperature] = np.sqrt(np.sum((data.EA_cluster_a1 * data.EA_cluster_a1) / 2 * tmp))
        self.IP_alone_cl[temperature] = np.sqrt(np.sum((data.IP_alone_a1 * data.IP_alone_a1) / 2 * tmp))
        self.EA_alone_cl[temperature] = np.sqrt(np.sum((data.EA_alone_a1 * data.EA_alone_a1) / 2 * tmp))
        self.P_plus_cl[temperature] = np.sqrt(np.sum((data.P_plus_a1 * data.P_plus_a1) / 2 * tmp))
        self.P_minus_cl[temperature] = np.sqrt(np.sum((data.P_minus_a1 * data.P_minus_a1) / 2 * tmp))
        if temperature + 1 == 300:
            self.IP_cluster_cl_300K = 100 * ((data.IP_cluster_a1 * data.IP_cluster_a1) / 2 * tmp) / (self.IP_cluster_cl[temperature] * self.IP_cluster_cl[temperature])
            self.EA_cluster_cl_300K = 100 * ((data.EA_cluster_a1 * data.EA_cluster_a1) / 2 * tmp) / (self.EA_cluster_cl[temperature] * self.EA_cluster_cl[temperature])
            self.IP_alone_cl_300K = 100 * ((data.IP_alone_a1 * data.IP_alone_a1) / 2 * tmp) / (self.IP_alone_cl[temperature] * self.IP_alone_cl[temperature])
            self.EA_alone_cl_300K = 100 * ((data.EA_alone_a1 * data.EA_alone_a1) / 2 * tmp) / (self.EA_alone_cl[temperature] * self.EA_alone_cl[temperature])
            self.P_plus_cl_300K = 100 * ((data.P_plus_a1 * data.P_plus_a1) / 2 * tmp) / (self.P_plus_cl[temperature] * self.P_plus_cl[temperature])
            self.P_minus_cl_300K = 100 * ((data.P_minus_a1 * data.P_minus_a1) / 2 * tmp) / (self.P_minus_cl[temperature] * self.P_minus_cl[temperature])
    # temperature-independent sums
    self.IP_cluster_L = np.sum((data.IP_cluster_a1 * data.IP_cluster_a1) / (2 * data.energy))
    self.EA_cluster_L = np.sum((data.EA_cluster_a1 * data.EA_cluster_a1) / (2 * data.energy))
    self.IP_alone_L = np.sum((data.IP_alone_a1 * data.IP_alone_a1) / (2 * data.energy))
    self.EA_alone_L = np.sum((data.EA_alone_a1 * data.EA_alone_a1) / (2 * data.energy))
    self.P_plus_L = np.sum((data.P_plus_a1 * data.P_plus_a1) / (2 * data.energy))
    self.P_minus_L = np.sum((data.P_minus_a1 * data.P_minus_a1) / (2 * data.energy))
    self.IP_cluster_G2 = np.sum((data.IP_cluster_a1 * data.IP_cluster_a1) / 2)
    self.EA_cluster_G2 = np.sum((data.EA_cluster_a1 * data.EA_cluster_a1) / 2)
    self.IP_alone_G2 = np.sum((data.IP_alone_a1 * data.IP_alone_a1) / 2)
    self.EA_alone_G2 = np.sum((data.EA_alone_a1 * data.EA_alone_a1) / 2)
    self.P_plus_G2 = np.sum((data.P_plus_a1 * data.P_plus_a1) / 2)
    self.P_minus_G2 = np.sum((data.P_minus_a1 * data.P_minus_a1) / 2)
def k_fuel(p, B, T_K):
    """
    Calculate fuel conductivity (W/m-K)
    From J.D. Hales et al. (2013) "Bison Theory" (NFIR model)
    """
    # kelvin to celsius
    T = T_K - 273.15
    # thermal recovery function
    rf = 0.5 * (1.0 + sp.tanh((T - 900.0) / 150.0))
    # phonon contribution at start of thermal recovery [Hales eq. 8.14]
    kps = 1.0 / (0.09592 + 0.00614 * B - 0.000014 * B ** 2
                 + (0.00025 - 0.00000181 * B) * T)
    # phonon contribution at the end of thermal recovery [Hales eq. 8.15]
    kpend = 1.0 / (0.09592 + 0.0026 * B + (0.00025 - 0.00000027 * B) * T)
    # unirradiated material at 95% th. density [Hales eq. 8.17]
    kel = 0.0132 * sp.exp(0.00188 * T)
    k = (1.0 - rf) * kps + rf * kpend + kel
    return k
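# --- Usage sketch under assumed units (burnup B in MWd/kgU, temperature
# in K), with k_fuel and its `sp` import in scope; the `p` argument is
# unused by the body.
print(k_fuel(None, B=0.0, T_K=800.0))   # fresh fuel
print(k_fuel(None, B=40.0, T_K=800.0))  # irradiated: lower conductivity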
def TempProfile(z, T0=1000., z0=100.):
    """
    This function creates a temperature profile using tanh functions for
    test purposes.

    Inputs
        z - The altitude locations in km.
        T0 - The value of the lowest temperatures in K.
        z0 - The middle value of the tanh functions along altitude, in km.

    Outputs
        Te - The electron temperature profile in K.
             1700*(tanh((z-z0)*2*exp(1)/400 - exp(1)) + 1)/2 + T0
        Ti - The ion temperature profile in K.
             500*(tanh((z-z0)*2*exp(1)/400 - exp(1)) + 1)/2 + T0
    """
    zall = (z - z0) * 2. * sp.exp(1) / 400. - sp.exp(1)
    atanshp = (sp.tanh(zall) + 1.) / 2
    Te = 1700 * atanshp + T0
    Ti = 500 * atanshp + T0
    return (Te, Ti)
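# --- Usage sketch (assumes TempProfile and an `sp` exposing tanh/exp
# ufuncs, e.g. an old SciPy or numpy, share its module):
import numpy as np

z = np.array([50., 100., 200., 400.])  # altitudes, km
Te, Ti = TempProfile(z)
print(Te)  # rises from ~T0 = 1000 K toward T0 + 1700 = 2700 K with altitude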
def __init__(self, indim, outdim, peepholes=False, name=None):
    nrNeurons = outdim
    self.peep = peepholes
    # internal buffers:
    self.ingate = zeros((0, nrNeurons))
    self.outgate = zeros((0, nrNeurons))
    self.forgetgate = zeros((0, nrNeurons))
    self.cell = zeros((0, nrNeurons))
    self.ingatex = zeros((0, nrNeurons))
    self.outgatex = zeros((0, nrNeurons))
    self.forgetgatex = zeros((0, nrNeurons))
    self.cellx = zeros((0, nrNeurons))
    self.state = zeros((0, nrNeurons))
    self.ingateError = zeros((0, nrNeurons))
    self.outgateError = zeros((0, nrNeurons))
    self.forgetgateError = zeros((0, nrNeurons))
    self.stateError = zeros((0, nrNeurons))
    self.Sin = zeros((0, indim * nrNeurons))
    self.Sforget = zeros((0, indim * nrNeurons))
    self.Scell = zeros((0, indim * nrNeurons))
    self.SinRec = zeros((0, nrNeurons * nrNeurons))
    self.SforgetRec = zeros((0, nrNeurons * nrNeurons))
    self.ScellRec = zeros((0, nrNeurons * nrNeurons))

    Module.__init__(self, indim, outdim, name)

    if self.peep:
        ParameterContainer.__init__(self, nrNeurons * 3 + (4 * indim + nrNeurons) * nrNeurons)
        self.Sin_peep = zeros((0, nrNeurons))
        self.Sforget_peep = zeros((0, nrNeurons))
        self.Scell_peep = zeros((0, nrNeurons))
    else:
        ParameterContainer.__init__(self, (4 * indim + nrNeurons) * nrNeurons)
    self._setParameters(self.params)
    self._setDerivatives(self.derivs)

    # transfer functions and their derivatives
    self.f = sigmoid
    self.fprime = sigmoidPrime
    self.g = lambda x: 2 * tanh(x)
    self.gprime = lambda x: 2 * tanhPrime(x)
    self.h = self.g
    self.hprime = self.gprime
def f(self, y, psi):
    """transform y with f using parameter vector psi
    psi = [[a,b,c]]

    f = y + \sum_{terms} a * tanh(b*(y+c))
    """
    # 1. check that the number of params is consistent
    assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions'
    assert psi.shape[1] == 3, 'inconsistent parameter dimensions'
    # 2. exponentiate a and b (positive!)
    mpsi = psi.copy()
    mpsi[:, 0:2] = SP.exp(mpsi[:, 0:2])
    # 3. transform data
    z = y.copy()
    for i in range(len(mpsi)):
        a, b, c = mpsi[i]
        z += a * SP.tanh(b * (y + c))
    return z
def genTraitEffect(self, distribution='normal'):
    W = SP.zeros((self.P, self.P))
    if self.trait_effect == 'shared':
        if distribution == 'normal':
            W[0, :] = SP.random.randn(1, self.P)
        else:
            W[0, :] = genBinormal(1, self.P)
    elif self.trait_effect == 'tanh':
        assert distribution == 'normal', \
            'tanh trait effect is only implemented for normal distributed effects'
        X = 10 * SP.linspace(0, 1, self.P) - 5
        a = -SP.absolute(SP.random.randn())
        c = -SP.absolute(SP.random.randn())
        Y = SP.tanh(a * X + c)
        W[0, :] = Y
    return W
def tanh_warp(x, n, l1, l2, lw, x0):
    r"""Implements a tanh warping function and its derivative.

    .. math::

        l = \frac{l_1 + l_2}{2} - \frac{l_1 - l_2}{2}\tanh\frac{x-x_0}{l_w}

    Parameters
    ----------
    x : float or array of float
        Locations to evaluate the function at.
    n : int
        Derivative order to take. Used for ALL of the points.
    l1 : positive float
        Left saturation value.
    l2 : positive float
        Right saturation value.
    lw : positive float
        Transition width.
    x0 : float
        Transition location.

    Returns
    -------
    l : float or array
        Warped length scale at the given locations.

    Raises
    ------
    NotImplementedError
        If `n` > 1.
    """
    if n == 0:
        return (l1 + l2) / 2.0 - (l1 - l2) / 2.0 * scipy.tanh((x - x0) / lw)
    elif n == 1:
        return -(l1 - l2) / (2.0 * lw) * (scipy.cosh((x - x0) / lw))**(-2.0)
    else:
        raise NotImplementedError("Only derivatives up to order 1 are supported!")
def save(data, P, M, fmt, font, **kw):
    coeff = float(kw.get('--tanh', 0.0))
    count = "%07d" % (P)
    annotation = ["%02x" % (ord(c)) for c in count.lstrip('0')]
    if kw.get('--verbose', False):
        print annotation
    # TODO use BIOSFONT to annotate image
    #for n, code in enumerate([eval('0x%s' % (val)) for val in annotation]):
        #font.text(data, "a", (1,1))
    #if kw.get('constrain', False):
        #image = toimage(data, cmax=256, cmin=0, mode='RGB')
    #else:
        #image = toimage(data, mode='RGB')
    if coeff != 0.0:
        temp = tanh(coeff * data)
        image = toimage(temp, mode='RGB')
    else:
        image = toimage(data, mode='RGB')
    image = toimage(zoom(image, (4.0, 4.0, 1.0), order=0), mode='RGB')
    filename = fmt % (count)
    image.save(filename)
    return filename
def tanhPrime(x):
    """ Derivative of tanh. """
    tmp = tanh(x)
    return 1 - tmp * tmp
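# --- Tiny check (not from the original source) of the identity used in
# tanhPrime, d/dx tanh(x) = 1 - tanh(x)**2, via a central finite
# difference (numpy for the tanh ufunc):
import numpy as np

x, h = 0.7, 1e-6
fd = (np.tanh(x + h) - np.tanh(x - h)) / (2 * h)
print(abs(fd - (1 - np.tanh(x)**2)) < 1e-8)  # expect True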
def activateOut(self):
    self.ao[:] = tanh(npsum(self.wo.T * self.ah, axis=1))