def test_zetac_special_cases():
    assert sc.zetac(np.inf) == 0
    assert np.isnan(sc.zetac(-np.inf))
    assert sc.zetac(0) == -1.5
    assert sc.zetac(1.0) == np.inf
    # zeta has trivial zeros at the negative even integers, so zetac is -1 there
    assert_equal(sc.zetac([-2, -50, -100]), -1)
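# Context for the tests above: zetac(x) computes zeta(x) - 1, which keeps
# precision for large x where zeta(x) is within machine epsilon of 1.
# A quick sketch of the identity (assumes only numpy and scipy):
import numpy as np
import scipy.special as sc

x = np.array([2.0, 10.0, 50.0])
assert np.allclose(sc.zetac(x), sc.zeta(x) - 1)
print(sc.zetac(50))     # ~8.88e-16, computed directly
print(sc.zeta(50) - 1)  # same value, but loses digits to cancellation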
def t_bol(wave, flux, freq=False):
    """
    wave: wavelength in um, unless freq is set to True.
          If freq is True, then wave is frequency in Hz.
    flux: flux in Jy.
    """
    import numpy as np
    import scipy.special as spec
    import astropy.constants as const

    # constants setup
    c = const.c.cgs.value
    h = const.h.cgs.value
    k = const.k_B.cgs.value

    # convert units from (um, Jy) -> (Hz, erg s-1 cm-2 Hz-1)
    fv = np.array(flux) * 1e-23
    if freq == False:
        freq = c / (1e-4 * np.array(wave))
    else:
        freq = wave

    diff_dum = freq[1:] - freq[0:-1]
    freq_interpol = np.hstack((freq[0:-1] + diff_dum / 2.0,
                               freq[0:-1] + diff_dum / 2.0,
                               freq[0], freq[-1]))
    freq_interpol = freq_interpol[np.argsort(freq_interpol)[::-1]]
    fv_interpol = np.empty(len(freq_interpol))

    # build a histogram-style version of the spectrum
    for i in range(0, len(fv)):
        if i == 0:
            fv_interpol[i] = fv[i]
        else:
            fv_interpol[2 * i - 1] = fv[i - 1]
            fv_interpol[2 * i] = fv[i]
    fv_interpol[-1] = fv[-1]

    dv = freq_interpol[0:-1] - freq_interpol[1:]
    dv = np.delete(dv, np.where(dv == 0))
    fv = fv[np.argsort(freq)]
    freq = freq[np.argsort(freq)]

    # calculate the zeroth and first moments
    I1 = np.trapz(fv * freq, freq)
    I0 = np.trapz(fv, freq)

    # T_bol equation from Myers & Ladd 1993
    t_bol = (spec.zetac(4) + 1) / (4 * (spec.zetac(5) + 1)) * h / k * (I1 / I0)
    return t_bol
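# The prefactor zeta(4) / (4 * zeta(5)) * h / k in the last line above is the
# Myers & Ladd (1993) bolometric-temperature constant,
# T_bol = 1.25e-11 K * (<nu> / Hz). A quick sanity check, with cgs values of
# h and k hard-coded here instead of taken from astropy:
import scipy.special as spec

h = 6.62607015e-27  # erg s
k = 1.380649e-16    # erg / K
print((spec.zetac(4) + 1) / (4 * (spec.zetac(5) + 1)) * h / k)  # ~1.25e-11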
def main():
    frequency = {}
    list_words = []
    fp = open("test.txt", "r")
    for line in fp.readlines():
        x, y = scanner.scan(line)
        for i in range(len(x)):
            list_words.append(x[i][1])
            # print(x[i][1], end=" ")
        # print("")
        # tokenise(line)
    print(list_words)

    for word in list_words:
        count = frequency.get(word, 0)
        frequency[word] = count + 1

    # for key, value in reversed(sorted(frequency.items(), key=operator.itemgetter(1))):
    #     print(key, value)
    # n = 1000
    # frequency = {key: value for key, value in list(frequency.items())[0:n]}

    # convert the frequency values to a numpy array
    s = frequency.values()
    s = np.array(list(s))
    # print(s2)

    # calculate Zipf's law and plot the data
    a = 2.  # distribution parameter
    counts, bins, ignored = plt.hist(s, 50, density=None)
    x = np.arange(1., 50.)
    y = x**(-a) / special.zetac(a)
    plt.plot(x, y / max(y), linewidth=2, color='r')
    plt.show()

    print(ngram(4, list_words))
def zipfPlot(zipfs_data, N, filename):
    """
    Uses a list of words and plots a Zipf's-law graph for the N most frequent ones.

    :param zipfs_data: list of the 5000 most frequent terms in the collection and their frequencies
    :param N: number of terms to include in the model and fit
    :param filename: name of the file to save the plot to
    """
    freq_arr = zipfs_data[:N]
    x_axis = np.arange(1, N + 1, 1)
    result = curve_fit(zipfFun, x_axis, freq_arr)
    a = result[0][0]
    b = result[0][1]
    fit_curve = x_axis**(-a) / zetac(b)
    plt.bar(x_axis, freq_arr, label="document data")
    plt.plot(x_axis, fit_curve, 'r-', label="fitted line", linewidth=0.5)
    # raw string so the LaTeX \zeta is not treated as an escape sequence
    string = ("fitting along Zipf's law:"
              "\nexponent parameter, s = {0:f}"
              "\nnormalisation parameter, n = {1:f}"
              "\n" + r"$f = 1/k^s * 1/\zeta(n)$, where k is the rank"
              ).format(a, b)
    plt.text(25, 2 * 10**7, string)
    plt.title("Zipf's Law, {} most frequent words in corpus".format(N))
    plt.xlabel("word rank")
    plt.ylabel("word frequency")
    plt.legend()
    plt.savefig(filename + '.png', dpi=400)
    return
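# `zipfFun` is referenced by curve_fit above but not shown in this snippet.
# Given that the fitted curve is rebuilt as x_axis**(-a) / zetac(b), a
# matching definition would presumably look like this (a hypothetical
# reconstruction, not the original code):
import numpy as np
from scipy.special import zetac

def zipfFun(k, s, n):
    # Zipf-like model f(k) = k^(-s) / zetac(n); note it normalises by
    # zetac (zeta - 1) rather than the full zeta function.
    return k**(-s) / zetac(n)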
def distTotalUserApps(nbr_apps, nbr_users):
    a = 2.  # distribution parameter
    x = arange(1., nbr_apps + 1)
    y = x**(-a) / sps.zetac(a)
    result = y / sum(y) * nbr_users
    print(sum(result))
    return result
def nu0_fb(tau_m, tau_s, tau_r, V_th, V_r, mu, sigma):
    """
    Calculates stationary firing rates for filtered synapses based on
    Fourcaud & Brunel 2002.

    Parameters:
    -----------
    tau_m: float
        Membrane time constant in seconds.
    tau_s: float
        Synaptic time constant in seconds.
    tau_r: float
        Refractory time in seconds.
    V_th: float
        Relative threshold potential in mV.
    V_r: float
        Relative reset potential in mV.
    mu: float
        Mean neuron activity in mV.
    sigma: float
        Standard deviation of neuron activity in mV.

    Returns:
    --------
    float:
        Stationary firing rate in Hz.
    """
    alpha = np.sqrt(2) * abs(zetac(0.5) + 1)
    # effective threshold
    V_th1 = V_th + sigma * alpha / 2. * np.sqrt(tau_s / tau_m)
    # effective reset
    V_r1 = V_r + sigma * alpha / 2. * np.sqrt(tau_s / tau_m)
    # use standard Siegert with modified threshold and reset
    return nu_0(tau_m, tau_r, V_th1, V_r1, mu, sigma)
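# The constant alpha = sqrt(2) * |zetac(0.5) + 1| recurring in these Siegert
# snippets is sqrt(2) * |zeta(1/2)| ~ 2.07, the boundary-shift coefficient of
# Fourcaud & Brunel (2002). zetac is used because it stays finite for
# arguments below 1; adding 1 back recovers zeta(1/2) ~ -1.4604. A quick check:
import numpy as np
from scipy.special import zetac

alpha = np.sqrt(2) * abs(zetac(0.5) + 1)
print(alpha)  # ~2.0652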
def nu0_fb(tau_m, tau_s, tau_r, V_th, V_r, mu, sigma):
    alpha = np.sqrt(2) * abs(zetac(0.5) + 1)
    # effective threshold
    V_th1 = V_th + sigma * alpha / 2. * np.sqrt(tau_s / tau_m)
    # effective reset
    V_r1 = V_r + sigma * alpha / 2. * np.sqrt(tau_s / tau_m)
    # use standard Siegert with modified threshold and reset
    return nu_0(tau_m, tau_r, V_th1, V_r1, mu, sigma)
def _nu0_fb433(tau_m, tau_s, tau_r, V_th_rel, V_0_rel, mu, sigma):
    """Helper function implementing nu0_fb433 without quantities."""
    # use the zetac function (zeta - 1) because zeta does not give finite
    # values for arguments smaller than 1.
    alpha = np.sqrt(2.) * abs(zetac(0.5) + 1)
    nu0 = nu_0(tau_m, tau_r, V_th_rel, V_0_rel, mu, sigma)
    nu0_dPhi = _nu0_dPhi(tau_m, tau_r, V_th_rel, V_0_rel, mu, sigma)
    return nu0 * (1 - np.sqrt(tau_s * tau_m / 2) * alpha * nu0_dPhi)
def Zipf(frekuensi, par=2, nBins=100, yScale=0.05):
    # `density=True` replaces the deprecated `normed=True` histogram argument
    count, bins, ignored = plt.hist(np.array(frekuensi), nBins, density=True)
    plt.title("Zipf plot")
    x = np.arange(1., nBins)
    plt.xlabel("Frequency Rank of Token")
    y = x**(-par) / special.zetac(par)
    plt.ylabel("Absolute Frequency of Token")
    plt.plot(x, y * yScale / max(y), linewidth=2, color='r')
    plt.show()
def test_zetac():
    assert_equal(sc.zetac(0), -1.5)
    assert_equal(sc.zetac(1.0), np.inf)

    # Expected values in the following were computed using
    # Wolfram Alpha `Zeta[x] - 1`:
    rtol = 1e-12
    assert_allclose(sc.zetac(-2.1), -0.9972705002153750, rtol=rtol)
    assert_allclose(sc.zetac(0.8), -5.437538415895550, rtol=rtol)
    assert_allclose(sc.zetac(0.9999), -10000.42279161673, rtol=rtol)
    assert_allclose(sc.zetac(9), 0.002008392826082214, rtol=rtol)
    assert_allclose(sc.zetac(50), 8.881784210930816e-16, rtol=rtol)
    assert_allclose(sc.zetac(75), 2.646977960169853e-23, rtol=rtol)
def d_nu_d_mu_fb433(tau_m, tau_s, tau_r, V_th, V_r, mu, sigma):
    alpha = np.sqrt(2) * abs(zetac(0.5) + 1)
    x_th = np.sqrt(2) * (V_th - mu) / sigma
    x_r = np.sqrt(2) * (V_r - mu) / sigma
    integral = 1. / (nu_0(tau_m, tau_r, V_th, V_r, mu, sigma) * tau_m)
    prefactor = np.sqrt(tau_s / tau_m) * alpha / (tau_m * np.sqrt(2))
    dnudmu = d_nu_d_mu(tau_m, tau_r, V_th, V_r, mu, sigma)
    dPhi_prime = Phi_prime_mu(x_th, sigma) - Phi_prime_mu(x_r, sigma)
    dPhi = Phi(x_th) - Phi(x_r)
    phi = dPhi_prime * integral + (2 * np.sqrt(2) / sigma) * dPhi**2
    return dnudmu - prefactor * phi / integral**3
def d_nu_d_nu_in_fb(tau_m, tau_s, tau_r, V_th, V_r, j, mu, sigma):
    """
    Derivative of nu_0 by input rate for low-pass-filtered synapses with tau_s.
    Effective threshold and reset from Fourcaud & Brunel 2002.

    Parameters:
    -----------
    tau_m: float
        Membrane time constant in seconds.
    tau_s: float
        Synaptic time constant in seconds.
    tau_r: float
        Refractory time in seconds.
    V_th: float
        Relative threshold potential in mV.
    V_r: float
        Relative reset potential in mV.
    j: float
        Effective connectivity weight in mV.
    mu: float
        Mean neuron activity in mV.
    sigma: float
        Standard deviation of neuron activity in mV.

    Returns:
    --------
    float:
        Derivative in Hz/mV (sum of linear (mu) and squared (sigma^2)
        contributions).
    float:
        Derivative in Hz/mV (linear (mu) contribution).
    float:
        Derivative in Hz/mV (squared (sigma^2) contribution).
    """
    alpha = np.sqrt(2) * abs(zetac(0.5) + 1)
    y_th = (V_th - mu) / sigma
    y_r = (V_r - mu) / sigma
    y_th_fb = y_th + alpha / 2. * np.sqrt(tau_s / tau_m)
    y_r_fb = y_r + alpha / 2. * np.sqrt(tau_s / tau_m)
    nu0 = nu0_fb(tau_m, tau_s, tau_r, V_th, V_r, mu, sigma)
    # linear contribution
    lin = (np.sqrt(np.pi) * (tau_m * nu0)**2 * j / sigma
           * (np.exp(y_th_fb**2) * (1 + erf(y_th_fb))
              - np.exp(y_r_fb**2) * (1 + erf(y_r_fb))))
    # quadratic contribution
    sqr = (np.sqrt(np.pi) * (tau_m * nu0)**2 * j / sigma
           * (np.exp(y_th_fb**2) * (1 + erf(y_th_fb)) * 0.5 * y_th * j / sigma
              - np.exp(y_r_fb**2) * (1 + erf(y_r_fb)) * 0.5 * y_r * j / sigma))
    return lin + sqr, lin, sqr
def distTotalUserApps(nbr_apps, nbr_users, rand1=False):
    a = 0.83  # distribution parameter
    x = arange(1, nbr_apps + 1)
    y = x**(-a) / sp.zetac(a)
    x1 = ["A" + str(l) for l in range(0, nbr_apps)]
    result = y / sum(y) * nbr_users
    _result_dict_app = dict(zip(x1, result.astype(np.int64)))
    if rand1:
        return fnRandApp(_result_dict_app)
    else:
        return _result_dict_app
def f(x, s, v_shift_left, piece_boundary, a, b, v_shift_right):
    return np.piecewise(
        x,
        [
            x < piece_boundary,
            x >= piece_boundary,
        ],
        [
            lambda x: (x**-s) / zetac(s) + v_shift_left,
            lambda x: a * x**b + v_shift_right,
        ],
    )
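# A minimal usage sketch for the piecewise model above (parameter values are
# made up for illustration): Zipf-like decay with exponent s below the
# boundary, a power law a * x**b above it. In practice the parameters would
# come from a fit, e.g. scipy.optimize.curve_fit.
import numpy as np
from scipy.special import zetac

ranks = np.arange(1., 101.)
# s=1.5, no vertical shifts, boundary at rank 50, right piece 1.0 * x**-1.5
values = f(ranks, 1.5, 0.0, 50.0, 1.0, -1.5, 0.0)
print(values[:3], values[-3:])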
def test_zetac():
    # Expected values in the following were computed using Wolfram
    # Alpha's `Zeta[x] - 1`
    x = [-2.1, 0.8, 0.9999, 9, 50, 75]
    desired = [
        -0.9972705002153750,
        -5.437538415895550,
        -10000.42279161673,
        0.002008392826082214,
        8.881784210930816e-16,
        2.646977960169853e-23,
    ]
    assert_allclose(sc.zetac(x), desired, rtol=1e-12)
def d_nu_d_mu_fb433(tau_m, tau_s, tau_r, V_th_rel, V_0_rel, mu, sigma):
    """
    Derivative of the stationary firing rate with synaptic filtering
    with respect to the mean input.

    See Appendix B in
    Schuecker, J., Diesmann, M. & Helias, M.
    Reduction of colored noise in excitable systems to white
    noise and dynamic boundary conditions. 1–23 (2014).

    Parameters:
    -----------
    tau_m: float
        Membrane time constant in seconds.
    tau_s: float
        Synaptic time constant in seconds.
    tau_r: float
        Refractory time in seconds.
    V_th_rel: float
        Relative threshold potential in mV.
    V_0_rel: float
        Relative reset potential in mV.
    mu: float
        Mean neuron activity in mV.
    sigma: float
        Standard deviation of neuron activity in mV.

    Returns:
    --------
    float:
        Zero frequency limit of colored noise transfer function in Hz/mV.
    """
    pos_parameters = [tau_m, tau_s, tau_r, sigma]
    pos_parameter_names = ['tau_m', 'tau_s', 'tau_r', 'sigma']
    check_if_positive(pos_parameters, pos_parameter_names)
    check_for_valid_k_in_fast_synaptic_regime(tau_m, tau_s)

    if sigma == 0:
        raise ZeroDivisionError('Function contains division by sigma!')

    alpha = np.sqrt(2) * abs(zetac(0.5) + 1)
    x_th = np.sqrt(2) * (V_th_rel - mu) / sigma
    x_r = np.sqrt(2) * (V_0_rel - mu) / sigma
    integral = 1. / (nu_0(tau_m, tau_r, V_th_rel, V_0_rel, mu, sigma) * tau_m)
    prefactor = np.sqrt(tau_s / tau_m) * alpha / (tau_m * np.sqrt(2))
    dnudmu = d_nu_d_mu(tau_m, tau_r, V_th_rel, V_0_rel, mu, sigma)
    dPhi_prime = Phi_prime_mu(x_th, sigma) - Phi_prime_mu(x_r, sigma)
    dPhi = Phi(x_th) - Phi(x_r)
    phi = dPhi_prime * integral + (2 * np.sqrt(2) / sigma) * dPhi**2
    return dnudmu - prefactor * phi / integral**3
def generate_zipfian_dataset(dataset_size, dataset_path):
    a_list = [i for i in range(dataset_size)]
    a = 2.
    x = np.arange(float(dataset_size) / 20, float(dataset_size))
    y = x**(-a) / sps.zetac(a)
    pdf = y / y.sum()
    zeros = [0 for i in range(int(float(dataset_size / 20)))]
    # append the zero-probability padding; a plain `pdf.tolist().extend(zeros)`
    # would extend a temporary list and be a no-op. The zeros contribute no
    # probability mass, so pdf still sums to 1.
    pdf = np.concatenate((pdf, zeros))
    b_list = np.random.choice(np.arange(len(pdf)), size=dataset_size, p=pdf)
    b_list = [int(b) for b in b_list]
    random.shuffle(a_list)
    random.shuffle(b_list)
    save_doc(dataset_path, a_list, b_list, length=int(dataset_size))
def distUsersNodes(usersPerApp, dist, nbr_leafs):
    result = []
    if dist == "Uniform":
        mu = usersPerApp
        sigma = 0.1
        result = np.random.normal(mu, sigma, nbr_leafs)
    elif dist == "Zeta":
        a = 2.  # distribution parameter
        x = arange(1., nbr_leafs + 1)
        y = x**(-a) / sps.zetac(a)
        result = y / sum(y) * usersPerApp
    else:
        print("Put correct distribution function")
        return
    return result
def skew(self):
    """
    Distribution skewness

    Returns
    -------
    float
        Distribution skewness

    Notes
    -----
    `zetac` is the complementary Riemann zeta function (zeta function
    minus 1). See
    http://docs.scipy.org/doc/scipy/reference/generated/scipy.special.zetac.html
    """
    return 12. * np.sqrt(6.) * (1. + zetac(3)) / np.pi ** 3
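# The value above is the closed-form skewness of the Gumbel distribution,
# 12 * sqrt(6) * zeta(3) / pi^3 ~ 1.1395, with zeta(3) ~ 1.2021 (Apery's
# constant) recovered from zetac(3) + 1. A quick numeric check:
import numpy as np
from scipy.special import zetac

print(12. * np.sqrt(6.) * (1. + zetac(3)) / np.pi ** 3)  # ~1.1395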
def skew(self):
    """
    Distribution skewness

    Returns
    -------
    s : float
        distribution skewness
    """
    try:
        # zetac is the complementary Riemann zeta function (zeta function minus 1)
        # http://docs.scipy.org/doc/scipy/reference/generated/scipy.special.zetac.html
        s = -12. * np.sqrt(6.) * (1. + zetac(3)) / np.pi ** 3
        return s
    except TypeError:
        print("Distribution parameters are not defined.")
def real_shifted_siegert(tau_m, tau_s, tau_r, V_th_rel, V_0_rel, mu, sigma):
    """
    Siegert formula with shifted boundaries for the colored noise case.

    Introduced in Fourcaud 2002, and Schuecker 2015.
    """
    alpha = np.sqrt(2.) * abs(zetac(0.5) + 1)
    k = np.sqrt(tau_s / tau_m)

    V_th_eff = V_th_rel + sigma * alpha * k / 2
    V_0_eff = V_0_rel + sigma * alpha * k / 2

    nu = real_siegert(tau_m, tau_r, V_th_eff, V_0_eff, mu, sigma)
    return nu
def distTotalUserApps(nbr_apps, nbr_users, rand1=False):
    """
    Distributes nbr_users across nbr_apps following a Zipf-like law.
    """
    a = 2.  # distribution parameter
    x = arange(1, nbr_apps + 1)
    y = x**(-a) / sps.zetac(a)
    x1 = ["A" + str(l) for l in range(0, nbr_apps)]
    result = y / sum(y) * nbr_users
    _result_dict_app = dict(zip(x1, result.astype(np.int64)))
    # print(sum(result))
    if rand1:
        return fnRandApp(_result_dict_app)
    else:
        return _result_dict_app
def nu0_fb433(tau_m, tau_s, tau_r, V_th_rel, V_0_rel, mu, sigma):
    """
    Calculates stationary firing rates for exponential PSCs.

    Calculates the stationary firing rate of a neuron with synaptic filter of
    time constant tau_s driven by Gaussian noise with mean mu and standard
    deviation sigma, using Eq. 4.33 in Fourcaud & Brunel (2002) with a Taylor
    expansion in k = sqrt(tau_s/tau_m).

    Parameters:
    -----------
    tau_m: float
        Membrane time constant in seconds.
    tau_s: float
        Synaptic time constant in seconds.
    tau_r: float
        Refractory time in seconds.
    V_th_rel: float
        Relative threshold potential in mV.
    V_0_rel: float
        Relative reset potential in mV.
    mu: float
        Mean neuron activity in mV.
    sigma: float
        Standard deviation of neuron activity in mV.

    Returns:
    --------
    float:
        Stationary firing rate in Hz.
    """
    alpha = np.sqrt(2.) * abs(zetac(0.5) + 1)
    x_th = np.sqrt(2.) * (V_th_rel - mu) / sigma
    x_r = np.sqrt(2.) * (V_0_rel - mu) / sigma

    # preventing overflow in np.exp in Phi(s)
    if x_th > 20.0 / np.sqrt(2.):
        result = nu_0(tau_m, tau_r, V_th_rel, V_0_rel, mu, sigma)
    else:
        r = nu_0(tau_m, tau_r, V_th_rel, V_0_rel, mu, sigma)
        dPhi = Phi(x_th) - Phi(x_r)
        result = (r - np.sqrt(tau_s / tau_m) * alpha / (tau_m * np.sqrt(2))
                  * dPhi * (r * tau_m)**2)
    if math.isnan(result):
        print(mu, sigma, x_th, x_r)
    return result
def count50Unfiltered(self, collection, topic):
    word_counter = Counter()
    for obj in collection.find():
        my_string = obj['text']
        # find the words in the document and convert them to lowercase
        words = re.findall(r'\w+', my_string.lower())
        # print(words)
        for word in words:
            word_counter[word] += 1
    print(word_counter.most_common(50))

    ## HISTOGRAM :
    labels, values = zip(*word_counter.most_common(50))
    indexes = np.arange(len(labels))
    plt.figure(figsize=(10, 5))
    plt.bar(indexes, values)
    plt.title("50 most frequent terms (unfiltered) for: " + topic)
    plt.xticks(indexes, labels, size=8, rotation=70)
    plt.show()

    ## ZIPF :
    vals = []
    for k, v in word_counter.items():
        vals.append(v)
    print(vals)
    a = 2.  # distribution parameter
    # s = np.random.zipf(a, 1500)
    s = np.array(vals)
    # `density=True` replaces the deprecated `normed=True`
    count, bins, ignored = plt.hist(s[s < 50], 50, density=True)
    x = np.arange(1., 50.)
    y = x ** (-a) / special.zetac(a)
    # plt.xscale('log')
    # plt.yscale('log')
    plt.title('Zipf distribution diagram (log-log scale) for all words, from topic: ' + topic)
    # plt.plot(x, y / max(y), linewidth=2, color='r')
    plt.loglog(x, y / max(y), linewidth=2, color='r')
    plt.show()
def transfer_function(omega, params, mu, sigma):
    """Calculates the transfer function of the leaky integrate-and-fire
    neuron model subjected to colored noise according to Eq. 93 in
    Schuecker et al. (2014) "Reduction of colored noise in excitable systems
    to white noise and dynamic boundary conditions", arXiv:1410.8799v3.
    """
    taum = params['taum'] * 1e-3
    taus = params['tauf'] * 1e-3
    taur = params['taur'] * 1e-3
    V0 = 0.0
    dV = params['Vth'] - params['V0']

    if omega == 0.:
        # print(siegert.d_nu_d_mu_fb433(taum, taus, taur, dV, V0, mu, sigma))
        # print(siegert.d_nu_d_mu_numerical(taum, taus, taur, dV, V0, mu, sigma))
        # return siegert.d_nu_d_mu_fb433(taum, taus, taur, dV, V0, mu, sigma)
        return siegert.d_nu_d_mu_numerical(taum, taus, taur, dV, V0, mu, sigma)
    else:
        nu0 = siegert.nu_0(taum, taur, dV, V0, mu, sigma)
        nu0_fb = siegert.nu0_fb433(taum, taus, taur, dV, V0, mu, sigma)
        x_t = np.sqrt(2.) * (dV - mu) / sigma
        x_r = np.sqrt(2.) * (V0 - mu) / sigma
        z = complex(-0.5, omega * taum)
        alpha = np.sqrt(2) * abs(zetac(0.5) + 1)
        k = np.sqrt(taus / taum)
        A = alpha * taum * nu0 * k / np.sqrt(2)

        def Phi_x_r(x, y):
            return Phi(z, x) - Phi(z, y)

        def dPhi_x_r(x, y):
            return d_Phi(z, x) - d_Phi(z, y)

        def d2Phi_x_r(x, y):
            return d_2_Phi(z, x) - d_2_Phi(z, y)

        a0 = Phi_x_r(x_t, x_r)
        a1 = dPhi_x_r(x_t, x_r) / a0
        a3 = A / taum / nu0_fb * (-a1**2 + d2Phi_x_r(x_t, x_r) / a0)
        result = (np.sqrt(2.) / sigma * nu0_fb
                  / complex(1., omega * taum) * (a1 + a3))
        return result
def _nu0_fb(tau_m, tau_s, tau_r, V_th_rel, V_0_rel, mu, sigma):
    """Helper function implementing nu0_fb without quantities."""
    pos_parameters = [tau_m, tau_s, tau_r, sigma]
    pos_parameter_names = ['tau_m', 'tau_s', 'tau_r', 'sigma']
    check_if_positive(pos_parameters, pos_parameter_names)
    check_for_valid_k_in_fast_synaptic_regime(tau_m, tau_s)

    if V_th_rel < V_0_rel:
        raise ValueError('V_th should be larger than V_0!')

    # using zetac (zeta - 1), because zeta gives a nan result for arguments
    # smaller than 1
    alpha = np.sqrt(2) * abs(zetac(0.5) + 1)
    # effective threshold
    # (the additional factor sigma is canceled in the Siegert formula)
    V_th1 = V_th_rel + sigma * alpha / 2. * np.sqrt(tau_s / tau_m)
    # effective reset
    V_01 = V_0_rel + sigma * alpha / 2. * np.sqrt(tau_s / tau_m)
    # use standard Siegert with modified threshold and reset
    return nu_0(tau_m, tau_r, V_th1, V_01, mu, sigma)
def distUsersNodes(usersPerApp, dist, nbr_leafs):
    x = arange(1, nbr_leafs + 1)
    x1 = [l for l in range(0, nbr_leafs)]
    if dist == "Uniform":
        mu = usersPerApp
        sigma = 0.1
        result = np.random.normal(mu, sigma, nbr_leafs)
        _result_dict_node = refineDictionary(
            dict(zip(x1, result.astype(np.int64))), 5)
    elif dist == "Zeta":
        a = 2.  # distribution parameter
        y = x**(-a) / sps.zetac(a)
        result = y / sum(y) * usersPerApp
        _result_dict_node = refineDictionary(
            dict(zip(x1, result.astype(np.int64))), 5)
    else:
        print("Put correct distribution function")
        return
    return _result_dict_node
def plot_zipf_law(self):
    ranks = len(self.vocabulary_tfc)
    values = []
    # convert the frequency values to a numpy array
    # frequency = {key: value for key, value in self.vocabulary_tfc.items()[0:1000]}
    frequencies = self.vocabulary_tfc
    for key in frequencies:
        values.append(frequencies[key])
    s = np.array(values)

    # calculate Zipf's law and plot the data
    a = 2.  # distribution parameter
    # `density=True` replaces the deprecated `normed=True`
    count, bins, ignored = plt.hist(s[s < 20], 20, density=True)
    x = np.arange(1., 50.)
    y = x**(-a) / special.zetac(a)
    plt.plot(x, y / max(y), linewidth=2, color='r')
    # plt.show()
    plt.savefig(stat.path + '/Statistics/zipf_law_Graph.png')
def d_nu_d_mu_fb433(tau_m, tau_s, tau_r, V_th_rel, V_0_rel, mu, sigma):
    """
    Derivative of the stationary firing rate with synaptic filtering
    with respect to the mean input.

    See Appendix B in
    Schuecker, J., Diesmann, M. & Helias, M.
    Reduction of colored noise in excitable systems to white
    noise and dynamic boundary conditions. 1–23 (2014).

    Parameters:
    -----------
    tau_m: float
        Membrane time constant in seconds.
    tau_s: float
        Synaptic time constant in seconds.
    tau_r: float
        Refractory time in seconds.
    V_th_rel: float
        Relative threshold potential in mV.
    V_0_rel: float
        Relative reset potential in mV.
    mu: float
        Mean neuron activity in mV.
    sigma: float
        Standard deviation of neuron activity in mV.

    Returns:
    --------
    float:
        Zero frequency limit of the colored noise transfer function in Hz/mV.
    """
    alpha = np.sqrt(2) * abs(zetac(0.5) + 1)
    x_th = np.sqrt(2) * (V_th_rel - mu) / sigma
    x_r = np.sqrt(2) * (V_0_rel - mu) / sigma
    integral = 1. / (nu_0(tau_m, tau_r, V_th_rel, V_0_rel, mu, sigma) * tau_m)
    prefactor = np.sqrt(tau_s / tau_m) * alpha / (tau_m * np.sqrt(2))
    dnudmu = d_nu_d_mu(tau_m, tau_r, V_th_rel, V_0_rel, mu, sigma)
    dPhi_prime = Phi_prime_mu(x_th, sigma) - Phi_prime_mu(x_r, sigma)
    dPhi = Phi(x_th) - Phi(x_r)
    phi = dPhi_prime * integral + (2 * np.sqrt(2) / sigma) * dPhi**2
    return dnudmu - prefactor * phi / integral**3
def zipf_theory(size: int, num_ranks: int, alpha: float = 1.5) -> Line2D:
    """
    Plots the theoretical Zipf's-law curve for the given parameters.

    Arguments:
        size (int): number of words
        num_ranks (int): number of word ranks
        alpha (float): exponent α

    Returns:
        plot (Line2D): plot of the theoretical Zipf's law
    """
    x = np.arange(1, num_ranks + 1)
    y = x**(-alpha) / special.zetac(alpha)
    plot = plt.plot(x, y / max(y) * size, linewidth=2, color='r',
                    label='Theoretical law')
    return plot
def transfer_function_shift(omega, params, mu, sigma):
    r"""
    Calculates the transfer function according to $\tilde{n}$ in [1].
    (Raw docstring so that \tilde is not read as a tab escape.)

    The expression is to first order equivalent to
    `transfer_function_taylor`. Since the underlying theory is correct to
    first order, the two expressions are interchangeable. We add it here for
    completeness, but it is not used in this package.
    """
    # convert from ms to s
    taum = params['taum'] * 1e-3
    tauf = params['tauf'] * 1e-3
    taur = params['taur'] * 1e-3
    Vth = params['Vth']
    V0 = params['V0']

    # convert mu to an absolute value (not relative to reset)
    mu += V0

    # effective threshold and reset
    alpha = np.sqrt(2) * abs(zetac(0.5) + 1)
    Vth += sigma * alpha / 2. * np.sqrt(tauf / taum)
    V0 += sigma * alpha / 2. * np.sqrt(tauf / taum)

    # for frequency zero the exact expression is given by the derivative of
    # the f-I curve
    if np.abs(omega - 0.) < 1e-15:
        return siegert.d_nu_d_mu(taum, tauf, taur, Vth, V0, mu, sigma)
    else:
        nu = siegert.nu_0(taum, taur, Vth, V0, mu, sigma)
        x_t = np.sqrt(2.) * (Vth - mu) / sigma
        x_r = np.sqrt(2.) * (V0 - mu) / sigma
        z = complex(-0.5, omega * taum)
        frac = dPhi_x_r(z, x_t, x_r) / Phi_x_r(z, x_t, x_r)
        return (np.sqrt(2.) / sigma * nu
                / complex(1., omega * taum) * frac)
def transfer_function_taylor(omega, params, mu, sigma):
    """
    Calculates the transfer function according to Eq. 93 in [2]. The results
    in [3] were obtained with this expression, and it is used throughout
    this package.
    """
    # convert from ms to s
    taum = params['taum'] * 1e-3
    tauf = params['tauf'] * 1e-3
    taur = params['taur'] * 1e-3
    Vth = params['Vth']
    V0 = params['V0']

    # convert mu to an absolute value (not relative to reset)
    mu += V0

    # for frequency zero the exact expression is given by the derivative of
    # the f-I curve
    if np.abs(omega - 0.) < 1e-15:
        return siegert.d_nu_d_mu_fb433(taum, tauf, taur, Vth, V0, mu, sigma)
    else:
        nu0 = siegert.nu_0(taum, taur, Vth, V0, mu, sigma)
        nu0_fb = siegert.nu0_fb433(taum, tauf, taur, Vth, V0, mu, sigma)
        x_t = np.sqrt(2.) * (Vth - mu) / sigma
        x_r = np.sqrt(2.) * (V0 - mu) / sigma
        z = complex(-0.5, omega * taum)
        alpha = np.sqrt(2) * abs(zetac(0.5) + 1)
        k = np.sqrt(tauf / taum)
        A = alpha * taum * nu0 * k / np.sqrt(2)
        a0 = Phi_x_r(z, x_t, x_r)
        a1 = dPhi_x_r(z, x_t, x_r) / a0
        a3 = A / taum / nu0_fb * (-a1**2 + d2Phi_x_r(z, x_t, x_r) / a0)
        result = (np.sqrt(2.) / sigma * nu0_fb
                  / complex(1., omega * taum) * (a1 + a3))
        return result
def nu0_fb433(tau_m, tau_s, tau_r, V_th, V_r, mu, sigma, switch_fb=-7.):
    """Calculates stationary firing rates for exponential PSCs using the
    expression with a Taylor expansion in k = sqrt(tau_s/tau_m)
    (Eq. 4.33 in Fourcaud & Brunel 2002).
    """
    alpha = np.sqrt(2.) * abs(zetac(0.5) + 1)
    x_th = np.sqrt(2.) * (V_th - mu) / sigma
    x_r = np.sqrt(2.) * (V_r - mu) / sigma
    if x_r < switch_fb:
        return nu0_fb(tau_m, tau_s, tau_r, V_th, V_r, mu, sigma)

    # preventing overflow in np.exp in Phi(s)
    if x_th > 20.0 / np.sqrt(2.):
        result = nu_0(tau_m, tau_r, V_th, V_r, mu, sigma)
    else:
        r = nu_0(tau_m, tau_r, V_th, V_r, mu, sigma)
        dPhi = Phi(x_th) - Phi(x_r)
        result = (r - np.sqrt(tau_s / tau_m) * alpha
                  / (tau_m * np.sqrt(2)) * dPhi * (r * tau_m)**2)
    if math.isnan(result):
        print(mu, sigma, x_th, x_r)
    return result
def test_zetac_inf():
    assert_equal(sc.zetac(np.inf), 0.0)
    assert_(np.isnan(sc.zetac(-np.inf)))
def f(x, a):
    return (x**-a) / zetac(a)
def fittingMethod(xdata, ydata, method, initialParameters=None, verbose=False):
    colors = ["red", "green", "blue", "purple", "orange", "magenta", "cyan",
              "limegreen", "gold"]

    ## default return values
    fit, params, label, color = [], [], "", ""

    ## prepare data
    xdata = np.array(xdata)
    ydata = np.array(ydata)

    ## fitting functions
    powerlaw = lambda x, amp, index: amp * (x**index)
    zipf = lambda x, a: x**(-a) / special.zetac(a)
    diffErr = lambda p, x, y, f: (y - f(p, x))
    quadratic = lambda p, x: p[0] * (p[3]*x)**2 + p[1] * p[3]*x + p[2]
    quadraticErr = lambda p, x, y: abs(quadratic(p, x) - y) / abs((quadratic(p, x) + y) / 2.)
    nlogn = lambda p, x: p[0] * (p[3]*x * log(p[3]*x)) + p[1] * p[3]*x + p[2]
    nlognErr = lambda p, x, y: abs(nlogn(p, x) - y) / abs((nlogn(p, x) + y) / 2.)
    ## fitting is often best done by first converting to a linear equation
    ## and then fitting to a straight line:
    ## y = a * x^b <=> log(y) = log(a) + b*log(x)
    affine = lambda p, x: p[0] + p[1] * x

    ## conversion functions (needed when handling probabilities)
    toProbabilities = lambda data: [1.*val/sum(data) for val in data]
    toFrequencies = lambda data, originalData: [1.*val*sum(originalData) for val in data]

    ## curve_fit on powerlaw
    if method == 0:
        label = "curve_fit on powerlaw"
        pfinal, covar = optimize.curve_fit(powerlaw, xdata, ydata)
        amp, index = pfinal[0], pfinal[1]
        fit = powerlaw(xdata, amp, index)

    ## curve_fit on zipf
    elif method == 1:
        y = toProbabilities(ydata)  ## convert to probabilities
        label = "curve_fit on zipf"
        pfinal, covar = optimize.curve_fit(zipf, xdata, y)
        s = pfinal[0]
        fit = zipf(xdata, s)
        fit = toFrequencies(fit, ydata)  ## restore frequencies

    ## polyfit on log with powerlaw
    elif method == 2:
        label = "polyfit on log with powerlaw"
        (a, b) = polyfit(log10(xdata), log10(ydata), 1)
        b = 10. ** b
        fit = powerlaw(xdata, b, a)

    ## polyfit on log with zipf
    elif method == 3:
        y = toProbabilities(ydata)  ## convert to probabilities
        label = "polyfit on log with zipf"
        (a, b) = polyfit(log10(xdata), log10(y), 1)
        fit = zipf(xdata, -a)
        fit = toFrequencies(fit, ydata)  ## restore frequencies

    ## leastsq on log with affine + powerlaw
    elif method == 4:
        label = "leastsq on log (for parameters) and powerlaw (for drawing)"
        logx, logy = log10(xdata), log10(ydata)
        pinit = [1.0, -1.0] if initialParameters is None else initialParameters
        out = optimize.leastsq(diffErr, pinit, args=(logx, logy, affine),
                               full_output=1)
        amp, index = 10.0**out[0][0], out[0][1]
        fit = powerlaw(xdata, amp, index)

    ## leastsq on log with affine + zipf
    elif method == 5:
        y = toProbabilities(ydata)  ## convert to probabilities
        label = "leastsq on log and zipfian model"
        pinit = [1.0, -1.0] if initialParameters is None else initialParameters
        out = optimize.leastsq(diffErr, pinit,
                               args=(log10(xdata), log10(y), affine),
                               full_output=1)
        a = out[0][1]
        fit = zipf(xdata, -a)
        fit = toFrequencies(fit, ydata)  ## restore frequencies

    ## polyfit / polyval - 2nd degree
    elif method == 6:
        label = "polyfit / polyval - 2nd degree"
        params = polyfit(xdata, ydata, 2)
        fit = polyval(params, xdata)

    if verbose:
        print("[INFO] Modelling data using " + label + " [method " + str(method) + "]")
    return fit, label, (params if method == 6 else colors[method])
def test_zetac_negative_even():
    pts = [-2, -50, -100]
    for p in pts:
        assert_equal(sc.zetac(p), -1)
def zipf_rank_freq(x, a):
    return 1. / ((x**a) * (1. + zetac(a)))
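# Since 1. + zetac(a) is exactly zeta(a), zipf_rank_freq is the normalized
# Zipf probability mass function p(k) = 1 / (zeta(a) * k^a). A sanity check
# that the mass sums to ~1 over a long (truncated) run of ranks, assuming
# a > 1 so the series converges:
import numpy as np
from scipy.special import zetac

k = np.arange(1, 100001)
print(np.sum(zipf_rank_freq(k, 2.0)))  # ~0.99999; -> 1 as the truncation grows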
f = open('neutr_distr_1.dat', 'w')
plt.yscale('log')
plt.xscale('log')

Q = 0.782318       # MeV
eps_0 = 1e-10      # MeV
c = 2.99792458e10  # cm / s
T_in = 1e10        # K
T_fin = 3e7        # K
tau = 880.1        # sec
T_0 = 2.725        # K
hpl = 1.054e-27    # erg * sec
k = 1.38e-16       # erg / K

# (zetac(3) + 1) == zeta(3)
const = 2 * (zetac(3) + 1) * 6.1e-10 * 30 * 1.998 * 1e20 * c / tau / Q ** 5 / pi ** 2 / 4

fin_result = []
for i in range(len(energ)):
    eps_0 = energ[i]

    def fn(x):
        return (k * x / hpl / c) ** 3 * spline(x) / x ** 3 * (Q - eps_0 * x / T_0) ** 2

    result = quad(fn, 3e7, 1e10)
    fin_result.append(result[0] * const * eps_0 ** 2)
    f.write(str(eps_0) + ',' + ' ' + str(fin_result[i]) + '\n')

fin_result_1 = []
for i in range(len(energ_1)):
    eps_0 = energ_1[i]

    def fn(x):
        return (k * x / hpl / c) ** 3 * spline(x) / x ** 3 * (Q - eps_0 * x / T_0) ** 2
f = open('neutr_distr_1.dat', 'w')
plt.yscale('log')
plt.xscale('log')

Q = 0.782318       # MeV
eps_0 = 1e-10      # MeV
c = 2.99792458e10  # cm / s
T_in = 1e10        # K
T_fin = 3e7        # K
tau = 880.1        # sec
T_0 = 2.725        # K
hpl = 1.054e-27    # erg * sec
k = 1.38e-16       # erg / K
b_1 = 5929899500   # K

# (zetac(3) + 1) == zeta(3)
const_1 = pi * 2 * (zetac(3) + 1) * 6.1e-10 * 30 * 1.998 * 1e20 * c / tau / Q ** 5 / pi ** 2 / (4 * pi)
const_2 = pi * 2 * (zetac(3) + 1) * 6.1e-10 * 30 * 3.56 * 1e20 * c / tau / Q ** 5 / pi ** 2 / (4 * pi)

fin_result = []
for i in range(len(energ)):
    eps_0 = energ[i]

    def fn(x):
        return (k * x / hpl / c) ** 3 * spline(x) / x ** 3 * (Q - eps_0 * x / T_0) ** 2

    result_1 = quad(fn, 3e7, b_1)
    result_2 = quad(fn, b_1, 1e10)
    fin_result.append((result_1[0] * const_1 + result_2[0] * const_2) * eps_0 ** 2)
    f.write(str(eps_0) + ',' + ' ' + str(fin_result[i]) + '\n')

fin_result_1 = []
for i in range(len(energ_1)):
    eps_0 = energ_1[i]
_tckb = interpolate.splrep(_xb, _yb)

def Jb_spline(X, n=0):
    """Jb interpolated from a saved spline. Input is (m/T)^2."""
    X = numpy.array(X, copy=False)
    x = X.ravel()
    y = interpolate.splev(x, _tckb, der=n).ravel()
    y[x < _xbmin] = interpolate.splev(_xbmin, _tckb, der=n)
    y[x > _xbmax] = 0
    return y.reshape(X.shape)

# Now for the low-x expansion (require that n <= 50)
a, b, c, d = -pi**4/45, pi*pi/12, -pi/6, -1/32.
logab = 1.5 - 2*euler_gamma + 2*log(4*pi)
l = numpy.arange(50) + 1
# (1 + zetac(2l+1)) == zeta(2l+1)
g = (-2*pi**3.5 * (-1)**l * (1 + special.zetac(2*l + 1))
     * special.gamma(l + .5) / (fac(l + 2) * (2*pi)**(2*l + 4)))
lowCoef_b = (a, b, c, d, logab, l, g)
del (a, b, c, d, logab, l, g)  # clean up namespace

a, b, d = -7*pi**4/360, pi*pi/24, 1/32.
logaf = 1.5 - 2*euler_gamma + 2*log(pi)
l = numpy.arange(50) + 1
g = (.25*pi**3.5 * (-1)**l * (1 + special.zetac(2*l + 1))
     * special.gamma(l + .5) * (1 - .5**(2*l + 1)) / (fac(l + 2) * pi**(2*l + 4)))
lowCoef_f = (a, b, d, logaf, l, g)
del (a, b, d, logaf, l, g)  # clean up namespace

def Jb_low(x, n=20):
    """Jb calculated using the low-x (high-T) expansion."""
    (a, b, c, d, logab, l, g) = lowCoef_b
def BB_Err2_Qv(p, nu, y, dy):
    Td, nu0, gam, sqtfR = p
    # Td, nu0, gam, fcov = p
    # sqtfR = sqtfR  # * pc2cm
    nu0 = nu0 * 1. * 10**14
    Dst = 1.4 * 10**9  # * pc2cm
    Lav = 6.78 * 10**46
    print(p)

    ## make sure R is consistent with Temp there!
    qIR = (1. / nu0)**gam
    from scipy import special as spc
    R = ma.sqrt(Lav / (2. * ma.pi * 8. * ma.pi * qIR * h/c/c * (kb/h)**(4.+gam)
                       * spc.gamma(4+gam) * (spc.zetac(4+gam) + 1.) * Td**(4+gam)))

    if (gam < 0):
        chi2 = np.inf
    else:
        # Rprint = sqtfR / pc2cm
        # print(Rprint)
        pref = np.ones(len(nu))
        for i in range(len(nu)):
            pref[i] = min(1., (nu[i]/nu0)**gam)
        chi = (y - pref * Bv(nu, Td) * 4.*ma.pi**(sqtfR/Dst)**2) / dy
        # chi = (y - pref * Bv(nu, Td) * fcov*(R/Dst)**2) / dy
        chi2 = sum(chi*chi)
    print(chi2)
    return chi2
import numpy as np
a = 2.  # distribution parameter
s = np.random.zipf(a, 1000)

import matplotlib.pyplot as plt
import scipy.special as sps

# `density=True` replaces the deprecated `normed=True`
count, bins, ignored = plt.hist(s[s < 50], 50, density=True)
x = np.arange(1., 50.)
y = x ** (-a) / sps.zetac(a)
plt.plot(x, y / max(y), linewidth=2, color='r')
plt.show()
alpha = 1/137.056
mc = 1.27
mb = 4.8
mt = 164
mw = 80.399
mz = 91.1876
mu = mb
muw = mw
alphasz = 0.119
z = 0.31

# Mathematica compatibility functions:
Log = np.log
Pi = np.pi
I = 1j
Zeta = lambda x: special.zetac(x) + 1
ArcTan = np.arctan
Sqrt = np.sqrt
Abs = np.abs

# kinematic function
fz = 1 - 8*z**2 + 8*z**6 - z**8 - 24*z**4*Log(z)

# (* strong coupling constant *)
v = lambda y: 1 - 23/3*alphasz/(2*Pi)*Log(mz/y)
alphas = lambda y: alphasz/v(y)*(1 - 116/23*alphasz/(4*Pi)*Log(v(y))/v(y))

# (* operators at mw scale *)
xtw = (mt/mw)**2
yth = lambda mH: (mt/mH)**2
# (* mH=500; U2=1; U1=1; *)
c20 = 1