def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    # Element-wise multiplication, then sum the log-probabilities and add
    # the log prior for each class.
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0
def test(self, img):
    eps = 1e-9
    X = self.img2feat(img)
    pred_prob = np.zeros(self.class_num)
    for i in range(self.class_num):
        # Quadratic discriminant: -(1/2)(x - mu_i)^T Sigma_i^{-1} (x - mu_i)
        pred_prob[i] = -0.5 * (X - self.mean[i]).dot(
            np.linalg.pinv(self.cov[i])).dot(X - self.mean[i])
        pred_prob[i] += -0.5 * log(np.linalg.det(self.cov[i]) + eps)
        # The log prior enters without a 1/2 factor
        # (the original had 0.5 * log(self.pw[i])).
        pred_prob[i] += log(self.pw[i])
    pred_class = np.argmax(pred_prob)
    return pred_class
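# --- Sketch (hypothetical, not from the original source): the score above is
# the Gaussian log-posterior up to a constant. A self-contained NumPy check:
import numpy as np
from math import log

def gaussian_score(x, mean, cov, prior):
    diff = x - mean
    return (-0.5 * diff @ np.linalg.pinv(cov) @ diff
            - 0.5 * log(np.linalg.det(cov))
            + log(prior))

x = np.array([0.2, -0.1])
means = [np.zeros(2), np.ones(2)]
covs = [np.eye(2), 2.0 * np.eye(2)]
priors = [0.6, 0.4]
scores = [gaussian_score(x, m, c, p) for m, c, p in zip(means, covs, priors)]
print(int(np.argmax(scores)))  # predicted class index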
import numpy as np
import matplotlib.pyplot as plt
import scipy.cluster.vq
import scipy.spatial.distance
from numpy.lib import scimath

def get_gap_statistics(data, refs=None, nrefs=30, ks=range(1, 11)):
    # Euclidean distance between a point and its cluster centroid.
    dst = scipy.spatial.distance.euclidean
    shape = data.shape
    # If no reference sets are given, draw uniform samples from the
    # bounding box of the data.
    if refs is None:
        tops = data.max(axis=0)
        bots = data.min(axis=0)
        rands = np.random.random_sample(size=(shape[0], shape[1], nrefs))
        for i in range(nrefs):
            # Rescale each uniform sample to the data's bounding box
            # (replaces the deprecated scipy.matrix/np.diag construction).
            rands[:, :, i] = rands[:, :, i] * (tops - bots) + bots
    else:
        rands = refs
    gaps = np.zeros((len(ks),))
    errors = np.zeros((len(ks),))
    labels = dict((el, []) for el in ks)
    for (i, k) in enumerate(ks):
        (kmc, kml) = scipy.cluster.vq.kmeans2(data, k)
        disp = sum(dst(data[m, :], kmc[kml[m], :]) for m in range(shape[0]))
        labels[k] = kml
        refdisps = np.zeros((rands.shape[2],))
        for j in range(rands.shape[2]):
            (kmc, kml) = scipy.cluster.vq.kmeans2(rands[:, :, j], k)
            refdisps[j] = sum(dst(rands[m, :, j], kmc[kml[m], :])
                              for m in range(shape[0]))
        # Gap statistic: E[log(W_ref)] - log(W_data).
        gaps[i] = scimath.log(np.mean(refdisps)) - scimath.log(disp)
        # Standard error, with the sqrt(1 + 1/nrefs) correction.
        errors[i] = scimath.sqrt(
            sum((scimath.log(refdisp) - np.mean(scimath.log(refdisps))) ** 2
                for refdisp in refdisps) / float(nrefs)
        ) * scimath.sqrt(1 + 1 / nrefs)
    xval = range(1, len(gaps) + 1)
    yval = gaps
    plt.errorbar(xval, yval, xerr=None, yerr=errors)
    plt.xlabel('K Clusters')
    plt.ylabel('Gap_Statistics')
    plt.title('Gap Statistics for: nref={}'.format(nrefs))
    plt.show()
    return
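# --- Usage sketch (hypothetical data): two well-separated Gaussian blobs,
# so the gap statistic should peak at k = 2.
import numpy as np

rng = np.random.default_rng(0)
blob_a = rng.normal(loc=0.0, scale=0.5, size=(100, 2))
blob_b = rng.normal(loc=5.0, scale=0.5, size=(100, 2))
get_gap_statistics(np.vstack([blob_a, blob_b]), nrefs=10, ks=range(1, 6))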
def __init__(self, name="sech", position=0.0, width=10.0, peak_power=1e-3, offset_nu=0.0, m=0, C=0.0, initial_phase=0.0, channel=0, using_fwhm=False): if not (-0.5 <= position <= 0.5): raise OutOfRangeError( "position is out of range. Must be in [-0.5, 0.5]") if not (1e-3 < width < 1e3): raise OutOfRangeError( "width is out of range. Must be in (1e-3, 1e3)") if not (0.0 <= peak_power < 1e9): raise OutOfRangeError( "peak_power is out of range. Must be in [0.0, 1e9)") if not (-200.0 < offset_nu < 200.0): raise OutOfRangeError( "offset_nu is out of range. Must be in (-200.0, 200.0)") if not (-1e3 < C < 1e3): raise OutOfRangeError("C is out of range. Must be in (-1e3, 1e3)") if not (0.0 <= initial_phase < 2.0 * pi): raise OutOfRangeError( "initial_phase is out of range. Must be in [0.0, 2.0 * pi)") if not (0 <= channel < 2): raise OutOfRangeError("channel is out of range. Must be in [0, 2)") if int(channel) != channel: raise NotIntegerError("channel must be an integer") self.name = name self.position = position self.width = width # ps self.peak_power = peak_power # W self.offset_nu = offset_nu # THz self.C = C # rad self.initial_phase = initial_phase self.channel = channel self.fwhm = None # For a FWHM pulse width, store then convert to a HWIeM pulse width: if using_fwhm: self.fwhm = width # store fwhm pulse width self.width *= 0.5 / log(1.0 + sqrt(2.0)) self.field = None
def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pCi = sum(trainCategory) / float(numTrainDocs)
    # Initialize counts to 1 and denominators to 2 (Laplace smoothing),
    # so unseen words do not zero out the product of probabilities.
    p0Num = ones(numWords)
    p1Num = ones(numWords)
    p0Denom = 2.0
    p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # Take logs to avoid floating-point underflow when many small
    # probabilities are multiplied.
    p1Vect = log(p1Num / p1Denom)
    p0Vect = log(p0Num / p0Denom)
    return p0Vect, p1Vect, pCi
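# --- Usage sketch (hypothetical toy data): train on a tiny bag-of-words
# matrix, then classify a new word vector with classifyNB above.
from numpy import array, log, ones

train_matrix = array([[1, 1, 0, 0],
                      [0, 1, 1, 0],
                      [0, 0, 1, 1],
                      [1, 0, 0, 1]])
train_category = [0, 0, 1, 1]  # first two docs are class 0, the rest class 1
p0_vec, p1_vec, p_class1 = trainNB0(train_matrix, train_category)
print(classifyNB(array([0, 0, 1, 1]), p0_vec, p1_vec, p_class1))  # expect 1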
def check_log_probability(self, model):
    samples_count = 100
    for _ in range(samples_count):
        coeffs = randn(model.GetNumberOfPrincipalComponents())
        s = model.DrawSample(coeffs)
        p = model.ComputeProbability(s)
        lp = model.ComputeLogProbability(s)
        # Use abs() so the check is two-sided, as the message implies.
        self.assertTrue(
            abs(log(p) - lp) < 0.05,
            "Log probability should roughly equal the log of the probability")
from math import exp, log, sqrt
from scipy import stats

def bs_put(S, X, T, rf, sigma):
    """
    Black-Scholes-Merton put option model.

    S:     current stock price
    X:     exercise price
    T:     maturity date in years
    rf:    risk-free rate (continuously compounded)
    sigma: volatility of the underlying security
    """
    d1 = (log(S / X) + (rf + sigma * sigma / 2.) * T) / (sigma * sqrt(T))
    d2 = d1 - sigma * sqrt(T)
    return -S * stats.norm.cdf(-d1) + X * exp(-rf * T) * stats.norm.cdf(-d2)
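# --- Usage sketch (hypothetical parameters): a one-year at-the-money put.
# Put-call parity, C - P = S - X*exp(-rf*T), then gives the implied call.
from math import exp

S, X, T, rf, sigma = 100.0, 100.0, 1.0, 0.05, 0.2
p = bs_put(S, X, T, rf, sigma)
print(p)                          # approximately 5.57
print(p + S - X * exp(-rf * T))   # implied call, approximately 10.45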
def extract_fit_parameters(self, analysis_type, sweep_values):
    ''' Curve fit. '''
    log_x = True
    function = linear_func
    if analysis_type == "words":
        log_y = True
    elif analysis_type == "characters":
        log_y = False
    if sweep_values:
        array = list(zip(*sweep_values))
        xarr = log(array[0]) if log_x else array[0]
        yarr = log(array[1]) if log_y else array[1]
        initial_a = 0
        initial_b = 0
        # Fit a straight line in the (optionally) log-transformed coordinates.
        popt, pcov = curve_fit(function, xarr, yarr, (initial_a, initial_b))
        slope, intercept = popt
        # One-standard-deviation parameter errors from the covariance matrix.
        perr = np.sqrt(np.diag(pcov))
        std_error_slope, std_error_intercept = perr
        fit = {'samples': len(sweep_values),
               'intercept': intercept,
               'slope': slope,
               'std_error_intercept': std_error_intercept,
               'std_error_slope': std_error_slope}
        setattr(self, analysis_type + "_fit", fit)
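# --- Sketch (assumption, not shown in the source): linear_func is the
# straight line being fitted. With log_x and log_y both True, the fitted
# y = a*log(x) + b in log space is the power law y = e^b * x^a in the
# original coordinates (compare log_log_func below).
def linear_func(x, a, b):
    return a * x + b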
def _covgc(x, lag, dt, ind_t, p):
    logger.info(" Compute pair (%i, %i)" % (p[0], p[1]))
    # Extract data for a given pair of sources.
    x_ = np.squeeze(x[p[0], ind_t]).reshape(lag + 1, dt)
    y_ = np.squeeze(x[p[1], ind_t]).reshape(lag + 1, dt)

    # ---------------------------------------------------------------------
    # Conditional entropies
    # ---------------------------------------------------------------------
    # h_ycy : H(Y_i+1|Y_i) = H(Y_i+1) - H(Y_i)
    det_yi1 = det(np.cov(y_))
    det_yi = det(np.cov(y_[1:, :]))
    h_ycy = log(det_yi1) - log(det_yi)
    # h_ycx : H(Y_i+1|X_i,Y_i) = H(Y_i+1,X_i,Y_i) - H(X_i,Y_i)
    det_yxi1 = det(np.cov(np.r_[y_, x_[1:, :]]))
    det_yxi = det(np.cov(np.r_[y_[1:, :], x_[1:, :]]))
    h_ycx = log(det_yxi1) - log(det_yxi)
    # h_xcx : H(X_i+1|X_i) = H(X_i+1) - H(X_i)
    det_xi1 = det(np.cov(x_))
    det_xi = det(np.cov(x_[1:, :]))
    h_xcx = log(det_xi1) - log(det_xi)
    # h_xcy : H(X_i+1|X_i,Y_i) = H(X_i+1,X_i,Y_i) - H(X_i,Y_i)
    det_xyi1 = det(np.cov(np.r_[x_, y_[1:, :]]))
    h_xcy = log(det_xyi1) - log(det_yxi)
    # h_xxcyy : H(X_i+1,Y_i+1|X_i,Y_i) = H(X_i+1,Y_i+1,X_i,Y_i) - H(X_i,Y_i)
    det_xyi1 = det(np.cov(np.r_[x_, y_]))
    h_xxcyy = log(det_xyi1) - log(det_yxi)

    # ---------------------------------------------------------------------
    # Causality measures
    # ---------------------------------------------------------------------
    gc = np.zeros((3,), dtype=complex)
    gc[0] = h_ycy - h_ycx            # gc[pairs[:, 0] -> pairs[:, 1]]
    gc[1] = h_xcx - h_xcy            # gc[pairs[:, 1] -> pairs[:, 0]]
    gc[2] = h_ycx + h_xcy - h_xxcyy  # gc[x_.y_] (instantaneous)
    return gc
def pot_param(param):
    """ Calculates potential parameters in the Chulkov model. """
    global alat, a10, a20, a1, a2, a3, z1, z_im, g0, alph, beta, lamb
    alat, a10, a1, a2, beta = param
    h_to_ev = 27.2116  # Hartree-to-eV conversion factor
    a10 = a10 / h_to_ev
    a1 = a1 / h_to_ev
    a2 = a2 / h_to_ev
    g0 = 2.0 * np.pi / alat
    a20 = a2 - a1 - a10
    z1 = 5.0 * np.pi / (4.0 * beta)
    a3 = -a20 + a2 * np.cos(5.0 * np.pi / 4.0)
    alph = beta * a2 * np.sin(5.0 * np.pi / 4.0) / a3
    lamb = 2.0 * alph
    z_im = z1 - log(-lamb / (4.0 * a3)) / alph
    return -a10, a1, z_im
def __init__(self, name="filter", width_nu=0.1, offset_nu=0.0, m=1, channel=0, using_fwhm=False, type_filt="reflected"): if not (1e-6 < width_nu < 1e3): raise OutOfRangeError( "width_nu is out of range. Must be in (1e-6, 1e3)") if not (-200.0 < offset_nu < 200.0): raise OutOfRangeError( "offset_nu is out of range. Must be in (-200.0, 200.0)") if not (0 < m < 50): raise OutOfRangeError("m is out of range. Must be in (0, 50)") if not (0 <= channel < 2): raise OutOfRangeError("channel is out of range. Must be in [0, 2)") if int(m) != m: raise NotIntegerError("m must be an integer") if int(channel) != channel: raise NotIntegerError("channel must be an integer") self.name = name self.width_nu = width_nu self.offset_nu = offset_nu self.m = m self.channel = channel self.fwhm_nu = None self.type = type_filt # For a FWHM filter width, store then convert to a HWIeM filter width: if using_fwhm: self.fwhm_nu = width_nu # store fwhm filter width self.width_nu *= 0.5 / power(log(2.0), 1.0 / (2 * m)) self.shape = None self.field = None
def _entropy_relative(rho, sigma, base=e, sparse=False):
    """
    ****NEEDS TO BE WORKED ON****

    Calculates the relative entropy S(rho||sigma) between two
    density matrices.

    Parameters
    ----------
    rho : qobj
        First density matrix.
    sigma : qobj
        Second density matrix.
    base : {e, 2}
        Base of logarithm.

    Returns
    -------
    rel_ent : float
        Value of relative entropy.
    """
    if rho.type != 'oper' or sigma.type != 'oper':
        raise TypeError("Inputs must be density matrices.")
    # sigma terms
    svals = sp_eigs(sigma.data, sigma.isherm, vecs=False, sparse=sparse)
    snzvals = svals[svals != 0]
    if base == 2:
        slogvals = log2(snzvals)
    elif base == e:
        slogvals = log(snzvals)
    else:
        raise ValueError("Base must be 2 or e.")
    # rho terms
    rvals = sp_eigs(rho.data, rho.isherm, vecs=False, sparse=sparse)
    rnzvals = rvals[rvals != 0]
    # calculate tr(rho * log sigma)
    rel_trace = float(real(sum(rnzvals * slogvals)))
    return -entropy_vn(rho, base, sparse) - rel_trace
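# --- Worked sketch (plain NumPy, not QuTiP): for commuting (diagonal)
# density matrices, S(rho||sigma) = sum_i p_i (log p_i - log q_i), i.e. the
# classical Kullback-Leibler divergence of the eigenvalue distributions.
import numpy as np

p = np.array([0.5, 0.5])  # eigenvalues of rho
q = np.array([0.9, 0.1])  # eigenvalues of sigma
rel_ent = np.sum(p * (np.log(p) - np.log(q)))
print(rel_ent)  # approximately 0.5108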
def kernel_fredriksen(n):
    """
    Generates the kernel for a Hilbert transform using FFT.

    Parameters
    ----------
    n : int
        Number of equidistant grid points.

    Returns
    -------
    ndarray
        Kernel used when performing the Hilbert transform using FFT.
    """
    aux = np.zeros(n + 1, dtype=doublenp)
    for i in range(1, n + 1):
        aux[i] = i * log(i)
    m = 2 * n
    ker = np.zeros(m, dtype=doublenp)
    for i in range(1, n):
        # Second difference of i*log(i); the kernel is odd around the
        # midpoint of the length-2n array.
        ker[i] = aux[i + 1] - 2 * aux[i] + aux[i - 1]
        ker[m - i] = -ker[i]
    return fft(ker) / pi
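# --- Usage sketch (assumption, not from the original source): an FFT kernel
# like this is typically applied by circular convolution, zero-padding the
# sampled function to length 2*n; the exact helper and normalization used by
# the surrounding library may differ.
import numpy as np
from numpy.fft import fft, ifft

n = 256
ker = kernel_fredriksen(n)      # relies on doublenp/fft from its own module
x = np.linspace(-10.0, 10.0, n)
f = 1.0 / (1.0 + x**2)          # Lorentzian test function
hilb = ifft(fft(np.concatenate((f, np.zeros(n)))) * ker)[:n].real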
def optimal_bayes_decisions(llr, pi1, Cfn, Cfp, threshold=None):
    """
    Computes optimal Bayes decisions from binary log-likelihood ratios.

    llr is the array of log-likelihood ratios
    pi1 is the prior probability of class 1 (True)
    Cfp = C(1,0) is the cost of false positive errors, i.e. of predicting
        class 1 (True) when the actual class is 0 (False)
    Cfn = C(0,1) is the cost of false negative errors, i.e. of predicting
        class 0 (False) when the actual class is 1 (True)
    """
    predictions = np.empty(llr.shape, int)
    # Compare each log-likelihood ratio with the threshold; if none is
    # specified, use the theoretical optimal threshold
    # t = -log((pi1 * Cfn) / ((1 - pi1) * Cfp)).
    if threshold is None:
        threshold = -log((pi1 * Cfn) / ((1 - pi1) * Cfp))
    for i in range(llr.size):
        predictions[i] = 1 if llr[i] > threshold else 0
    return predictions
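# --- Usage sketch (hypothetical values): with pi1 = 0.5 and equal costs the
# optimal threshold is 0, so positive LLRs map to class 1.
import numpy as np
from math import log

llr = np.array([-2.3, 0.1, 1.7, -0.4])
print(optimal_bayes_decisions(llr, pi1=0.5, Cfn=1.0, Cfp=1.0))  # [0 1 1 0]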
def phi(x, Dp, Dm, sign=1):
    """
    Calculates the phi function, i.e. eq. C4 in PRB 78, 235424 (2008).

    Parameters
    ----------
    x : float
        Energy.
    Dp : float
        Bandwidth (positive energy) over temperature.
    Dm : float
        Bandwidth (negative energy) over temperature.
    sign : int
        Sign factor to be multiplied with the function.

    Returns
    -------
    double
        Real part of the function value.
    """
    Z = 0.5 + x / (2 * np.pi) * 1j
    ret = sign * (-digamma(Z).real
                  + log(0.5 * (abs(Dp) + abs(Dm)) / (2.0 * pi)))
    return ret
def entropy_vn(rho, base=e, sparse=False):
    """
    Von Neumann entropy of a density matrix.

    Parameters
    ----------
    rho : qobj
        Density matrix.
    base : {e, 2}
        Base of logarithm.
    sparse : {False, True}
        Use sparse eigensolver.

    Returns
    -------
    entropy : float
        Von Neumann entropy of `rho`.

    Examples
    --------
    >>> rho = 0.5 * fock_dm(2, 0) + 0.5 * fock_dm(2, 1)
    >>> entropy_vn(rho, 2)
    1.0
    """
    if rho.type == 'ket' or rho.type == 'bra':
        rho = ket2dm(rho)
    vals = sp_eigs(rho.data, rho.isherm, vecs=False, sparse=sparse)
    nzvals = vals[vals != 0]
    if base == 2:
        logvals = log2(nzvals)
    elif base == e:
        logvals = log(nzvals)
    else:
        raise ValueError("Base must be 2 or e.")
    return float(real(-sum(nzvals * logvals)))
def calculate_fwhm(self):
    """ Convert a HWIeM width to a FWHM width. """
    if self.fwhm_nu is not None:
        return self.fwhm_nu
    else:
        return self.width_nu * 2.0 * power(log(2.0), 1.0 / (2 * self.m))
def _log(self, tex):
    if not tex['lower_index']:
        return scimath.log(self.compute(tex['content']))
    # Use scimath.log for the base as well, so a negative base yields a
    # complex result instead of NaN (matches the module-level log() helper).
    return (scimath.log(self.compute(tex['content']))
            / scimath.log(self.compute(tex['lower_index'])))
import math
from numpy.lib import scimath

# scimath.log returns complex values for negative arguments:
# log(-e) = 1 + i*pi.
scimath.log(-math.exp(1)) == (1 + 1j * math.pi)
def word_given_topic(topic, vocab=vocab_size, prior=0.5, burn_in=500):
    postburn = test100.direct_samples[burn_in:, ]
    subsets = postburn[np.where(postburn == topic)]
    posterior = prior * np.ones(vocab)
    for i in range(vocab):
        posterior[i] = posterior[i] + len(subsets[subsets == i])
    return posterior / posterior.sum()

p_w_given_t = np.apply_along_axis(word_given_topic, 1,
                                  np.arange(50).reshape(-1, 1))
p_w_given_d = p_w_given_t.T @ p_t_given_d.T
p_w_train = (X_train @ p_w_given_d).sum(axis=1)
perplexity_train = np.exp(-1 / p_w_train.shape[0] * sci.log(p_w_train).sum())
p_w_test = (X_test @ p_w_given_d).sum(axis=1)
perplexity_test = np.exp(-1 / p_w_test.shape[0] * sci.log(p_w_test).sum())

# Reuters
Xreuters_200, jreuters_200 = get_reuters(max_docs=200, min_word_count=10)
cutoff = np.where(jreuters_200 == 100)[0][0]
Xr_train, jr_train = np.array(Xreuters_200)[:cutoff, :], jreuters_200[:cutoff]
Xr_test, jr_test = np.array(Xreuters_200)[cutoff:, :], jreuters_200[cutoff:]
vocab_size = Xr_train.shape[1]
%time testr100 = HDP(f='categorical_fast', hypers=(50, 0.5*np.ones(50))).gibbs_direct(np.array(Xr_train), jr_train, iters=2000, Kmax=50)
def log(tex):
    if not tex['lower_index']:
        return scimath.log(tex['content'])
    return scimath.log(tex['content']) / scimath.log(tex['lower_index'])
# Assumed signature: the enclosing def was truncated in the source; from the
# call c(x, y, t) in d() below, this is evidently the body of c.
def c(x, y, t):
    return (1. - x) * m**2 + x * (x - 1.) * m**2 + x * t

def d(x, y, t):
    return b(x, y)**2 - 4. * a(y) * c(x, y, t)

def ym(x, y, t):
    return (b(x, y) - scimath.sqrt(d(x, y, t))) / (2. * a(y))

def yp(x, y, t):
    return (b(x, y) + scimath.sqrt(d(x, y, t))) / (2. * a(y))

norm = complex_quad(
    (lambda x: (1. / scimath.sqrt(d(x, n, t)))
     * (scimath.log((1. - x + ym(x, n, t)) / ym(x, n, t))
        - scimath.log((1. - x + yp(x, n, t)) / yp(x, n, t)))),
    0, 1)
print(norm)

for i in range(len(s)):
    e = s[i]
    val = complex_quad(
        (lambda x: (1. / scimath.sqrt(d(x, e, t)))
         * (scimath.log((1. - x + ym(x, e, t)) / ym(x, e, t))
            - scimath.log((1. - x + yp(x, e, t)) / yp(x, e, t)))),
        0, 1)
    Re[i] = val[0]
    Im[i] = val[1]

plt.xlabel(r'$\sqrt{s}/m_q$')
plt.ylabel(r'$\mathcal{M}(s)/\mathcal{M}(0)$')
plt.plot(scimath.sqrt(s) / m_pi, Re / norm[0], label='real part')
def log_log_func(variable, coefficient, intercept):
    ''' Log-log model. '''
    return math.e**(coefficient * log(variable) + intercept)
def log_func(variable, coefficient, x_intercept):
    ''' Logarithmic model. '''
    return coefficient * log(variable) + x_intercept
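# --- Usage sketch (hypothetical numbers): log_func is a line in log(x), and
# log_log_func is its exponentiated (power-law) counterpart.
import math
from math import log

print(log_func(100.0, 2.0, 1.0))      # 2*log(100) + 1, approximately 10.21
print(log_log_func(100.0, 2.0, 1.0))  # e^1 * 100^2, approximately 27182.82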
def test_JtJ(self):
    """ Test that the JtJ calculation doesn't crash """
    jtj = m2.GetJandJtJInLogParameters(log(params))
def func_1vN(Ecb, mu, T, Dm, Dp, itype, limit):
    """
    Function used when generating 1vN, Redfield approach kernel.

    Parameters
    ----------
    Ecb : float
        Energy.
    mu : float
        Chemical potential.
    T : float
        Temperature.
    Dm, Dp : float
        Bandwidth.
    itype : int
        | Type of integral for first order approach calculations.
        | itype=0: the principal parts are evaluated using the Fortran
          integration package QUADPACK routine dqawc through SciPy.
        | itype=1: the principal parts are kept, but approximated by the
          digamma function, valid for large bandwidth D.
        | itype=2: the principal parts are neglected.
        | itype=3: the principal parts are neglected and infinite
          bandwidth D is assumed.
    limit : int
        For itype=0, dqawc_limit determines the maximum number of
        sub-intervals in the partition of the given integration interval.

    Returns
    -------
    ndarray
        | Array of four complex numbers [cur0, cur1, en0, en1] containing
          momentum-integrated current amplitudes.
        | cur0 - particle current amplitude.
        | cur1 - hole current amplitude.
        | en0 - particle energy current amplitude.
        | en1 - hole energy current amplitude.
    """
    if itype == 0:
        alpha, Rm, Rp = (Ecb - mu) / T, (Dm - mu) / T, (Dp - mu) / T
        cur0, err = quad(fermi_func, Rm, Rp, weight='cauchy', wvar=alpha,
                         epsabs=1.0e-6, epsrel=1.0e-6, limit=limit)
        cur0 = cur0 + (-1.0j * pi * fermi_func(alpha) if Rm < alpha < Rp else 0)
        cur1 = cur0 + log(abs((Rm - alpha) / (Rp - alpha)))
        cur1 = cur1 + (1.0j * pi if Rm < alpha < Rp else 0)
        #
        const0 = T * ((-Rm if Rm < -40 else log(1 + exp(-Rm)))
                      - (-Rp if Rp < -40 else log(1 + exp(-Rp))))
        const1 = const0 + Dm - Dp
        #
        en0 = const0 + Ecb * cur0
        en1 = const1 + Ecb * cur1
    elif itype == 1:
        alpha, Rm, Rp = (Ecb - mu) / T, Dm / T, Dp / T
        cur0 = (digamma(0.5 + 1.0j * alpha / (2 * pi)).real
                - log(abs(Rm) / (2 * pi)))
        cur0 = cur0 - 1.0j * pi * fermi_func(alpha)
        cur1 = cur0 + log(abs(Rm / Rp))
        cur1 = cur1 + 1.0j * pi
        #
        en0 = -T * Rm + Ecb * cur0
        en1 = -T * Rp + Ecb * cur1
    elif itype == 2:
        alpha, Rm, Rp = (Ecb - mu) / T, (Dm - mu) / T, (Dp - mu) / T
        cur0 = -1.0j * pi * fermi_func(alpha) if Rm < alpha < Rp else 0
        cur1 = cur0 + (1.0j * pi if Rm < alpha < Rp else 0)
        en0 = Ecb * cur0
        en1 = Ecb * cur1
    elif itype == 3:
        alpha = (Ecb - mu) / T
        cur0 = -1.0j * pi * fermi_func(alpha)
        cur1 = cur0 + 1.0j * pi
        en0 = Ecb * cur0
        en1 = Ecb * cur1
    else:
        cur0, cur1, en0, en1 = 0, 0, 0, 0
    # -------------------------
    return np.array([cur0, cur1, en0, en1])
def calculate_fwhm(self):
    """ Convert a HWIeM width to a FWHM width. """
    if self.fwhm is not None:
        return self.fwhm
    else:
        return self.width * 2.0 * log(1.0 + sqrt(2.0))
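# --- Consistency sketch (standalone arithmetic): the sech constructor above
# converts FWHM -> HWIeM via width *= 0.5 / log(1 + sqrt(2)), and
# calculate_fwhm inverts it via width * 2 * log(1 + sqrt(2)).
from math import log, sqrt

fwhm = 10.0
hwiem = fwhm * 0.5 / log(1.0 + sqrt(2.0))
print(hwiem * 2.0 * log(1.0 + sqrt(2.0)))  # round trip recovers 10.0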