def model_two_basis_functions():
    """
    A test function that returns a model similar to model(), except that
    it uses the shapelet basis functions as the surface brightness and
    does not normalize.
    """
    data = empty((nepochs, grid_size, grid_size))
    beta2 = beta ** 2
    for t, z in star_track(nepochs):
        if t == 0:
            x = raytrace()
        else:
            x = raytrace(rE_true, z)

        n = 0
        K1 = 1.0 / sqrt(2 ** n * sqrt(pi) * factorial(n, 1) * beta)
        H1 = hermite(n)

        data[t] = (K1 * H1(x.real / beta) * exp(-x.real ** 2 / (2 * beta2))) * \
                  (K1 * H1(x.imag / beta) * exp(-x.imag ** 2 / (2 * beta2)))
        # data[t] *= 100

        # n = 1
        # K1 = 1.0 / sqrt(2**n * sqrt(pi) * factorial(n, 1) * beta)
        # H1 = hermite(n)
        #
        # data[t] += (K1 * H1(x.real/beta) * exp(-x.real**2/(2*beta2))) * \
        #            (K1 * H1(x.imag/beta) * exp(-x.imag**2/(2*beta2)))

    return data
def save_bessel_functions(N):
    """Generate N 2D shapelets and plot."""
    beta2 = beta ** 2
    B = empty((grid_size, grid_size))  # Don't want matrix behaviour here

    # -----------------------------------------------------------------------
    # Basis function constants, and hermite polynomials
    # -----------------------------------------------------------------------
    vals = [[n, 1.0 / sqrt((2 ** n) * sqrt(pi) * factorial(n, 1) * beta), 0, 0, 0]
            for n in xrange(N)]

    expreal = exp(-theta.real ** 2 / (2 * beta2))
    expimag = exp(-theta.imag ** 2 / (2 * beta2))
    for n, K, H, _, _ in vals:
        vals[n][3] = K * jn(n, theta.real) * expreal
        vals[n][4] = K * jn(n, theta.imag) * expimag

    pylab.figure()
    l = 0
    for v1 in vals:
        for v2 in vals:
            B = v1[3] * v2[4]
            pylab.subplot(N, N, l + 1)
            pylab.axis("off")
            pylab.imshow(B.T)
            l += 1
    pylab.suptitle("Shapelets N=%i Beta=%.4f" % (N, beta))
    # pylab.savefig("B%i.png" % N)
    pylab.show()
def integrand(bt, x):
    global a, st, G, lbd, ym, kp, c, var
    fun = -1
    if var == 1:
        fun = (polint(bt) * cos(bt * x) * (lbd + 2 * G)
               * (exp(bt * b) * (kp**2 - 1 - 2*bt*b + 2*kp*bt*b)
                  + exp(-bt * b) * (kp**2 - 1 + 2*bt*b - 2*kp*bt*b))
               / ((kp - 1) * (lbd * (kp*exp(2*bt*b) + kp*exp(-2*bt*b) + 2)
                              + G * kp * (exp(2*bt*b) + exp(-2*bt*b)))
                  + 2 * G * (kp**2 + kp + 4 * bt**2 * b**2 - 2)))
    elif var == 2:
        fun = (polint(bt) * cos(bt * x)
               * (G * (exp(bt*b) * (kp**2 - kp + 2*kp*bt*b)
                       - exp(-bt*b) * (kp**2 - kp - 2*kp*bt*b))
                  + lbd * (kp**2 - kp) * (exp(bt*b) - exp(-bt*b)))
               / ((kp**2 - kp) * (lbd + G) * sinh(2*bt*b) + 4*kp*G*b*bt))
    return fun
def calculate_ideogram(ages, errors, n=500):
    from numpy import array, linspace, zeros, ones
    from numpy.core.umath import exp
    from math import pi

    ages, errors = array(ages), array(errors)
    lages = ages - errors * 2
    uages = ages + errors * 2

    xmax, xmin = uages.max(), lages.min()
    spread = xmax - xmin
    xmax += spread * 0.1
    xmin -= spread * 0.1

    bins = linspace(xmin, xmax, n)
    probs = zeros(n)

    for ai, ei in zip(ages, errors):
        if abs(ai) < 1e-10 or abs(ei) < 1e-10:
            continue

        # calculate probability curve for ai +/- ei
        # p = 1/sqrt(2*pi*sigma^2) * exp(-(x-u)**2 / (2*sigma^2))
        # see http://en.wikipedia.org/wiki/Normal_distribution
        ds = (ones(n) * ai - bins) ** 2
        es = ones(n) * ei
        es2 = 2 * es * es
        gs = (es2 * pi) ** -0.5 * exp(-ds / es2)

        # cumulate probabilities (numpy element-wise addition)
        probs += gs

    return tuple(bins), tuple(probs), xmin, xmax
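# A minimal usage sketch for calculate_ideogram above; the ages and
# errors are made-up values, not real data.
ages = [10.0, 10.4, 11.1]     # hypothetical ages
errors = [0.20, 0.30, 0.25]   # hypothetical 1-sigma errors
bins, probs, xmin, xmax = calculate_ideogram(ages, errors, n=500)
print(max(probs))  # peak height of the summed Gaussians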
def sigmoid(inX):
    '''
    Sigmoid function.
    :param inX: input value
    :return: sigmoid of the input
    '''
    return 1.0 / (1 + exp(-inX))
def ridgeTest(xArr, yArr):
    '''
    Compute the regression coefficients for 30 different values of the
    parameter lam. The data is standardized first: every feature has its
    mean subtracted and is divided by its variance.
    :param xArr: input values
    :param yArr: target values
    :return: matrix of regression coefficients, one row per lambda
    '''
    xMat = mat(xArr)
    yMat = mat(yArr).T
    xMean = mean(xMat, 0)  # mean() computes the column means
    yMean = mean(yMat, 0)
    xVar = var(xMat, 0)    # var() computes the column variances
    xMat = (xMat - xMean) / xVar
    yMat = yMat - yMean
    numTestPts = 30        # number of lambda values to try
    wMat = zeros((numTestPts, shape(xMat)[1]))  # result matrix
    for i in range(numTestPts):
        ws = ridgeRegres(xMat, yMat, exp(i - 10))  # lambda grows exponentially
        wMat[i, :] = ws.T
    return wMat
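# ridgeTest above calls a ridgeRegres helper that is not shown in this
# section. A minimal sketch, assuming the standard closed-form ridge
# solution w = (X^T X + lam*I)^(-1) X^T y:
from numpy import eye, shape, linalg

def ridgeRegres(xMat, yMat, lam=0.2):
    xTx = xMat.T * xMat
    denom = xTx + eye(shape(xMat)[1]) * lam
    if linalg.det(denom) == 0.0:  # lam = 0 can leave this singular
        return None
    ws = denom.I * (xMat.T * yMat)  # closed-form ridge coefficients
    return ws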
def lwlr(testPoint, xArr, yArr, k=1.0):
    '''
    Locally weighted linear regression.
    Regression coefficients: w = (X^T W X)^(-1) X^T W y
    Gaussian kernel: W(i, i) = exp(||x^(i) - x||^2 / (-2 * k^2))
    :param testPoint: the query point
    :param xArr: input values
    :param yArr: target values
    :param k: Gaussian kernel bandwidth, user defined
    :return: prediction at testPoint
    '''
    xMat = mat(xArr)
    yMat = mat(yArr).T
    m = shape(xMat)[0]
    weights = mat(eye((m)))  # initialize the weight matrix
    for j in range(m):
        diffMat = testPoint - xMat[j, :]
        weights[j, j] = exp(diffMat * diffMat.T / (-2.0 * k ** 2))  # Gaussian kernel
    xTx = xMat.T * (weights * xMat)
    if linalg.det(xTx) == 0.0:  # check that the matrix is invertible
        print "This matrix is singular, cannot do inverse"
        return
    ws = xTx.I * (xMat.T * (weights * yMat))
    return testPoint * ws
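# Usage sketch for lwlr on a tiny synthetic data set, assuming numpy's
# mat/shape/eye/exp/linalg are in scope as in the snippet above. The
# points lie exactly on y = 2*x, so the weighted fit reproduces it.
xArr = [[1.0, 0.1], [1.0, 0.5], [1.0, 0.9]]  # bias term plus one feature
yArr = [0.2, 1.0, 1.8]
print(lwlr([1.0, 0.5], xArr, yArr, k=1.0))   # ~[[1.0]]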
def profile_exponential(R, Ie=0.40, Rd=0.50):
    return Ie * exp(-abs(R) / Rd)


beta = 0.20                        # arcsec - Basis function normalization
Nbases = 25                        # sqrt(Number of basis functions)
grid_phys = 2.0                    # arcsec - Physical size across grid
grid_radius = 71                   # pixels
grid_size = 2 * grid_radius + 1    # pixels
cell_size = grid_phys / grid_size  # arcsec/pixel
def pseudo_peak(center, start, stop, step, magnitude=500, peak_width=0.008):
    x = linspace(start, stop, step)
    gaussian = lambda x: magnitude * exp(-((center - x) / peak_width) ** 2)
    for i, d in enumerate(gaussian(x)):
        if abs(center - x[i]) < peak_width:
            # d = magnitude
            d = magnitude + magnitude / 50.0 * random.random()
        yield d
def pseudo_peak(center, start, stop, step, magnitude=500, peak_width=0.004, channels=1):
    x = linspace(start, stop, step)
    gaussian = lambda x: magnitude * exp(-((center - x) / peak_width) ** 2)
    for i, d in enumerate(gaussian(x)):
        if abs(center - x[i]) < peak_width:
            # d = magnitude
            # for j in xrange(channels):
            d = magnitude + magnitude / 50.0 * random.random()
        yield [d * (j + 1) for j in range(channels)]
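# Consumption sketch for the generator above, assuming numpy's linspace
# and exp plus the random module are in scope as in the snippet.
signal = list(pseudo_peak(5.0, 0.0, 10.0, 101, channels=2))
print(len(signal))     # 101 samples
print(len(signal[0]))  # 2 channels per sample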
def test_priority(self):
    class A(object):
        def __array__(self):
            return np.zeros(1)

        def __array_wrap__(self, arr, context):
            r = type(self)()
            r.arr = arr
            r.context = context
            return r

    class B(A):
        __array_priority__ = 20.0

    class C(A):
        __array_priority__ = 40.0

    x = np.zeros(1)
    a = A()
    b = B()
    c = C()
    f = ncu.minimum
    self.assertTrue(type(f(x, x)) is np.ndarray)
    self.assertTrue(type(f(x, a)) is A)
    self.assertTrue(type(f(x, b)) is B)
    self.assertTrue(type(f(x, c)) is C)
    self.assertTrue(type(f(a, x)) is A)
    self.assertTrue(type(f(b, x)) is B)
    self.assertTrue(type(f(c, x)) is C)
    self.assertTrue(type(f(a, a)) is A)
    self.assertTrue(type(f(a, b)) is B)
    self.assertTrue(type(f(b, a)) is B)
    self.assertTrue(type(f(b, b)) is B)
    self.assertTrue(type(f(b, c)) is C)
    self.assertTrue(type(f(c, b)) is C)
    self.assertTrue(type(f(c, c)) is C)
    # note: the closing parenthesis must follow type(...); the original
    # type(ncu.exp(a) is A) asserted the truthiness of a type object and
    # always passed
    self.assertTrue(type(ncu.exp(a)) is A)
    self.assertTrue(type(ncu.exp(b)) is B)
    self.assertTrue(type(ncu.exp(c)) is C)
def check_priority(self):
    class A(object):
        def __array__(self):
            return zeros(1)

        def __array_wrap__(self, arr, context):
            r = type(self)()
            r.arr = arr
            r.context = context
            return r

    class B(A):
        __array_priority__ = 20.0

    class C(A):
        __array_priority__ = 40.0

    x = zeros(1)
    a = A()
    b = B()
    c = C()
    f = minimum
    self.failUnless(type(f(x, x)) is ndarray)
    self.failUnless(type(f(x, a)) is A)
    self.failUnless(type(f(x, b)) is B)
    self.failUnless(type(f(x, c)) is C)
    self.failUnless(type(f(a, x)) is A)
    self.failUnless(type(f(b, x)) is B)
    self.failUnless(type(f(c, x)) is C)
    self.failUnless(type(f(a, a)) is A)
    self.failUnless(type(f(a, b)) is B)
    self.failUnless(type(f(b, a)) is B)
    self.failUnless(type(f(b, b)) is B)
    self.failUnless(type(f(b, c)) is C)
    self.failUnless(type(f(c, b)) is C)
    self.failUnless(type(f(c, c)) is C)
    # note: the closing parenthesis must follow type(...); the original
    # type(exp(a) is A) asserted the truthiness of a type object and
    # always passed
    self.failUnless(type(exp(a)) is A)
    self.failUnless(type(exp(b)) is B)
    self.failUnless(type(exp(c)) is C)
def _iwamoto_step(Ybus, J, F, dx, pvpq, pq, createJ, pvpq_lookup, npv, npq,
                  numba, dVa, dVm, Vm, Va, pv, j1, j2, j3, j4, j5, j6):
    if npv:
        dVa[pv] = dx[j1:j2]
    if npq:
        dVa[pq] = dx[j3:j4]
        dVm[pq] = dx[j5:j6]
    dV = dVm * exp(1j * dVa)

    if not (dV == 0.0).any():
        iwa_multiplier = _get_iwamoto_multiplier(Ybus, J, F, dV, dx, pvpq, pq,
                                                 createJ, pvpq_lookup, npv,
                                                 npq, numba)
    else:
        iwa_multiplier = 1.0

    Vm += iwa_multiplier * dVm
    Va += iwa_multiplier * dVa
    return Vm, Va
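# The step above works in polar form: the complex voltage correction is
# rebuilt as Vm * exp(1j * Va). A self-contained illustration with numpy:
import numpy as np

Vm = np.array([1.0, 0.98])
Va = np.array([0.0, -0.05])  # angles in radians
V = Vm * np.exp(1j * Va)     # complex bus voltages
print(np.abs(V))             # recovers Vm
print(np.angle(V))           # recovers Va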
def gauss_spline(x, n):
    """Gaussian approximation to B-spline basis function of order n.

    Parameters
    ----------
    n : int
        The order of the spline. Must be nonnegative, i.e., n >= 0

    References
    ----------
    .. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M.,
       Gerritsen F.A. (2007) Fast and Accurate Gaussian Derivatives Based
       on B-Splines. In: Sgallari F., Murli A., Paragios N. (eds) Scale
       Space and Variational Methods in Computer Vision. SSVM 2007.
       Lecture Notes in Computer Science, vol 4485. Springer, Berlin,
       Heidelberg
    """
    signsq = (n + 1) / 12.0
    return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
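# Numerical sanity check, assuming numpy's sqrt/pi/exp are in scope as in
# the snippet above: gauss_spline(x, n) is a normal density with variance
# (n + 1)/12, so it should integrate to ~1 over a wide enough interval.
import numpy as np

x = np.linspace(-3, 3, 10001)
print(np.trapz(gauss_spline(x, 3), x))  # ~1.0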
def adaBoostTrainDS(dataArr, classLabels, numIt=40):
    '''
    AdaBoost training based on single-level decision trees (stumps).
    :param dataArr: data set
    :param classLabels: class labels
    :param numIt: number of iterations, user defined
    :return: weakClassArr, the ensemble of weak classifiers;
             aggClassEst, the aggregated class estimate for each data point
    '''
    # initialization
    weakClassArr = []
    m = shape(dataArr)[0]
    D = mat(ones((m, 1)) / m)  # initial weight vector; its elements sum to 1
    aggClassEst = mat(zeros((m, 1)))
    for i in range(numIt):
        # build a decision stump
        bestStump, error, classEst = buildStump(dataArr, classLabels, D)
        print "D:", D.T
        # alpha is the weight assigned to this classifier:
        # alpha = (1/2) * ln[(1 - e) / e]
        alpha = float(0.5 * log((1.0 - error) / max(error, 1e-16)))
        bestStump['alpha'] = alpha
        weakClassArr.append(bestStump)  # store the best stump
        print "classEst: ", classEst.T
        # update the weight vector D:
        # correctly classified:   D[t+1] = D[t] * exp(-alpha) / sum(D)
        # incorrectly classified: D[t+1] = D[t] * exp(+alpha) / sum(D)
        expon = multiply(-1 * alpha * mat(classLabels).T, classEst)
        D = multiply(D, exp(expon))  # calc new D for next iteration
        D = D / D.sum()
        aggClassEst += alpha * classEst  # update the aggregated class estimate
        print "aggClassEst: ", aggClassEst.T
        aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T, ones((m, 1)))
        errorRate = aggErrors.sum() / m  # compute the error rate
        print "total error: ", errorRate
        if errorRate == 0.0:
            break  # exit the loop once the error rate reaches 0
    return weakClassArr, aggClassEst
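# A worked instance of the alpha formula used above, for an assumed stump
# error rate e = 0.2: alpha = 0.5 * ln((1 - e) / e).
from math import log, exp

e = 0.2
alpha = 0.5 * log((1.0 - e) / e)  # ~0.6931
# weight-update factors applied to D before renormalization:
print(exp(-alpha))  # 0.5 -- correctly classified points shrink
print(exp(alpha))   # 2.0 -- misclassified points grow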
def _cumulative_probability(self, ages, errors, xmi, xma):
    bins = linspace(xmi, xma, N)
    probs = zeros(N)

    for ai, ei in zip(ages, errors):
        if abs(ai) < 1e-10 or abs(ei) < 1e-10:
            continue

        # calculate probability curve for ai +/- ei
        # p = 1/sqrt(2*pi*sigma^2) * exp(-(x-u)**2 / (2*sigma^2))
        # see http://en.wikipedia.org/wiki/Normal_distribution
        ds = (ones(N) * ai - bins) ** 2
        es = ones(N) * ei
        es2 = 2 * es * es
        gs = (es2 * pi) ** -0.5 * exp(-ds / es2)

        # cumulate probabilities (numpy element-wise addition)
        probs += gs

    return bins, probs
def kernelTrans(X, A, kTup):
    '''
    Gaussian radial basis kernel; maps the data from a low-dimensional
    feature space into a high-dimensional one.
    K(x, y) = exp(-||x - y||^2 / (2 * sigma^2))
    :param X: numeric matrix
    :param A: numeric row vector
    :param kTup: tuple describing the kernel
    :return:
    '''
    m, n = shape(X)
    K = mat(zeros((m, 1)))
    if kTup[0] == 'lin':
        K = X * A.T
    elif kTup[0] == 'rbf':
        for j in range(m):
            deltaRow = X[j, :] - A
            K[j] = deltaRow * deltaRow.T
        K = exp(K / (-1 * kTup[1] ** 2))
    else:
        raise NameError('Houston We Have a Problem -- '
                        'That Kernel is not recognized')
    return K
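# Usage sketch for kernelTrans, assuming numpy's mat/zeros/shape/exp are
# in scope as in the snippet above; sigma = 1.3 is an arbitrary choice.
from numpy import mat

X = mat([[1.0, 2.0], [2.0, 3.0], [3.0, 3.0]])
K = kernelTrans(X, X[0, :], ('rbf', 1.3))
print(K)  # column of similarities; K[0] == 1 since A is X's first row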
trainTlab = sin(2 * pi * trainX)
trainT = trainTlab + trainNoise
w = polyfit(trainX, trainT, 9)
fig = ax[idx]
fig.set_title('M=%d' % val)
fig.plot(xlab, ylab)
fig.plot(trainX, trainT, 'ro')
fig.plot(xlab, polynomial(w, xlab), 'g')

# Ridge regression
n_alphas = 200
start = -40
end = -20
lnAlpha = arange(start, end, float(end - start) / n_alphas)
alphas = exp(lnAlpha)
# alphas = np.logspace(start, end, n_alphas, base=e)
clf = linear_model.Ridge(fit_intercept=True)

error = []
trainError = []
# myList = [([1. * i / N]) for i in range(N)]
powers = range(0, 9)
myList = array(x)[:, newaxis] ** array(powers)
for a in alphas:
    clf.set_params(alpha=a)
def transformX(self, x):
    z = [1]
    for kMean in self.kMeans:
        z.append(exp((-self.gamma) * (norm(subtract(x, kMean))) ** 2))
    return z
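# Usage sketch for transformX, passing a stand-in object for self; the
# kMeans centres and gamma value here are made up.
from numpy import exp, subtract
from numpy.linalg import norm

class _Stub(object):
    pass

stub = _Stub()
stub.kMeans = [[0.0, 0.0], [1.0, 1.0]]
stub.gamma = 1.5
print(transformX(stub, [0.5, 0.5]))  # [1, exp(-0.75), exp(-0.75)]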
def _i0_2(x):
    return exp(x) * _chbevl(32.0 / x - 2.0, _i0B) / sqrt(x)
def _i0_1(x):
    return exp(x) * _chbevl(x / 2.0 - 2, _i0A)
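# _i0_1 and _i0_2 above are the two-interval Chebyshev approximations
# behind numpy.i0 (small x, and x > 8), each evaluated in the
# exponentially scaled form exp(x) * series. A cross-check sketch against
# scipy, whose i0e is the scaled Bessel function exp(-x) * i0(x):
import numpy as np
from scipy.special import i0, i0e

x = np.array([0.5, 4.0, 20.0])
print(np.allclose(np.exp(x) * i0e(x), i0(x)))  # True: unscaling matches
print(np.allclose(np.i0(x), i0(x)))            # True: numpy agrees with scipy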
def test_expm1(self):
    assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2) - 1)
    assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6) - 1)
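# Why expm1 is worth testing separately: for tiny x, exp(x) - 1 suffers
# catastrophic cancellation while expm1(x) stays accurate. A short
# demonstration with numpy:
import numpy as np

x = 1e-12
print(np.exp(x) - 1.0)  # ~1.0000889e-12, only ~4 correct digits
print(np.expm1(x))      # ~1.0000000000005e-12, accurate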
def crack_2d_scat_kernel(
    phi_in,
    phi_out_array,
    vel_L,
    vel_T,
    density,
    frequency,
    use_incident_L,
    use_incident_T,
    x_nodes,
    h_nodes,
    A_x,
    A_z,
    S_LL,
    S_LT,
    S_TL,
    S_TT,
):
    """
    Work on one incident angle in order to cache the results of the two to
    four linear solves.
    """
    # Lamé coefficients, see http://subsurfwiki.org/wiki/Template:Elastic_modulus
    lame_lambda = density * (vel_L**2 - 2 * vel_T**2)
    lame_mu = density * vel_T**2

    omega = 2 * pi * frequency
    xi1 = 2 * pi * frequency / vel_L
    xi2 = 2 * pi * frequency / vel_T
    lambda_L = vel_L / frequency
    lambda_T = vel_T / frequency
    xi = vel_T / vel_L
    k_L = xi1  # alias
    k_T = xi2  # alias

    a_L = -1j * k_L * pi / xi2**2  # incident L wave
    a_T = -1j * k_T * pi / xi2**2  # incident S wave

    # normal vector to the crack
    nv = np.array([0.0, 1.0], np.complex128)  # force to complex to please numba

    sv = np.array([-sin(phi_in), -cos(phi_in)], np.complex128)  # force to complex to please numba
    tv = np.array([sv[1], -sv[0]], np.complex128)

    if use_incident_L:
        b_L = exp(1j * k_L * x_nodes * sv[0]) * basis_function(
            -k_L * h_nodes * sv[0])
        b_x = -2 * sv[0] * sv[1] * b_L
        b_z = -(1 / xi**2 - 2 * sv[0]**2) * b_L
        vxL = np.linalg.solve(A_x, b_x)
        vzL = np.linalg.solve(A_z, b_z)
    if use_incident_T:
        b_T = exp(1j * k_T * x_nodes * sv[0]) * basis_function(
            -k_T * h_nodes * sv[0])
        b_x = -(tv[0] * sv[1] + tv[1] * sv[0]) * b_T
        b_z = -2 * tv[1] * sv[1] * b_T
        vxT = np.linalg.solve(A_x, b_x)
        vzT = np.linalg.solve(A_z, b_z)

    for j, phi_out in enumerate(phi_out_array):
        ev = np.array([sin(phi_out), cos(phi_out)], np.complex128)
        tv = np.array([ev[1], -ev[0]], np.complex128)

        c_L = basis_function(xi1 * h_nodes * ev[0]) * exp(
            -1j * xi1 * ev[0] * x_nodes)
        c_T = basis_function(xi2 * h_nodes * ev[0]) * exp(
            -1j * xi2 * ev[0] * x_nodes)

        if use_incident_L:
            v_L = np.array([a_L * np.dot(vxL, c_L), a_L * np.dot(vzL, c_L)])
            v_T = np.array([a_L * np.dot(vxL, c_T), a_L * np.dot(vzL, c_T)])

            S_LL[j] = (
                1 / 4 * sqrt(2 / pi) * exp(-1j * pi / 4) * xi1**(5 / 2) *
                (lame_lambda / (density * omega**2) * (np.dot(v_L, nv)) +
                 2 * lame_mu / (density * omega**2) * np.dot(v_L, ev) *
                 np.dot(ev, nv)) / sqrt(lambda_L))
            S_LT[j] = (1 / 4 * sqrt(2 / pi) * exp(-1j * pi / 4) * xi2**(5 / 2) *
                       lame_mu / (density * omega**2) *
                       (np.dot(v_T, tv) * np.dot(ev, nv) +
                        np.dot(v_T, ev) * np.dot(tv, nv)) / sqrt(lambda_T))

        if use_incident_T:
            v_L = np.array([a_T * np.dot(vxT, c_L), a_T * np.dot(vzT, c_L)])
            v_T = np.array([a_T * np.dot(vxT, c_T), a_T * np.dot(vzT, c_T)])

            # This is the same expression as for LL and LT but v_L and v_T are
            # different.
            # Add a minus sign compared to the original code because change of
            # polarisation.
            S_TL[j] = -(
                1 / 4 * sqrt(2 / pi) * exp(-1j * pi / 4) * xi1**(5 / 2) *
                (lame_lambda / (density * omega**2) * (np.dot(v_L, nv)) +
                 2 * lame_mu / (density * omega**2) * np.dot(v_L, ev) *
                 np.dot(ev, nv)) / sqrt(lambda_L))
            S_TT[j] = -(1 / 4 * sqrt(2 / pi) * exp(-1j * pi / 4) * xi2**(5 / 2) *
                        lame_mu / (density * omega**2) *
                        (np.dot(v_T, tv) * np.dot(ev, nv) +
                         np.dot(v_T, ev) * np.dot(tv, nv)) / sqrt(lambda_T))

    return S_LL, S_LT, S_TL, S_TT
def psi2(z, r):
    res = complex(0.0)
    for i in range(1, self.maxn + 1):
        res += Ts[i - 1] * exp(I * kks[i - 1] * z) * self.phis[i - 1](r)
    return res
def run_sim(data, N0):
    """
    Run the lensing simulation with the parameters provided in params.py.
    This consists of four steps:
    (1) Choose different values for rE.
    (2) Move the star across the sky and generate "observations". The first
        is unlensed.
    (3) Compute the projection and covariance matrices.
    (4) Compute the effective chi^2.

    data is a 3-dimensional array consisting of n 2D normalized surface
    brightness distributions. Using shapelets as the basis function, the
    data is reconstructed using test masses of a lensing star. The
    marginalized likelihood for each test mass is returned.
    """
    N = N0 ** 2  # Total number of basis functions
    nepochs = data.shape[0]
    grid_size = data.shape[1]
    print "nepochs = %i grid_size=%i" % (nepochs, grid_size)
    assert nepochs > 0
    assert grid_size > 0

    # ---------------------------------------------------------------------
    # The basis functions evaluated at the lensed positions
    L = empty((N, nepochs, grid_size, grid_size), numpy.float64)
    # Copied, flattened version of the above divided by sigma^2
    Lt = empty((N, nepochs * grid_size * grid_size), numpy.float64)
    # Projection of data on the model
    P = mat(empty((N, 1), numpy.float64))
    # Covariance matrix
    C_inv = mat(empty((N, N), numpy.float64))
    # Output array
    probs = empty((num_samples, 2), numpy.float64)

    # ---------------------------------------------------------------------
    # Basis function constants, and hermite polynomials.
    # Precompute the coefficients and setup the actual form of each hermite
    # function now, since they are constant over the run.
    # ---------------------------------------------------------------------
    # vals = [[n, 1.0/sqrt((2**n) * sqrt(pi) * factorial(n,1) * beta), hermite(n), 0,0]
    vals = [[n, 1.0 / sqrt((2 ** n) * sqrt(pi) * factorial(n, 1)), hermite(n), 0, 0]
            for n in xrange(N0)]

    sqrt_data = sqrt(data)
    beta2 = beta ** 2

    # ---------------------------------------------------------------------
    # Now we start the simulation.
    #
    # (1) Choose different values for rE.
    # ---------------------------------------------------------------------
    for i, rE in enumerate(linspace(rE_sample[0], rE_sample[1], num_samples)):
        # XXX: Just for some specific plots
        # if i not in [14]: continue
        # XXX: Just for some specific plots

        # -----------------------------------------------------------------
        # (2) Move the star across the sky
        # -----------------------------------------------------------------
        for t, z in star_track(nepochs):
            # -------------------------------------------------------------
            # (2a) Generate an "observation". The first is unlensed.
            # -------------------------------------------------------------
            if t == 0:
                print "%4i] rE=%f Epoch %i NO LENS" % (i, rE, t)
                xx = raytrace()
                mask = 1
            else:
                print "%4i] rE=%f Epoch %i @ %f,%f" % (i, rE, t, z.real, z.imag)
                xx = raytrace(rE, z)
                mask = star_mask(z)

            # -------------------------------------------------------------
            # Basis function approximation
            # -------------------------------------------------------------
            expreal = exp(-xx.real ** 2 / (2 * beta2))
            expimag = exp(-xx.imag ** 2 / (2 * beta2))
            for n, K, H, _, _ in vals:
                vals[n][3] = K * H(xx.real / beta) * expreal
                vals[n][4] = K * H(xx.imag / beta) * expimag

            n = 0
            for _, _, _, b1, _ in vals:
                for _, _, _, _, b2 in vals:
                    L[n, t] = b1 * b2 / beta * mask
                    n += 1

        # -----------------------------------------------------------------
        # (3) Compute the projection and covariance matrices.
        # -----------------------------------------------------------------
        for n in xrange(N):
            sum(L[n], out=P[n])
            Lt[n] = (L[n] / sqrt_data).flatten()

        print "Building C_inv"
        C_inv = mat(inner(Lt, Lt))
        print "Done"
        C = C_inv.I

        # -----------------------------------------------------------------
        # (3a) Optionally calculate the basis function coefficients and plot
        # the reconstruction of the image.
        # -----------------------------------------------------------------
        if 0:
            plot_reconstructions(i, data, rE, L, C, P, N)

        # -----------------------------------------------------------------
        # (4) Compute the effective chi^2. Note that we do not subtract the
        # third term (effectively the gamma_tot) here; we leave that for the
        # plotting program to do. chi2 here will then be about equal to
        # gamma_tot.
        # -----------------------------------------------------------------
        log_det = sum(log(eig(C, right=False).real))
        PCP = P.T * C * P
        chi2 = log_det + PCP
        probs[i] = rE, chi2
        print "rE,chi2 =", rE, chi2

    return probs
def sigmoid(theta=theta, a=a, b=b):
    bs = repeat(reshape(b, (len(b), 1)), numpeople, 1)
    # print b.shape, a.shape, theta.shape
    return 1.0 / (1.0 + exp(bs - dot(a, theta)))
def gauss_spline(x, n):
    """Gaussian approximation to B-spline basis function of order n.
    """
    signsq = (n + 1) / 12.0
    return 1 / sqrt(2 * pi * signsq) * exp(-x ** 2 / 2 / signsq)
def power_law_guess(self, y_s):
    # note: the range must run to len(y_s) + 1 so that log_x and log_y
    # have the same length; range(1, len(y_s)) is one element short
    log_x, log_y = log(range(1, len(y_s) + 1)), log(y_s)
    alpha, logy0, _, _, _ = linregress(log_x[1:], log_y[1:])
    y0 = exp(logy0)
    return y0, alpha
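# A small check of power_law_guess on synthetic data y = y0 * x**alpha;
# self is unused, so None is passed in its place. Assumes numpy's log/exp
# and scipy.stats.linregress are in scope as in the snippet above.
import numpy as np

x = np.arange(1, 51)
y_s = 2.0 * x ** -1.5
print(power_law_guess(None, y_s))  # ~(2.0, -1.5)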
def deVaucouleurs(R, Ie=1, m=4, Re=1.0):
    bm = 2 * m - 0.324
    # note: 1.0 / m keeps the exponent fractional; under Python 2 integer
    # division, 1 / m would be 0 and the profile would collapse to Ie
    return Ie * exp(-bm * sqrt((abs(R) / Re) ** 2 + 0.056 ** 2) ** (1.0 / m))
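# Evaluation sketch for deVaucouleurs above, assuming numpy's exp/sqrt are
# in scope; for m = 4 the approximation b_m = 2*m - 0.324 gives the
# classic de Vaucouleurs value b_4 ~ 7.676.
import numpy as np

R = np.linspace(0.0, 2.0, 5)
print(deVaucouleurs(R))  # brightness falling roughly as exp(-b_m * R**0.25)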