Example #1
def calcLambdaMat_2(jtj, Dmat):
    n = len(jtj)
    lambdaMat = (1. / scipy.sqrt(2.)) * scipy.sqrt(
        1. + (scipy.outer(scipy.diagonal(jtj), scipy.ones(n, 'd')) -
              scipy.outer(scipy.ones(n, 'd'), scipy.diagonal(jtj))) /
        scipy.sqrt(Dmat))
    return lambdaMat
Example #2
def calcR2mat_Terms(jtj, lambdaMat):
    n = len(jtj)
    jaa = scipy.outer(scipy.diagonal(jtj), scipy.ones(n, 'd'))
    jbb = scipy.outer(scipy.ones(n, 'd'), scipy.diagonal(jtj))
    crosstermMat = 2. * lambdaMat * scipy.sqrt(1. - lambdaMat**2.) * jtj
    diagtermMat = (lambdaMat**2.) * jaa + (1. - lambdaMat**2.) * jbb
    return crosstermMat, diagtermMat
Example #3
def calcR2mat_2(jtj, Dmat):
    n = len(jtj)
    jaa = scipy.outer(scipy.diagonal(jtj), scipy.ones(n, 'd'))
    jbb = scipy.outer(scipy.ones(n, 'd'), scipy.diagonal(jtj))
    r2mat = (1. / 2.) * (scipy.sqrt(Dmat) - 4. * (jtj**2) / scipy.sqrt(Dmat) +
                         (jaa + 4. * jtj * scipy.sqrt((jtj**2) / Dmat) + jbb))
    return r2mat
Example #4
def calcCovarianceMat(origMat):
    covarMat = scipy.dot(scipy.transpose(origMat), origMat)
    n = len(covarMat)
    jaa = scipy.outer(scipy.diagonal(covarMat), scipy.ones(n, 'd'))
    jbb = scipy.outer(scipy.ones(n, 'd'), scipy.diagonal(covarMat))
    covarMat = covarMat / scipy.sqrt(jaa)
    covarMat = covarMat / scipy.sqrt(jbb)
    return covarMat
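A quick usage sketch (not from the original source): because calcCovarianceMat normalizes the Gram matrix by its diagonal without centering, the result is the cosine-similarity matrix of the columns, with ones on the diagonal. The snippet relies on the old top-level SciPy aliases (scipy.dot, scipy.transpose, ...); on modern SciPy, substitute numpy inside the function.

import numpy as np

data = np.random.randn(100, 4)  # 100 observations of 4 variables (made-up test input)
corr = calcCovarianceMat(data)
assert np.allclose(np.diagonal(corr), 1.0)  # self-similarity is exactly 1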
Example #5
def calcR2mat_3(jtj, Dmat):
    #def calcR2mat_3(jtj,lambdaMat):
    n = len(jtj)
    jaa = scipy.outer(scipy.diagonal(jtj), scipy.ones(n, 'd'))
    jbb = scipy.outer(scipy.ones(n, 'd'), scipy.diagonal(jtj))
    r2mat = (1. / 2.) * (-scipy.sqrt(Dmat) + 4. * (jtj**2) / scipy.sqrt(Dmat) +
                         (jaa - 4. * jtj * scipy.sqrt((jtj**2) / Dmat) + jbb))
    #    r2mat = (lambdaMat**2.)*jaa + (1.-lambdaMat**2.)*jbb + 2.*lambdaMat*scipy.sqrt(1.-lambdaMat**2.)*jtj
    return r2mat
Example #6
def calcR2mat_4(jtj, Dmat):
    n = len(jtj)
    jaa = scipy.outer(scipy.diagonal(jtj), scipy.ones(n, 'd'))
    jbb = scipy.outer(scipy.ones(n, 'd'), scipy.diagonal(jtj))
    # old_div comes from past.utils (the python-future compatibility library)
    r2mat = (1. / 2.) * (-scipy.sqrt(Dmat) +
                         old_div(4. * (jtj**2), scipy.sqrt(Dmat)) +
                         (jaa + 4. * jtj * scipy.sqrt(old_div(
                             (jtj**2), Dmat)) + jbb))
    return r2mat
Example #7
def expectation_prop_inner(m0, V0, Y, Z, F, z, needed):
    # expectation propagation on a multivariate Gaussian for soft inequality constraints
    # m0, V0 are the mean vector and covariance before EP
    # Y is the inequality value; Z is the sign (1 for geq, -1 for leq); F is the softness variance
    # z is the number of EP rounds to run
    # returns mt, Vt: the value and variance of the observations created by EP
    m0 = sp.array(m0).flatten()
    V0 = sp.array(V0)
    n = V0.shape[0]
    print "expectation prpagation running on " + str(
        n) + " dimensions for " + str(z) + " loops:"
    mt = sp.zeros(n)
    Vt = sp.eye(n) * float(1e10)
    m = sp.empty(n)
    V = sp.empty([n, n])
    conv = sp.empty(z)
    for i in range(z):

        # compute m, V given the EP observations
        m, V = gaussian_fusion(m0, mt, V0, Vt)
        mtprev = mt.copy()
        Vtprev = Vt.copy()
        for j in [k for k in range(n) if needed[k]]:
            print([i, j])
            #the cavity dist at index j
            tmp = 1. / (Vt[j, j] - V[j, j])
            v_ = (V[j, j] * Vt[j, j]) * tmp
            m_ = tmp * (m[j] * Vt[j, j] - mt[j] * V[j, j])
            alpha = sp.sign(Z[j]) * (m_ - Y[j]) / (sp.sqrt(v_ + F[j]))
            pr = PhiR(alpha)

            if sp.isnan(pr):

                pr = -alpha
            beta = pr * (pr + alpha) / (v_ + F[j])
            kappa = sp.sign(Z[j]) * (pr + alpha) / (sp.sqrt(v_ + F[j]))

            #print [alpha,beta,kappa,pr]
            mt[j] = m_ + 1. / kappa
            #mt[j] = min(abs(mt[j]),1e5)*sp.sign(mt[j])
            Vt[j, j] = min(1e10, 1. / beta - v_)
        #print sp.amax(mtprev-mt)
        #print sp.amax(sp.diagonal(Vtprev)-sp.diagonal(Vt))
        #TODO make this a ratio instead of absolute
        delta = max(sp.amax(mtprev - mt),
                    sp.amax(sp.diagonal(Vtprev) - sp.diagonal(Vt)))
        conv[i] = delta
    print "EP finished with final max deltas " + str(conv[-3:])
    V = V0.dot(spl.solve(V0 + Vt, Vt))
    m = V.dot((spl.solve(V0, m0) + spl.solve(Vt, mt)).T)
    return mt, Vt
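The helper PhiR is not defined in this snippet. Judging from the NaN fallback pr = -alpha (the asymptote of phi(alpha)/Phi(alpha) as alpha goes to minus infinity), it is presumably the standard-normal hazard ratio that appears in EP updates for truncated-Gaussian factors. A sketch under that assumption:

from scipy.stats import norm

def PhiR(alpha):
    # Assumed definition, not from the original project: ratio of the
    # standard normal pdf to its cdf; for very negative alpha both
    # underflow and this returns NaN, which the caller replaces by -alpha.
    return norm.pdf(alpha) / norm.cdf(alpha)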
Example #8
def expectation_prop_inner(m0, V0, Y, Z, F, z, needed):
    # expectation propagation on a multivariate Gaussian for soft inequality constraints
    # m0, V0 are the mean vector and covariance before EP
    # Y is the inequality value; Z is the sign (1 for geq, -1 for leq); F is the softness variance
    # z is the number of EP rounds to run
    # returns mt, Vt: the value and variance of the observations created by EP
    m0 = sp.array(m0).flatten()
    V0 = sp.array(V0)
    n = V0.shape[0]
    print("expectation propagation running on " + str(n) + " dimensions for " +
          str(z) + " loops:")
    mt = sp.zeros(n)
    Vt = sp.eye(n) * float(1e10)
    m = sp.empty(n)
    V = sp.empty([n, n])
    conv = sp.empty(z)
    for i in range(z):

        # compute m, V given the EP observations
        m, V = gaussian_fusion(m0, mt, V0, Vt)
        mtprev = mt.copy()
        Vtprev = Vt.copy()
        for j in [k for k in range(n) if needed[k]]:
            print([i, j])
            # the cavity dist at index j
            tmp = 1. / (Vt[j, j] - V[j, j])
            v_ = (V[j, j] * Vt[j, j]) * tmp
            m_ = tmp * (m[j] * Vt[j, j] - mt[j] * V[j, j])
            alpha = sp.sign(Z[j]) * (m_ - Y[j]) / (sp.sqrt(v_ + F[j]))
            pr = PhiR(alpha)

            if sp.isnan(pr):
                pr = -alpha
            beta = pr * (pr + alpha) / (v_ + F[j])
            kappa = sp.sign(Z[j]) * (pr + alpha) / (sp.sqrt(v_ + F[j]))

            #print [alpha,beta,kappa,pr]
            mt[j] = m_ + 1. / kappa
            #mt[j] = min(abs(mt[j]),1e5)*sp.sign(mt[j])
            Vt[j, j] = min(1e10, 1. / beta - v_)
        #print sp.amax(mtprev-mt)
        #print sp.amax(sp.diagonal(Vtprev)-sp.diagonal(Vt))
        #TODO make this a ratio instead of absolute
        delta = max(sp.amax(mtprev - mt),
                    sp.amax(sp.diagonal(Vtprev) - sp.diagonal(Vt)))
        conv[i] = delta
    print("EP finished with final max deltas " + str(conv[-3:]))
    V = V0.dot(spl.solve(V0 + Vt, Vt))
    m = V.dot((spl.solve(V0, m0) + spl.solve(Vt, mt)).T)
    return mt, Vt
Example #9
 def gradEval(self, x, data1):
     # gradient is calculated in a vectorized way
     '''by calling the stack fn we create a giant matrix of variables that
     mirrors the data. Then we apply the gradient operation, i.e. 2*(w-data).
     Next, we call grad_pooling to add the relevant components; here
     stacking is done horizontally (784 components are stacked horizontally, to be exact)'''
     self.dat_temp = data1
     m = float(sp.shape(x)[1])  #no of columns
     self.gd = sp.ones(m)
     self.x_temp = x
     list(map(self.stack, data1[1], ['gd' for i in range(len(data1[1]))]))  # list() forces the lazy Python 3 map to execute
     self.gd = sp.delete(
         self.gd, (0),
         axis=0)  #deleting the first row (of ones created above)
     self.grad_vec_l = 2 * (self.gd - data1[0])
     self.grad_vec = sp.ones((10, 1))
     iter_temp = sp.array([i for i in range(784)])
     list(map(self.grad_pooling, iter_temp))  # again, force evaluation under Python 3
     self.grad_vec = sp.delete(self.grad_vec, (0), axis=1)
     '''normalizing the gradient vector if necessary;
     len_vec is an array of length 10, i.e. one entry per parameter row'''
     len_vec = sp.sqrt(
         sp.diagonal(sp.dot(self.grad_vec, sp.transpose(self.grad_vec))))
     for i in range(len(len_vec)):
         if len_vec[i] > 1000:
             self.grad_vec[i, :] = m * self.grad_vec[i, :] / float(
                 len_vec[i])
     return self.grad_vec  #10X784 matrix
Example #10
 def sgrad(self, x, ndata=100, bound=True, average=True):
     low_no = sp.random.randint(0, high=50000 - ndata)
     high_no = low_no + ndata
     sli = slice(low_no, high_no)
     data2 = []
     data2.append(self.data.train[0][sli])
     data2.append(self.data.train[1][sli])
     gradient = self.gradEval(x, tuple(data2))
     m = int(sp.shape(gradient)[1])
     '''u is the random direction matrix'''
     u = sp.random.randint(0, high=m, size=sp.shape(gradient))
     '''taking element wise product of u and gradient and then row wise sum to 
     get dot product of the matrices. dotprod is 10X1 matrix'''
     dotprod = (u * gradient).sum(axis=1, keepdims=True)
     stoc_grad = dotprod * u
     if bound:
         len_vec = sp.sqrt(
             sp.diagonal(sp.dot(stoc_grad, sp.transpose(stoc_grad))))
         #creating the list where len_vec is greater than desired value
         bool_list = list(map(lambda z: z > 10, len_vec))
         #converting boolean list to array of 1 0
         bool_array = sp.array(bool_list, dtype=int)
         #calculating factor to be divided with
         norm_factor = sp.divide(bool_array, float(m) * len_vec)
         norm_factor[norm_factor == 0] = 1  #replacing 0's with 1
         temp_norm = sp.reshape(norm_factor,
                                (len(norm_factor), 1)) * sp.ones(
                                    sp.shape(stoc_grad))
         stoc_grad = sp.divide(stoc_grad, temp_norm)
         '''alternatively we can use this
         for i in range(len(len_vec)): #this for loop is small as len_vec len is 10
             if len_vec[i] > 10:
                 stoc_grad[i,:] = stoc_grad[i,:]/(float(m)*float(len_vec[i]))'''
     return stoc_grad
Example #11
def get_vals(Cl_crosses_in, Cl_inputs):

    ##- Trim off smallest and largest scales (gives 7 bins of ell=104)
    Cl_crosses = Cl_crosses_in[:, 40:768]

    ##- Rebin
    cross_rebin = []
    cross_weights = []

    for i, j in enumerate(Cl_crosses):
        ell, crosses, cross_wei = rebin(Cl_crosses[i])
        cross_rebin.append(crosses)
        cross_weights.append(cross_wei)

    cross_cl = np.asarray(cross_rebin)
    cross_we = np.asarray(cross_weights)
    cross_mean = (np.sum(cross_cl * cross_we, axis=0) /
                  np.sum(cross_we, axis=0))

    input_mean = (np.sum(Cl_inputs, axis=0) / 100)

    ##- Calculate covariance matrices, variance and errors
    QW = (cross_cl - cross_mean) * cross_we
    cross_cov = QW.T.dot(QW) / cross_we.T.dot(cross_we)
    cross_var = sp.diagonal(cross_cov)
    cross_stdev = np.sqrt(cross_var)
    #cross_stdev = np.sqrt(cross_var/100)

    ##- Calc chi2
    chi2 = np.sum((cross_mean - 0.)**2 / cross_stdev**2)

    return ell, cross_mean, cross_stdev, input_mean, chi2
Example #12
def rand_unitary(n):
    x = (sp.randn(n, n) + 1j * sp.randn(n, n)) / np.sqrt(2.0)
    q, r = np.linalg.qr(x)
    d = sp.diagonal(r)
    ph = d / sp.absolute(d)
    q = np.multiply(q, ph, q)
    return q
Example #13
def get_vals(cl_crosses_in):

    ##- Trim off smallest and largest scales (gives 7 bins of ell=104)
    #cl_crosses = cl_crosses_in[:,40:768]

    ##- Rebin
    cross_rebin = []
    cross_weights = []

    for i, j in enumerate(cl_crosses_in):
        ell, crosses, cross_wei = rebin(cl_crosses_in[i].ravel(),
                                        ellmin=48,
                                        ellmax=768,
                                        nell=7)
        cross_rebin.append(crosses)
        cross_weights.append(cross_wei)

    cross_cl = np.asarray(cross_rebin)
    cross_we = np.asarray(cross_weights)
    cross_mean = (np.sum(cross_cl * cross_we, axis=0) /
                  np.sum(cross_we, axis=0))

    ##- Calculate covariance matrices, variance and errors
    QW = (cross_cl - cross_mean) * cross_we
    cross_cov = QW.T.dot(QW) / cross_we.T.dot(cross_we)
    cross_var = sp.diagonal(cross_cov)
    cross_stdev = np.sqrt(cross_var)
    #cross_stdev = np.sqrt(cross_var/100)

    ##- Calc chi2
    chi2 = np.sum((cross_mean - 0.)**2 / cross_stdev**2)
    chi2_cov = cross_mean.dot(np.linalg.inv(cross_cov).dot(cross_mean))
    return ell, cross_mean, cross_stdev, chi2_cov
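Examples #11 and #13 use two chi-squared conventions: per-bin variances only (cross_stdev) versus the full covariance (chi2_cov). A toy comparison with made-up numbers:

import numpy as np

mean = np.array([0.1, -0.2, 0.05])
cov = np.diag([0.04, 0.09, 0.01])  # diagonal toy covariance
chi2_diag = np.sum(mean**2 / np.diagonal(cov))
chi2_full = mean @ np.linalg.inv(cov) @ mean
assert np.isclose(chi2_diag, chi2_full)  # equal here only because cov is diagonal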
Example #14
 def wedge(self, da, co):
     we = 1 / sp.diagonal(co)
     w = self.W.dot(we)
     Wwe = self.W * we
     mask = w > 0
     Wwe[mask, :] /= w[mask, None]
     d = Wwe.dot(da)
     return self.r, d, Wwe.dot(co).dot(Wwe.T)
Example #15
def randomHaarUnitary(n):
    """A Haar distributed random n x n unitary matrix
    """
    z = (scipy.randn(n, n) + 1j * scipy.randn(n, n)) / scipy.sqrt(2.0)
    q, r = np.linalg.qr(z)
    d = scipy.diagonal(r)
    ph = d / scipy.absolute(d)
    q = scipy.multiply(q, ph, q)
    return q
Example #16
def haar(d):
    """
    to generate random matrix for Haar measure
    see https://arxiv.org/pdf/math-ph/0609050.pdf
    """
    array = (randn(d, d) + 1j * randn(d, d)) / np.sqrt(2.0)
    ortho, upper = qr(array)
    diag = diagonal(upper)
    temp = diag / absolute(diag)
    result = multiply(ortho, temp, ortho)
    return result
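The snippet assumes randn, qr, diagonal, absolute and multiply are already imported (NumPy provides all of them). A minimal check, under those assumed imports, that the output is unitary:

import numpy as np
from numpy.random import randn
from numpy.linalg import qr
from numpy import diagonal, absolute, multiply

U = haar(4)
assert np.allclose(U @ U.conj().T, np.eye(4))  # Haar-random unitary: U U* = I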
Example #17
    def atomsdb_get_features(self, at_db, return_features=False):
        """
        type(at_db) == quippy.io.AtomsList
        """
        IVs = []
        EIGs = []
        Y = []
        exps, r_cuts = self.iv_params[0], self.iv_params[1]

        # each iv is an independent information channel
        for feature, (r_cut, exp) in enumerate(zip(r_cuts, exps)):
            print("Evaluating database feature %d of %d..." % (feature+1, exps.size))
            ivs = sp.zeros((sp.asarray(at_db.n).sum(), 3))
            eigs = sp.zeros((sp.asarray(at_db.n).sum(), self.n_eigs))
            i = 0
            for atoms in at_db:
                atoms.set_cutoff(8.0)
                atoms.calc_connect()
                for at_idx in frange(atoms.n):
                    if feature == 0: Y.append(sp.array(atoms.force[at_idx])) # get target forces
                    ivs[i] = internal_vector(atoms, at_idx, exp, r_cut, do_calc_connect=False)
                    eigs[i] = coulomb_mat_eigvals(atoms, at_idx, r_cut, do_calc_connect=False, n_eigs=self.n_eigs)
                    i+=1
            IVs.append(ivs)
            EIGs.append(eigs)
        # rescale eigenvalues descriptor
        if self.normalise_scalar:
            eig_means = sp.array([e[e.nonzero()[0], e.nonzero()[1]].mean() for e in EIGs])
            eig_stds = sp.array([e[e.nonzero()[0], e.nonzero()[1]].std() for e in EIGs])
            eig_stds[eig_stds == 0.] = 1.
            EIGs = [(e - mean) / std for e, mean, std in zip(EIGs, eig_means, eig_stds)]
        # rescale internal vector to have average length = 1
        if self.normalise_ivs:
            # iv_stds = [e[e.nonzero()[0], e.nonzero()[1]].std() for e in IVs]
            # iv_stds[iv_stds == 0.] = 1.
            iv_means = [sp.array([LA.norm(vector) for vector in e]).mean() for e in IVs]
            IVs = [iv / mean for iv, mean in zip(IVs, iv_means)]

        # output cleanup: add machine epsilon if force is exactly zero
        Y = sp.asarray(Y)
        Y[sp.array([LA.norm(y) for y in Y]) <= MACHINE_EPSILON] = 10 * MACHINE_EPSILON * sp.ones(3)  # list comprehension: map is lazy in Python 3

        # correlations wrt actual forces
        IV_corr = sp.array([sp.diagonal(spdist.cdist(Y, iv, metric='correlation')).mean() for iv in IVs])

        if return_features:
            return IVs, EIGs, Y, IV_corr, iv_means, eig_means, eig_stds
        else:
            self.ivs = IVs
            self.eigs = EIGs
            self.y = Y
            self.iv_corr = IV_corr
            self.iv_means = iv_means
            self.eig_means, self.eig_stds = eig_means, eig_stds
Example #18
def orthogonal(n):
    """Generate a random orthogonal 'd' dimensional matrix, using the
    the technique described in:
    Francesco Mezzadri, "How to generate random matrices from the
    classical compact groups"
    """
    n = int( n )
    z = sc.randn(n, n)
    q,r = sc.linalg.qr(z)
    d = sc.diagonal(r)
    ph = d/sc.absolute(d)
    q = sc.multiply(q, ph, q)
    return q
Example #19
def orthogonal(n):
    """Generate a random orthogonal 'd' dimensional matrix, using the
    the technique described in:
    Francesco Mezzadri, "How to generate random matrices from the
    classical compact groups"
    """
    n = int(n)
    z = sc.randn(n, n)
    q, r = sc.linalg.qr(z)
    d = sc.diagonal(r)
    ph = d / sc.absolute(d)
    q = sc.multiply(q, ph, q)
    return q
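The multiplication by ph = d/|d| in Examples #18 and #19 is the Mezzadri correction: raw QR output is not Haar-distributed because the signs (phases) of R's diagonal are biased, and absorbing them into Q fixes the distribution. A self-contained NumPy illustration of the same steps:

import numpy as np

z = np.random.randn(5, 5)
q, r = np.linalg.qr(z)
d = np.diagonal(r)
q = q * (d / np.absolute(d))  # rescale each column by the sign of diag(R)
assert np.allclose(q.T @ q, np.eye(5))  # the correction preserves orthogonality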
Example #20
def addmins(G, X, Y, S, D, xmin, mode=OFFHESSZERO, GRADNOISE=1e-9, EP_SOFTNESS=1e-9, EPROP_LOOPS=20):
    dim = X.shape[1]
    # grad elements are zero
    Xg = sp.vstack([xmin] * dim)
    Yg = sp.zeros([dim, 1])
    Sg = sp.ones([dim, 1]) * GRADNOISE
    Dg = [[i] for i in range(dim)]

    # offdiag hessian elements
    nh = ((dim - 1) * dim) // 2  # integer division: a float count breaks the vstack below
    Xh = sp.vstack([sp.empty([0, dim])] + [xmin] * nh)
    Dh = []
    for i in range(dim):
        for j in range(i):
            Dh.append([i, j])

    class MJMError(Exception):
        pass

    if mode == OFFHESSZERO:
        Yh = sp.zeros([nh, 1])
        Sh = sp.ones([nh, 1]) * GRADNOISE
    elif mode == OFFHESSINFER:
        raise MJMError("addmins with mode offhessinfer not implemented yet")
    else:
        raise MJMError("invalid mode in addmins")

    # diag hessian and min
    Xd = sp.vstack([xmin] * (dim + 1))
    Dd = [[sp.NaN]] + [[i, i] for i in range(dim)]
    [m, V] = G.infer_full_post(Xd, Dd)
    for i in range(dim):
        if V[i, i] < 0:
            print([m, V])
            raise MJMError('negative on diagonal')

    yminarg = sp.argmin(Y)
    Y_ = sp.array([Y[yminarg, 0]] + [0.] * dim)
    Z = sp.array([-1] + [1.] * dim)
    F = sp.array([S[yminarg, 0]] + [EP_SOFTNESS] * dim)
    [Yd, Stmp] = eprop.expectation_prop(m, V, Y_, Z, F, EPROP_LOOPS)
    Sd = sp.diagonal(Stmp).flatten()
    Sd.resize([dim + 1, 1])
    Yd.resize([dim + 1, 1])
    # concat the obs
    Xo = sp.vstack([X, Xg, Xd, Xh])
    Yo = sp.vstack([Y, Yg, Yd, Yh])
    So = sp.vstack([S, Sg, Sd, Sh])
    Do = D + Dg + Dd + Dh

    return [Xo, Yo, So, Do]
Example #21
def h_diag_target_fun(X):
    results = []
    X = sp.asarray(X)
    thX = the.dvector('thX')
    y = - 0.8 * twoD_gauss(thX[0], thX[1], sp.array([-1,-1]), sp.array([3,2]), sp.pi/4
      ) + 1.2 * twoD_gauss(thX[0], thX[1], sp.array([3,0]), sp.array([1,1]), 0
      ) + 2.0 * twoD_gauss(thX[0], thX[1], sp.array([-2,2]), sp.array([1,4]), 0)
    grady = the.grad(y, thX)
    H, updates = theano.scan(lambda i, grady, thX: the.grad(grady[i], thX),
                             sequences=the.arange(grady.shape[0]),
                             non_sequences=[grady, thX])
    hfun = theano.function([thX], H, updates=updates, allow_input_downcast=True)
    if len(sp.shape(X)) == 0:
        thX = the.scalar('thX')  # note: this rebinding does not affect the already-compiled hfun
        return sp.diagonal(hfun(X))
    elif len(X.shape) == 1:
        return sp.diagonal(hfun(X))
    elif len(X.shape) == 2:
        results = []
        for x in X:
            results.append(sp.diagonal(hfun(x)))
        return sp.array(results)
    else:
        print("Bad Input")
Example #22
def aboveDiagFlat(mat, keepDiag=False, offDiagMult=None):
    """
    Return a flattened list of all elements of the 
    matrix above the diagonal.
    
    Use offDiagMult = 2 for symmetric J matrix.
    """
    m = copy.copy(mat)
    if offDiagMult is not None:
        m *= offDiagMult * (1. - scipy.tri(len(m))) + scipy.diag(
            scipy.ones(len(m)))
    begin = 0 if keepDiag else 1
    return scipy.concatenate([scipy.diagonal(m, i)
                              for i in range(begin, len(m))])
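A usage sketch (illustrative values; aboveDiagFlat itself needs the old scipy aliases, which NumPy also provides): note the elements come out diagonal-by-diagonal, not row-by-row.

import numpy as np

m = np.array([[1., 2., 3.],
              [4., 5., 6.],
              [7., 8., 9.]])
print(aboveDiagFlat(m))                 # [2. 6. 3.]: diagonal k=1 first, then k=2
print(aboveDiagFlat(m, keepDiag=True))  # [1. 5. 9. 2. 6. 3.]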
Example #23
def haar_measure(n):
    """A Random matrix distributed with the Haar measure.

    For more details, see :cite:`mezzadri2006`.

    Args:
        n (int): matrix size
    Returns:
        array: an nxn random matrix
    """
    z = (sp.randn(n, n) + 1j * sp.randn(n, n)) / np.sqrt(2.0)
    q, r = qr(z)
    d = sp.diagonal(r)
    ph = d / np.abs(d)
    q = np.multiply(q, ph, q)
    return q
Example #24
def random_interferometer(N):
    r"""Random unitary matrix representing an interferometer.

    For more details, see :cite:`mezzadri2006`.

    Args:
        N (int): number of modes

    Returns:
        array: random :math:`N\times N` unitary distributed with the Haar measure
    """
    z = randnc(N, N) / np.sqrt(2.0)
    q, r = sp.linalg.qr(z)
    d = sp.diagonal(r)
    ph = d / np.abs(d)
    U = np.multiply(q, ph, q)
    return U
Example #25
    def estimate(self):
        self.nobs = self.Y.shape[0]
        self.ncoef = self.X.shape[1]

        self.Q_inv = linalg.inv(self.X.T.dot(self.X))
        self.A = dot(self.Q_inv, self.X.T)

        try:
            self.N = dot(self.X, self.A)
        except Exception:  # fall back for pandas objects, which need .values here
            self.N = dot(self.X.values, self.A)

        self.M = np.eye(self.nobs) - self.N
        XY = dot(self.X.T, self.Y)
        self.b = dot(self.Q_inv, XY)

        self.df_e = self.nobs - self.ncoef
        self.df_r = self.ncoef - 1

        self.e = self.Y - dot(self.X, self.b)
        self.sse = dot(self.e, self.e) / self.df_e  # SSE
        self.se = np.sqrt(diagonal(self.sse * self.Q_inv))  # Non robust SE

        self.t = self.b / self.se
        self.p_value = (1 - stats.t.cdf(abs(self.t), self.df_e)) * 2

        self.cov_params = self.sse * self.Q_inv
        self.white_cov = dot(
            dot(
                dot(dot(self.Q_inv, self.X.T),
                    self.e.values**2 * np.eye(self.nobs)), self.X), self.Q_inv)

        self.t_r = self.b / self.white_cov.diagonal()
        self.p_value_r = (1 - stats.t.cdf(abs(self.t_r), self.df_e)) * 2

        # TODO: make this take optional arg for one-sided vs. default of 2
        self.R2 = 1 - self.e.var() / self.Y.var()
        self.R2adj = 1 - (1 - self.R2) * ((self.nobs - 1) /
                                          (self.nobs - self.ncoef))
        self.F = (self.R2 / self.df_r) / ((1 - self.R2) / self.df_e)
        self.F_p_value = 1 - stats.f.cdf(self.F, self.df_r, self.df_e)
        self.s_sq = self.e.T.dot(self.e) / (self.nobs - self.ncoef
                                            )  # Greene p161
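The heart of estimate() is ordinary least squares via the normal equations. A standalone sketch with hypothetical data (the class keeps all of these on self):

import numpy as np
from numpy import dot, diagonal
from scipy import stats, linalg

X = np.random.randn(50, 3)
Y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * np.random.randn(50)

Q_inv = linalg.inv(X.T.dot(X))
b = dot(Q_inv, dot(X.T, Y))                   # coefficients: (X'X)^-1 X'Y
e = Y - dot(X, b)                             # residuals
sse = dot(e, e) / (X.shape[0] - X.shape[1])   # residual variance, df-corrected
se = np.sqrt(diagonal(sse * Q_inv))           # non-robust standard errors
t = b / se
p = (1 - stats.t.cdf(abs(t), X.shape[0] - X.shape[1])) * 2  # two-sided p-values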
Example #26
def random_interferometer(N):
    r"""Returns a random unitary matrix representing an interferometer.

    For more details, see :cite:`mezzadri2006`.

    Args:
        N (int): number of modes
    Returns:
        array: random :math:`N\times N` unitary distributed with the Haar measure
    """
    z = randnc(N, N)/np.sqrt(2.0)
    q, r = sp.linalg.qr(z)
    d = sp.diagonal(r)
    ph = d/np.abs(d)
    U = np.multiply(q, ph, q)
    return U
Example #27
def smooth_cov(da, we, rp, rt, drt=4, drp=4, co=None):

    if co is None:
        co = cov(da, we)

    nda = co.shape[1]
    var = sp.diagonal(co)
    if sp.any(var == 0.):
        print('WARNING: data has some empty bins, impossible to smooth')
        print('WARNING: returning the unsmoothed covariance')
        return co

    cor = co / sp.sqrt(var * var[:, None])

    cor_smooth = sp.zeros([nda, nda])

    dcor = {}
    dncor = {}

    for i in range(nda):
        print("\rsmoothing {}".format(i), end="")
        for j in range(i + 1, nda):
            idrp = round(abs(rp[j] - rp[i]) / drp)
            idrt = round(abs(rt[i] - rt[j]) / drt)
            if (idrp, idrt) not in dcor:
                dcor[(idrp, idrt)] = 0.
                dncor[(idrp, idrt)] = 0

            dcor[(idrp, idrt)] += cor[i, j]
            dncor[(idrp, idrt)] += 1

    for i in range(nda):
        cor_smooth[i, i] = 1.
        for j in range(i + 1, nda):
            idrp = round(abs(rp[j] - rp[i]) / drp)
            idrt = round(abs(rt[i] - rt[j]) / drt)
            cor_smooth[i, j] = dcor[(idrp, idrt)] / dncor[(idrp, idrt)]
            cor_smooth[j, i] = cor_smooth[i, j]

    print("\n")
    co_smooth = cor_smooth * sp.sqrt(var * var[:, None])
    return co_smooth
Example #28
def mask(mat, items):
    """Returns a matrix with the given items retained (and 0's elsewhere).
    items can be:
    - None or 'all': all items (in this case, mat is returned, not a copy)
    - 'diagonal' or 'diag': only diagonal items
    - 'scalar': only a constant multiple of the identity
    - a list of pairs: a list of retained indices
    """
    if items is None or items == 'all':
        return mat
    elif items == 'diagonal' or items == 'diag':
        res = sp.zeros(mat.shape)
        for i in range(min(mat.shape[0], mat.shape[1])):
            res[i, i] = mat[i, i].real
        return res
    elif items == 'scalar':
        s = sum(v.real for v in sp.diagonal(mat))
        res = sp.eye(*mat.shape) * s  # eye takes dimensions, not a shape tuple
        return res
    else:
        res = sp.zeros(mat.shape)
        for (i, j) in items:
            res[i, j] = mat[i, j].real
        return res
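A usage sketch for mask; the snippet's sp alias maps directly onto NumPy:

import numpy as sp

A = sp.arange(9.0).reshape(3, 3)
print(mask(A, 'diag'))     # keeps 0, 4, 8 on the diagonal, zeros elsewhere
print(mask(A, [(0, 2)]))   # keeps only A[0, 2]
print(mask(A, 'scalar'))   # trace(A) times the identity, per the code above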
Example #29
 def setCovariance(self, cov):
     """ set hyperparameters from given covariance """
     self.setParams(sp.log(sp.diagonal(cov)))
Example #30
 def setCovariance(self, cov):
     """ set hyperparameters from given covariance """
     self.setParams(sp.log(sp.diagonal(cov)))
Example #31
# Make predictions:
# Rstar = scipy.linspace(0.63, 0.93, 24*30)
fits_file = scipy.io.readsav(
    '/home/markchil/codes/gptools/demo/nth_samples_1101014006.save')
# Average over the inexplicably shifting Rmajor array:
Rstar = scipy.mean(fits_file.te_fit.rmajor[0][:, 32:72], axis=1)
# Rstar = fits_file.ne_fit.rmajor[0][:, 0]
Te_nth = fits_file.te_fit.te_comb_fit[0][:, 32:72]
mean_nth, std_nth = gptools.compute_stats(Te_nth, robust=robust)

mean_start = time.time()
# import pdb; pdb.set_trace()
mean, cov = gp.predict(Rstar, noise=False)
mean_elapsed = time.time() - mean_start
mean = scipy.asarray(mean).flatten()
std = scipy.sqrt(scipy.diagonal(cov))

meand_start = time.time()
meand, covd = gp.predict(Rstar, noise=False, n=1)
meand_elapsed = time.time() - meand_start
meand = scipy.asarray(meand).flatten()
stdd = scipy.sqrt(scipy.diagonal(covd))

print("Optimization took: %.2fs\nMean prediction took: %.2fs\nGradient "
      "prediction took: %.2fs\n" % (opt_elapsed, mean_elapsed, meand_elapsed))

# meand_approx = scipy.gradient(mean, Rstar[1] - Rstar[0])

f = plt.figure()

# f.suptitle('Univariate GPR on TS data')
Example #32
 def computeLogPji(self, transmatrix, j, i):
     """
     This code calculates the logarithm of P_i[tj<ti] (Pji for short)
     for a specific i and j, according to Brian's paper on Dropbox (as of 
     writing this comment, it is on page 5).  
     
     To do this, it first calculates the matrix (I-F_{j}^{i}) which we
     denote here as A, as well as the column vector F_{notj,j}.  The vector p_m 
     is the solution to the equation A*p_m=F_{notj,j}, and the i'th element of
     this p_m is P_i[tj<ti].  From a linear algebra standpoint, the equation is 
     solved using Cramer's rule, combined with some log math.  
     
     ######Algorithm notes:######
     This routine uses logarithms to calculate Pji, in case of an
     over/under/whateverflow error.  This is generally safer for large
     sensitivities.
     
     There is no good reason to use Cramer's rule here, apart from the fact that
     Erik doesn't trust python's Linear Algebra solver.  This will hopefully be 
     replaced by a better method, once Erik gets off of his lazy ass and 
     rewrites all this so that it handles arbitrary precision.
     -- written by Erik, incorporated by Jeremy (date 5/16/2013)
     """
     
     numcolumns = len(transmatrix[0])
     
     # We form F_{j}^{i}, by taking the transition matrix, setting the i'th row 
     # equal to zero, and then taking the j'th principal minor.
     
     Fsupi = sp.copy(transmatrix)
     Fsupi[:,i] = 0
     Fnotjj = sp.delete(transmatrix[:,j],j)
     Fsupisubj = sp.delete(sp.delete(Fsupi,j,0),j,1)
     newi = i
     
     # Since we have deleted a column and row from our system, all the indices 
     # above j have been shifted by one.  Therefore, if i>j, we need change i 
     # correspondingly.
     if i > j:
         newi -= 1
         
     # We define the matrix A=(I-F_{j}^{i})
     Amatrix = sp.eye(numcolumns-1)-Fsupisubj
     
     ### We start calculating p_ji using Cramer's Rule. 
     # We calculate the top matrix in Cramer's Rule
     Aswapped = sp.copy(Amatrix)
     Aswapped[:,newi] = Fnotjj[:]
     
     # To take the determinants of the two matrices, we use LU decomposition.
     Pdenom, Ldenom, Udenom = sp.linalg.lu(Amatrix)
     Pnumer,Lnumer,Unumer = sp.linalg.lu(Aswapped)
     # The determinant is just the product of the elements on the diagonal.
     # Since Pji is guaranteed positive, we can also just take the absolute 
     # value of all the diagonal elements, and sum their logarithms.
     numerdiag = abs(sp.diagonal(Unumer))
     denomdiag = abs(sp.diagonal(Udenom))
     lognumdiag = sp.log(numerdiag)
     logdenomdiag = sp.log(denomdiag)
     logpji = sp.sum(lognumdiag) - sp.sum(logdenomdiag)
     
     # Note that this returns ln (log base e), not log base 10.
     return logpji
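The determinant trick described in the docstring, in isolation: for an LU factorization P L U = A with unit-diagonal L, |det A| equals the product of |diag(U)|, so the log-determinant is a sum of logs. A small check:

import numpy as np
import scipy.linalg

A = np.array([[2., 1.],
              [1., 3.]])
P, L, U = scipy.linalg.lu(A)
log_abs_det = np.sum(np.log(np.abs(np.diagonal(U))))
assert np.isclose(log_abs_det, np.log(abs(np.linalg.det(A))))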
Example #33
def spectro_perf(fl, iv, re, tol=1e-3, log=None, ndiag_max=27):
    t0 = time.time()
    ## compute R and F
    R = sp.sqrt(iv)*re
    F = R.dot(sp.sqrt(iv)*fl)
    R = R.dot(R.T)

    ## diagonalize
    d,s = linalg.eigh(R)

    ## invert
    d_inv = d * 0
    w = d > d.max() * 1e-10
    #w = d > 1e-20
    d[~w] = 0
    d_inv[w] = 1 / d[w]
    IR = s.dot(d_inv[:, None] * s.T)
    F = IR.dot(F)
    Q = s.dot(sp.sqrt(d[:, None]) * s.T)

    norm = Q.sum(axis=1)
    w = norm > 0
    Q[w, :] = Q[w, :] / norm[w, None]
    flux = Q.dot(F)
    ivar = norm**2

    ## ndiag is such that the sum of the diagonals > 1-tol
    ## and at most ndiag_max

    ndiag = 1
    for i in range(Q.shape[0]):
        imin = i - ndiag // 2  # integer division: these are slice indices
        if imin < 0: imin = 0
        imax = i + ndiag // 2
        if imax >= Q.shape[1]:
            imax = Q.shape[1]

        frac = Q[i, imin:imax].sum()
        while frac < 1 - tol:
            ndiag += 2
            imin = i - ndiag // 2
            if imin < 0: imin = 0
            imax = i + ndiag // 2
            if imax >= Q.shape[1]: imax = Q.shape[1]
            frac = Q[i, imin:imax].sum()

    if ndiag > ndiag_max:
        if log is not None:  # log defaults to None, so guard before writing
            log.write("WARNING, reducing ndiag {} to {}".format(ndiag, ndiag_max))
        ndiag = ndiag_max
    nbins = Q.shape[1]
    reso = sp.zeros([ndiag, nbins])
    for i in range(ndiag):
        offset = ndiag // 2 - i
        d = sp.diagonal(Q, offset=offset)
        if offset < 0:
            reso[i, :len(d)] = d
        else:
            reso[i, nbins - len(d):nbins] = d

    t = time.time()
    sys.stdout.write("spectro perfected in: {} \n".format(t-t0))
    if log is not None:
        log.write("\n final ndiag: {}\n".format(ndiag))
        log.write("spectro perfected in: {} \n".format(t-t0))
        log.flush()
    return flux,ivar,reso
Example #34
# Make predictions:
# Rstar = scipy.linspace(0.63, 0.93, 24*30)
fits_file = scipy.io.readsav("/home/markchil/codes/gptools/demo/nth_samples_1101014006.save")
# Average over the inexplicably shifting Rmajor array:
Rstar = scipy.mean(fits_file.te_fit.rmajor[0][:, 32:72], axis=1)
# Rstar = fits_file.ne_fit.rmajor[0][:, 0]
Te_nth = fits_file.te_fit.te_comb_fit[0][:, 32:72]
mean_nth, std_nth = gptools.compute_stats(Te_nth, robust=robust)

mean_start = time.time()
# import pdb; pdb.set_trace()
mean, cov = gp.predict(Rstar, noise=False)
mean_elapsed = time.time() - mean_start
mean = scipy.asarray(mean).flatten()
std = scipy.sqrt(scipy.diagonal(cov))

meand_start = time.time()
meand, covd = gp.predict(Rstar, noise=False, n=1)
meand_elapsed = time.time() - meand_start
meand = scipy.asarray(meand).flatten()
stdd = scipy.sqrt(scipy.diagonal(covd))

print(
    "Optimization took: %.2fs\nMean prediction took: %.2fs\nGradient "
    "prediction took: %.2fs\n" % (opt_elapsed, mean_elapsed, meand_elapsed)
)

# meand_approx = scipy.gradient(mean, Rstar[1] - Rstar[0])

f = plt.figure()
Example #35
    def predict(self, X, eval_MSE=False, return_k=False):
        """
        This function evaluates the Gaussian Process model at x.

        Parameters

        X : array_like
            An array with shape (n_eval, n_features) giving the point(s) at
            which the prediction(s) should be made.

        eval_MSE : boolean, optional
            A boolean specifying whether the Mean Squared Error should be
            evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
            prediction).

        return_k : boolean, optional
            A boolean specifying whether the function should return the kernel
            vector (kernel of distances between test configurations and database ones).
            Default is False.

        Returns
        -------
        y : array_like, shape (n_samples, ) or (n_samples, n_targets)
            An array with shape (n_eval, ) if the Gaussian Process was trained
            on an array of shape (n_samples, ) or an array with shape
            (n_eval, n_targets) if the Gaussian Process was trained on an array
            of shape (n_samples, n_targets) with the Best Linear Unbiased
            Prediction at x.

        MSE : array_like, optional (if eval_MSE == True)
            An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
            with the Mean Squared Error at x.

        k : array_like, optional (if return_k == True)
            An array with shape (n_eval, ) or (n_eval, n_targets) as with y
            
        """
        
        # Check input shapes
        X = array2d(X)
        n_eval, _ = X.shape
        n_samples, n_features = self.X.shape
        n_samples_y, n_targets = self.y.shape
        
        if X.shape[1] != n_features:
            raise ValueError(("The number of features in X (X.shape[1] = %d) "
                              "should match the number of features used "
                              "for fit() "
                              "which is %d.") % (X.shape[1], n_features))

        X = (X - self.X_mean) / self.X_std

        # Initialize output
        y = sp.zeros(n_eval)
        if eval_MSE:
            MSE = sp.zeros(n_eval)

        # Get distances between each new point in X and all input training set
        # dx = sp.asarray([[ LA.norm(p-q) for q in self.X] for p in X]) # SLOW!!!

        if self.metric == 'euclidean':
            dx = (((self.X - X[:,None])**2).sum(axis=2))**0.5
        elif self.metric == 'cityblock':
            dx = (sp.absolute(self.X - X[:,None])).sum(axis=2)
        else:
            print "ERROR: metric not understood"

        if self.normalise == -1:
            natoms_db = (self.X != 0.).sum(1)
            natoms_t = (X != 0.).sum(1)
            dx = dx / sp.sqrt(natoms_db * natoms_t[:, None])

        # Evaluate correlation
        k = kernel(dx, self.theta0, self.corr)

        # UNNECESSARY: feature relevance
        if self.do_features_projection:
            self.feat_proj = self.alpha.flatten() * k
            y_scaled = self.feat_proj.sum(axis=1)
        else:
            # Scaled predictor
            y_scaled = sp.dot(k, self.alpha)
        # Predictor
        y = (self.y_mean + self.y_std * y_scaled).reshape(n_eval, n_targets)
        if self.y_ndim_ == 1:
            y = y.ravel()

        # Calculate mean square error of each prediction
        if eval_MSE:
            MSE = sp.dot(sp.dot(k, self.inverse), k.T)
            if k.ndim > 1: MSE = sp.diagonal(MSE)
            MSE = kernel(0.0, self.theta0, self.corr) + self.nugget - MSE
            # Mean Squared Error might be slightly negative depending on
            # machine precision: force to zero!
            MSE[MSE < MACHINE_EPSILON] = 0.
            if self.y_ndim_ == 1:
                MSE = MSE.ravel()
                if return_k:
                    return y, MSE, k
                else:
                    return y, MSE
        elif return_k:
            return y, k
        else:
            return y
Example #36
def smooth_cov_wick(infile, Wick_infile, outfile):
    """
    Model the missing correlation in the Wick computation
    with an exponential

    Args:
        infile (str): path to the correlation function
            (produced by picca_cf, picca_xcf)
        Wick_infile (str): path to the Wick correlation function
            (produced by picca_wick, picca_xwick)
        outfile (str): output path

    Returns:
        None
    """

    h = fitsio.FITS(infile)
    da = sp.array(h[2]['DA'][:])
    we = sp.array(h[2]['WE'][:])
    head = h[1].read_header()
    nt = head['NT']
    np = head['NP']  # number of r-parallel bins; note this shadows the usual numpy alias
    h.close()

    co = cov(da, we)

    nbin = da.shape[1]
    var = sp.diagonal(co)
    if sp.any(var == 0.):
        print('WARNING: data has some empty bins, impossible to smooth')
        print('WARNING: returning the unsmoothed covariance')
        return co

    cor = co / sp.sqrt(var * var[:, None])
    cor1d = cor.reshape(nbin * nbin)

    h = fitsio.FITS(Wick_infile)
    cow = sp.array(h[1]['CO'][:])
    h.close()

    varw = sp.diagonal(cow)
    if sp.any(varw == 0.):
        print('WARNING: Wick covariance has bins with var = 0')
        print('WARNING: returning the unsmoothed covariance')
        return co

    corw = cow / sp.sqrt(varw * varw[:, None])
    corw1d = corw.reshape(nbin * nbin)

    Dcor1d = cor1d - corw1d

    #### indices
    ind = sp.arange(nbin)
    rtindex = ind % nt
    rpindex = ind // nt
    idrt2d = abs(rtindex - rtindex[:, None])
    idrp2d = abs(rpindex - rpindex[:, None])
    idrt1d = idrt2d.reshape(nbin * nbin)
    idrp1d = idrp2d.reshape(nbin * nbin)

    #### reduced covariance  (50*50)
    Dcor_red1d = sp.zeros(nbin)
    for idr in range(0, nbin):
        print("\rsmoothing {}".format(idr), end="")
        Dcor_red1d[idr] = sp.mean(Dcor1d[(idrp1d == rpindex[idr])
                                         & (idrt1d == rtindex[idr])])
    Dcor_red = Dcor_red1d.reshape(np, nt)
    print("")

    #### fit for L and A at each drp
    def corrfun(idrp, idrt, L, A):
        r = sp.sqrt(float(idrt)**2 + float(idrp)**2) - float(idrp)
        return A * sp.exp(-r / L)

    def chisq(L, A, idrp):
        chi2 = 0.
        idrp = int(idrp)
        for idrt in range(1, nt):
            chi = Dcor_red[idrp, idrt] - corrfun(idrp, idrt, L, A)
            chi2 += chi**2
        chi2 = chi2 * np * nbin
        return chi2

    Lfit = sp.zeros(np)
    Afit = sp.zeros(np)
    for idrp in range(np):
        m = iminuit.Minuit(chisq,
                           L=5.,
                           error_L=0.2,
                           limit_L=(1., 400.),
                           A=1.,
                           error_A=0.2,
                           idrp=idrp,
                           fix_idrp=True,
                           print_level=1,
                           errordef=1.)
        m.migrad()
        Lfit[idrp] = m.values['L']
        Afit[idrp] = m.values['A']

    #### hybrid covariance from wick + fit
    co_smooth = sp.sqrt(var * var[:, None])

    cor0 = Dcor_red1d[rtindex == 0]
    for i in range(nbin):
        print("\rupdating {}".format(i), end="")
        for j in range(i + 1, nbin):
            idrp = idrp2d[i, j]
            idrt = idrt2d[i, j]
            newcov = corw[i, j]
            if (idrt == 0):
                newcov += cor0[idrp]
            else:
                newcov += corrfun(idrp, idrt, Lfit[idrp], Afit[idrp])
            co_smooth[i, j] *= newcov
            co_smooth[j, i] *= newcov

    print("\n")

    h = fitsio.FITS(outfile, 'rw', clobber=True)
    h.write([co_smooth], names=['CO'], extname='COR')
    h.close()
    print(outfile, ' written')

    return
Example #37
    ### Append the data
    da = sp.append(data[0]['DA'], data[1]['DA'], axis=1)
    we = sp.append(data[0]['WE'], data[1]['WE'], axis=1)

    ### Compute the covariance
    co = cov(da, we)

    ### Get the cross-covariance
    size1 = data[0]['DA'].shape[1]
    cross_co = co.copy()
    cross_co = cross_co[:, size1:]
    cross_co = cross_co[:size1, :]

    ### Get the cross-correlation
    var = sp.diagonal(co)
    cor = co / sp.sqrt(var * var[:, None])
    cross_cor = cor.copy()
    cross_cor = cross_cor[:, size1:]
    cross_cor = cross_cor[:size1, :]

    ### Test if valid
    try:
        scipy.linalg.cholesky(co)
    except scipy.linalg.LinAlgError:
        print('WARNING: Matrix is not positive definite')

    ### Save
    h = fitsio.FITS(args.out, 'rw', clobber=True)
    h.write([cross_co, cross_cor],
            names=['CO', 'COR'],