Example #1

def board_status(b):
    '''
    Checks for a winner of the game.
    '''
    board = np.array(b)
    for r in range(3):
        row = board[r]
        column = board[:, r]
        if np.all(row == user.X):
            return user.X
        if np.all(row == user.O):
            return user.O
        if np.all(column == user.X):
            return user.X
        if np.all(column == user.O):
            return user.O

    diagonal = np.diagonal(board)

    if np.all(diagonal == user.O):
        return user.O

    if np.all(diagonal == user.X):
        return user.X

    board_flip = np.fliplr(board)
    reverse_diagonal = np.diagonal(board_flip)

    if np.all(reverse_diagonal == user.O):
        return user.O

    if np.all(reverse_diagonal == user.X):
        return user.X

    return user.available
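A minimal usage sketch; the `user` enum is not shown in this listing, so the stand-in below is an assumption for illustration:

import numpy as np
from enum import Enum

class user(Enum):  # hypothetical stand-in for the enum the snippet expects
    X = 'X'
    O = 'O'
    available = '.'

b = [[user.X, user.O, user.O],
     [user.available, user.X, user.O],
     [user.available, user.available, user.X]]
print(board_status(b))  # -> user.X (main-diagonal win)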
Example #2

    def calculate_energy(self):
        """
        The full energy function involves calculation of all pairwise
        energy interactions. We sum across rows and down columns separately.
        """
        energy = 0.
        for row in self.system:
            energy += -1 * np.sum(
                [self.coupling_constant * i * j for i,j in zip(row[:-1], row[1:])]
                )
            energy += -1 * np.sum(self.external_field * row)
            if self.next_nearest:
                energy += -1 * np.sum(			
                     [self.coupling_constant * i * j for i,j in zip(row[:-2], row[2:])]
                     )
        for col in np.rollaxis(self.system, -1):
            energy += -1 * np.sum(
                [self.coupling_constant * i * j for i,j in zip(col[:-1], col[1:])]
                )
            if self.next_nearest:
                energy += -1 * np.sum(
                    [self.coupling_constant * i * j for i, j in zip(col[:-2], col[2:])]
                    )
        if self.next_nearest:  # get the diagonal elements
            diags = []
            for i in range(-len(self.system), len(self.system)):
                diags.append(np.diagonal(self.system, i, 0, 1))
                diags.append(np.diagonal(self.system, i, 1, 0))
            for diag in diags:
                energy += -1 * np.sum(
                    [self.coupling_constant * i * j for i, j in zip(diag[:-1], diag[1:])]
                    )

        return energy
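For the nearest-neighbour part, the row/column loops above reduce to two vectorized sums; a minimal standalone sketch (assuming J stands for coupling_constant, h for external_field, open boundaries, no next-nearest terms):

import numpy as np

def ising_energy(system, J=1.0, h=0.0):
    # horizontal bonds (within rows) and vertical bonds (within columns)
    energy = -J * np.sum(system[:, :-1] * system[:, 1:])
    energy += -J * np.sum(system[:-1, :] * system[1:, :])
    # external-field term
    energy += -h * np.sum(system)
    return energy

spins = np.ones((2, 2))
print(ising_energy(spins))  # -> -4.0 (four aligned nearest-neighbour bonds)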
Example #3
def kl_Divergence(mean1, cov1, mean2, cov2):
    N = mean1.size
    # check length of the mean vectors
    if N != mean2.size:
        raise Exception("Mean sizes do not match!")
    # check that cov matrices have the same length as the mean
    if cov1.shape[0] != N or cov1.shape[1] != N:
        raise Exception("cov1 sizes do not equal mean length!")
    if cov2.shape[0] != N or cov2.shape[1] != N:
        raise Exception("cov2 sizes do not equal mean length!")

    # Cholesky decompositions of the covariance matrices
    chol1 = np.linalg.cholesky(cov1)
    chol2 = np.linalg.cholesky(cov2)
    # log-determinants from the Cholesky factors (natural log)
    ld1 = 2 * np.sum(np.log(np.diagonal(chol1)), axis=0)
    ld2 = 2 * np.sum(np.log(np.diagonal(chol2)), axis=0)

    # log of the determinant ratio
    ldet = ld2 - ld1
    # inverse of cov1 from its Cholesky decomposition
    S1i = np.dot(np.linalg.inv(np.transpose(chol1)), np.linalg.inv(chol1))
    tr = np.sum(np.diagonal(np.dot(S1i, cov2)), axis=0)
    m2mm1 = np.subtract(mean2, mean1)

    # quadratic form (mean2 - mean1)^T cov1^{-1} (mean2 - mean1)
    qf = np.dot(np.transpose(m2mm1), np.dot(S1i, m2mm1))
    r = 0.5 * (ldet + tr + qf - N)
    return r
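A quick sanity check (a hedged sketch): the divergence of a Gaussian from itself should be zero.

import numpy as np

mean = np.array([0.0, 1.0])
cov = np.array([[2.0, 0.3],
                [0.3, 1.0]])
print(kl_Divergence(mean, cov, mean, cov))  # -> 0.0 (up to rounding)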
Example #4
    def __init__(
            self,
            data3d=None,        # 3d array
            cell=None,          # 3x3 array, vectors of cell
            grid_measure=None,  # 3x3 array, vectors of grid increment
            delta_grid=None,    # 1x3 array, (dx, dy, dz)
            cell_origin=None,   # 1x3 array, (x0, y0, z0)
            inptype=None,       # raw input, or file type 'cube'...
            inpfname=None,      #
            ):
        '''
        Dealing with types of input:
        '''
        if inptype == 'cube' and inpfname:
            self.cell_origin, self.grid_measure, self.data3d = read_cube(inpfname)
        else:
            sys.exit('Crewp:ScalarField: Input for ScalarField, not implemented.')
        # Case: get ``delta_grid`` from ``grid_measure`` matrix.
        if (not hasattr(self, 'delta_grid')) and hasattr(self, 'grid_measure'):
            # test if grid_measure is diagonal
            if np.count_nonzero(self.grid_measure - np.diag(np.diagonal(self.grid_measure))) != 0:
                sys.exit('Crewp:ScalarField: Non-Cartesian grid of ScalarField, not implemented.')
            else:
                self.delta_grid = np.diagonal(self.grid_measure)
        self.gridshape = self.data3d.shape
Example #5
    def plot(self, eta1, u1, v1, eta2=None, u2=None, v2=None):
        self.fig.add_subplot(self.gs[0, 0])
        self.sp_eta.set_data(eta1)

        self.fig.add_subplot(self.gs[0, 1])
        self.sp_u.set_data(u1)

        self.fig.add_subplot(self.gs[0, 2])
        self.sp_v.set_data(v1)
            
        self.fig.add_subplot(self.gs[1, 0])
        self.sp_radial1.set_ydata(eta1.ravel())

        self.fig.add_subplot(self.gs[1, 1])
        self.sp_x_axis1.set_ydata(eta1[(self.ny+2)//2, :])
        self.sp_y_axis1.set_ydata(eta1[:, (self.nx+2)//2])

        self.fig.add_subplot(self.gs[1, 2])
        self.sp_x_diag1.set_ydata(np.diagonal(eta1, offset=-abs(self.nx-self.ny)//2))
        self.sp_y_diag1.set_ydata(np.diagonal(eta1.T, offset=abs(self.nx-self.ny)//2))
        
        if eta2 is not None:
            self.fig.add_subplot(self.gs[2, 0])
            self.sp_radial2.set_ydata(eta2.ravel())

            self.fig.add_subplot(self.gs[2, 1])
            self.sp_x_axis2.set_ydata(eta2[(self.ny+2)//2, :])
            self.sp_y_axis2.set_ydata(eta2[:, (self.nx+2)//2])

            self.fig.add_subplot(self.gs[2, 2])
            self.sp_x_diag2.set_ydata(np.diagonal(eta2, offset=-abs(self.nx-self.ny)//2))
            self.sp_y_diag2.set_ydata(np.diagonal(eta2.T, offset=abs(self.nx-self.ny)//2))
        
        plt.draw()
        time.sleep(0.001)
Example #6
def main():
    mu = np.array([[5],
                   [6],
                   [7],
                   [8],
                   ])
    S = np.array([[3.01602775, 1.02746769, -3.60224613, -2.08792829],
                  [1.02746769, 5.65146472, -3.98616664, 0.48723704],
                  [-3.60224613, -3.98616664, 13.04508284, -1.59255406],
                  [-2.08792829, 0.48723704, -1.59255406, 8.28742469],
                  ])
    mu_est, S_est = estimate(mu, S)

    print('mean estimation:', mu_est)
    print('S estimation:')
    print(S_est)

    print()

    print('mean % difference:', (np.diagonal(mu / mu_est) - 1) * 100)
    print('S % difference:')
    print(((S / S_est) - 1) * 100)

    print()

    mu_est, S_est = estimate_more(mu, S)
    print('mean estimation:', mu_est)
    print('S estimation:')
    print(S_est)

    print()

    print('mean % difference:', (np.diagonal(mu / mu_est) - 1) * 100)
    print('S % difference:')
    print(((S / S_est) - 1) * 100)
Example #7
  def compute_distances_no_loops(self, X):
    """
    Compute the distance between each test point in X and each training point
    in self.X_train using no explicit loops.

    Input / Output: Same as compute_distances_two_loops
    """
    num_test = X.shape[0]
    num_train = self.X_train.shape[0]
    dists = np.zeros((num_test, num_train)) 
    X1=X
    X2=self.X_train
    #########################################################################
    # TODO:                                                                 #
    # Compute the l2 distance between all test points and all training      #
    # points without using any explicit loops, and store the result in      #
    # dists.                                                                #
    #                                                                       #
    # You should implement this function using only basic array operations; #
    # in particular you should not use functions from scipy.                #
    #                                                                       #
    # HINT: Try to formulate the l2 distance using matrix multiplication    #
    #       and two broadcast sums.                                         #
    #########################################################################
    X1X1Tdiag = np.array(np.diagonal(np.dot(X1,X1.T)))[np.newaxis]
    X1X1T = np.tile(X1X1Tdiag.T,(1,num_train))
    X2X2Tdiag = np.array(np.diagonal(np.dot(X2,X2.T)))[np.newaxis]
    X2X2T = np.tile(X2X2Tdiag,(num_test,1))
    X1X2T = np.dot(X1,X2.T)
    dists = np.sqrt(X1X1T - 2*X1X2T + X2X2T)

    #########################################################################
    #                         END OF YOUR CODE                              #
    #########################################################################
    return dists
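The vectorization above relies on ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2; a quick hedged check against a direct per-pair computation:

import numpy as np

X1 = np.random.randn(3, 5)
X2 = np.random.randn(4, 5)
fast = np.sqrt((X1**2).sum(1)[:, None] - 2 * X1 @ X2.T + (X2**2).sum(1)[None, :])
slow = np.array([[np.linalg.norm(a - b) for b in X2] for a in X1])
assert np.allclose(fast, slow)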
Example #8
def partial_svd(transform,
                r=1,
                extra_rank=2,
                max_its = 1000,
                tol = 1e-8,
                initial=None,
                return_full = False,
                debug=False):

    """
    Compute the partial SVD of the linear_transform X using the Mazumder/Hastie algorithm in (TODO: CITE)
    """

    if isinstance(transform, np.ndarray):
        transform = linear_transform(transform)

    n = transform.output_shape[0]
    p = transform.input_shape[0]


    r = int(np.min([r, p]))
    q = np.min([r + extra_rank, p])
    if initial is not None:
        if initial.shape == (n,q):
            U = initial
        elif len(initial.shape) == 1:
            U = np.hstack([initial.reshape((initial.shape[0],1)), np.random.standard_normal((n,q-1))])            
        else:
            U = np.hstack([initial, np.random.standard_normal((n,q-initial.shape[1]))])            
    else:
        U = np.random.standard_normal((n,q))

    if return_full:
        ind = np.arange(q)
    else:
        ind = np.arange(r)
    old_singular_values = np.zeros(r)
    change_ind = np.arange(r)

    itercount = 0
    singular_rel_change = 1.


    while itercount < max_its and singular_rel_change > tol:
        if debug and itercount > 0:
            print(itercount, singular_rel_change,
                  np.sum(np.fabs(singular_values) > 1e-12),
                  np.fabs(singular_values[:np.min([5, len(singular_values)])]))
        V,_ = np.linalg.qr(transform.adjoint_map(U))
        X_V = transform.linear_map(V)
        U,R = np.linalg.qr(X_V)
        singular_values = np.diagonal(R)[change_ind]
        singular_rel_change = np.linalg.norm(singular_values - old_singular_values)/np.linalg.norm(singular_values)
        old_singular_values = singular_values * 1.
        itercount += 1
    singular_values = np.diagonal(R)[ind]

    nonzero = np.where(np.fabs(singular_values) > 1e-12)[0]
    if len(nonzero):
        return U[:,ind[nonzero]] * np.sign(singular_values[nonzero]), np.fabs(singular_values[nonzero]),  V[:,ind[nonzero]].T
    else:
        return U[:,ind[0]], np.zeros((1,1)),  V[:,ind[0]].T
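The alternating-QR iteration above (orthonormalize the adjoint map of U, then the forward map of V, and read singular values off the diagonal of R) can be sanity-checked on a small dense matrix with plain numpy; a hedged sketch, not this library's API:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((20, 8))
U = rng.standard_normal((20, 3))
for _ in range(300):              # subspace iteration for the top 3 triplets
    V, _ = np.linalg.qr(X.T @ U)  # orthonormalize the adjoint map
    U, R = np.linalg.qr(X @ V)    # orthonormalize the forward map
approx = np.sort(np.abs(np.diagonal(R)))[::-1]
exact = np.linalg.svd(X, compute_uv=False)[:3]
print(approx)  # should closely match `exact`, the top three singular values
print(exact)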
Example #9

def main():

    sample='q'
    sm_bin='10.0_10.5'
    catalogue = 'sm_9.5_s0.2_sfr_c-0.75_250'

    #load in fiducial mock
    filepath = './'
    filename = 'sm_9.5_s0.2_sfr_c-0.8_Chinchilla_250_wp_fiducial_'+sample+'_'+sm_bin+'_cov.npy'
    cov = np.matrix(np.load(filepath+filename))
    diag = np.diagonal(cov)
    filepath = cu.get_output_path() + 'analysis/central_quenching/observables/'
    filename = 'sm_9.5_s0.2_sfr_c-0.8_Chinchilla_250_wp_fiducial_'+sample+'_'+sm_bin+'.dat'
    data = ascii.read(filepath+filename)
    rbins = np.array(data['r'])
    mu = np.array(data['wp'])
    
    #load in comparison mock
    
    
    
    
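    # NOTE: `wp` used below should come from the comparison mock,
    # whose loading is omitted above.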
    plt.figure()
    plt.errorbar(rbins, mu, yerr=np.sqrt(np.diagonal(cov)), color='black')
    plt.plot(rbins, wp,  color='red')
    plt.xscale('log')
    plt.yscale('log')
    plt.show()
    
    inv_cov = cov.I
    Y = np.matrix((wp-mu))
    
    X = Y*inv_cov*Y.T
    
    print(X)
Example #10
    def MStep(self, posterior, data, mix_pi=None):
        if isinstance(data, DataSet):
            x = data.internalData
        elif hasattr(data, "__iter__"):
            x = data
        else:
            raise TypeError("Unknown/Invalid input to MStep.")

        post = posterior.sum() # sum of posteriors
        self.mean = np.dot(posterior, x) / post

        # centered input values (with new mus)
        centered = np.subtract(x, np.repeat([self.mean], len(x), axis=0))


        # estimating correlation factor
        sigma = np.dot(np.transpose(np.dot(np.identity(len(posterior)) * posterior, centered)), centered) / post # sigma/covariance matrix

        diagsigma = np.diagflat(1.0 / np.diagonal(sigma)) # diagonal matrix with the reciprocal variances of sigma
        correlation = np.dot(np.dot(diagsigma, np.multiply(sigma, sigma)), diagsigma) # correlation matrix with entries sigma_xy^2/(sigma^2_x * sigma^2_y)

        correlation = correlation - np.diagflat(np.diagonal(correlation)) # making diagonal entries = 0

        # XXX - check this
        parents = self.maximunSpanningTree(correlation) # returns the maximum spanning tree of the correlation matrix
        self.parents = self.directTree(parents, 0) # by default, direct the tree from node 0


        # XXX note that computation time could be saved, as these functions share the same sufficient statistics
        ConditionalGaussDistribution.MStep(self, posterior, data, mix_pi)
Example #11
def gp_plot_prediction(
        predict_x,
        mean,
        variance=None
):
    """
    Plot a gp's prediction using pylab including error bars if variance specified

    Error bars are 2 * standard_deviation as in `Gaussian Processes for Machine Learning`__ by Rasmussen and Williams. 

    __ http://www.amazon.co.uk/Gaussian-Processes-Learning-Adaptive-Computation/dp/026218253X/
    """
    from pylab import plot, concatenate, fill
    from numpy import asarray, diagonal
    if variance is not None:
        # check variances are just about +ve - could signify a bug if not
        assert (diagonal(variance) > -1e-10).all()
        data = [
            (x, y, max(v, 0.0))
            for x, y, v
            in zip(predict_x, mean.flat, diagonal(variance))
        ]
    else:
        data = [
            (x, y)
            for x, y
            in zip(predict_x, mean)
        ]
    data.sort(key=lambda d: d[0])  # sort on X axis
    predict_x = [d[0] for d in data]
    predict_y = asarray([d[1] for d in data])
    plot(predict_x, predict_y, color='k', linestyle=':')
Example #12
    def getWorldSIP(self, position):
        u = position[0] - self.pixReference[0]
        v = position[1] - self.pixReference[1]
        fuvArray = numpy.zeros((4, 4))
        fuvArray[0][0] = 1
        fuvArray[0][1] = v
        fuvArray[0][2] = v * v
        fuvArray[0][3] = v * v * v
        fuvArray[1][0] = u
        fuvArray[1][1] = u * v
        fuvArray[1][2] = u * v * v
        fuvArray[2][0] = u * u
        fuvArray[2][1] = u * u * v
        fuvArray[3][0] = u * u * u

        uprime = numpy.sum(numpy.diagonal(numpy.dot(self.SIP_A, fuvArray)))
        vprime = numpy.sum(numpy.diagonal(numpy.dot(self.SIP_B, fuvArray)))

        # print "u': " + str(uprime)
        # print "v': " + str(vprime)
        u = u + uprime
        v = v + vprime
        CD = numpy.array(self.linearTransform)
        pixel = numpy.array((u, v))
        world = numpy.dot(CD, pixel)
        world = (world[0] + self.raDeg, world[1] + self.dec)

        return (world[0], world[1])
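Side note: the two sums above compute the trace tr(SIP_A . fuvArray) and tr(SIP_B . fuvArray); a hedged sketch showing the same value without forming the full matrix product:

import numpy as np

A = np.arange(16.0).reshape(4, 4)
F = np.ones((4, 4))
assert np.isclose(np.sum(np.diagonal(np.dot(A, F))),
                  np.einsum('ij,ji->', A, F))  # tr(AF) without the full product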
Example #13
    def getPixel(self, position):
        pixel = (0, 0)
        u = position[0] - self.raDeg
        v = position[1] - self.dec
        # print "Incoming", u, v
        world = numpy.array((u, v))
        CD = numpy.array(self.linearTransform)
        invCD = numpy.linalg.inv(CD)
        pixel = numpy.dot(invCD, world)

        fuvArray = numpy.zeros((4, 4))
        fuvArray[0][0] = 1
        fuvArray[0][1] = v
        fuvArray[0][2] = v * v
        fuvArray[0][3] = v * v * v
        fuvArray[1][0] = u
        fuvArray[1][1] = u * v
        fuvArray[1][2] = u * v * v
        fuvArray[2][0] = u * u
        fuvArray[2][1] = u * u * v
        fuvArray[3][0] = u * u * u

        pixel[0] = pixel[0] + numpy.sum(numpy.diagonal(numpy.dot(self.SIP_AP, fuvArray)))
        pixel[1] = pixel[1] + numpy.sum(numpy.diagonal(numpy.dot(self.SIP_BP, fuvArray)))

        # print "Reference", self.pixReference, "offset", pixel
        pixel = pixel + self.pixReference
        return (pixel[0], pixel[1])
Example #14
def distance_diagonal_law(matrix, positions=None):
    n = min(matrix.shape)
    if positions is None:
        return np.array([np.average(np.diagonal(matrix, j)) for j in range(n)])
    else:
        contigs = positions_to_contigs(positions)

    def is_intra(i, j):
        return contigs[i] == contigs[j]

    max_intra_distance = max((contigs == u).sum() for u in set(contigs))

    intra_contacts = []
    inter_contacts = [np.average(np.diagonal(matrix, j))
                      for j in range(max_intra_distance, n)]
    for j in range(max_intra_distance):
        D = np.diagonal(matrix, j)
        diagonal_intra = []
        for i in range(len(D)):
            if is_intra(i, j):
                diagonal_intra.append(D[i])
#            else:
#                diagonal_inter.append(D[i])
#        inter_contacts.append(np.average(np.array(diagonal_inter)))
        intra_contacts.append(np.average(np.array(diagonal_intra)))

    intra_contacts.extend(inter_contacts)

    return [positions, np.array(intra_contacts)]
Example #15
    def mixing_constants(self, T, x):
        """ Compute Am, Bm, dAmdT and d2AmdT2,
        the "mixing constants" of the model and
        associated derivatives."""

        Tcmat = self.Tcmat
        Pcmat = self.Pcmat
        Vcmat = self.Vcmat
        Omat = self.Omat
        C = 0.37464 + 1.54226*Omat - 0.26992*Omat**2.0
        B = 0.07796*RGAS*(np.diagonal(Tcmat)/
                np.diagonal(Pcmat))
        A = (0.457236*((RGAS*Tcmat)**2.0)/Pcmat)*\
                ((1.0 +  C*(1.0 -
                    np.sqrt(np.tensordot(T, 1./Tcmat, 0))))**2.0)

        Am = np.tensordot((A*x[:,:,:,:,None]*x[:,:,:,None,:]),
                np.ones(Tcmat.shape), 2)
        Bm = np.tensordot(x, B, 1)

        G = C*np.sqrt(np.tensordot(T, 1./Tcmat, axes=0))/(
                1.0 + C*(1.0 -
                np.sqrt(np.tensordot(T, 1./Tcmat, axes=0))))
        
        dAmdT = (-1./T)*(np.tensordot(
            G*A*x[:,:,:,None,:]*x[:,:,:,:,None],
            np.ones(Tcmat.shape),
                    2))
        d2AmdT2 = 0.457236*(RGAS**2)/(2.0*T*np.sqrt(T))*\
            np.tensordot(
                (C*(1.+C)*Tcmat*np.sqrt(Tcmat)/Pcmat)*
                x[:,:,:,None,:]*x[:,:,:,:,None],
                np.ones(Tcmat.shape),
                                2)
        return Am, Bm, dAmdT, d2AmdT2
Example #16
    def estimate(self, X, Y, shared):
        m = len(Y)
        d = len(X.T)
        # Split spam and no spam
        nspam = np.array([x[(Y == 0.0).T[0]] for x in X.T]).T
        spam = np.array([x[(Y == 1.0).T[0]] for x in X.T]).T
        p = [float(len(nspam))/float(len(X)), float(len(spam))/float(len(X))]
        # Estimate u0, u1
        u0, u1 = nspam.mean(0), spam.mean(0)
        # Initialize covariance
        u = (u0, u1)
        if shared:
            cov = np.zeros((d, d))
        else:
            cov = [np.zeros((d, d)), np.zeros((d, d))]
        # Estimate covariance
        for i in range(m):
            diff = X[i] - u[int(Y[i])]
            if shared:
                cov += np.dot(diff[:, None], diff[None, :])
            else:
                cov[int(Y[i])] += np.dot(diff[:, None], diff[None, :])
        # Normalize covariance
        if shared:
            cov /= float(m)
            # print(np.diagonal(cov))
            np.fill_diagonal(cov, np.diagonal(cov) + 1e-5)
            # print(np.diagonal(cov))
        else:
            cov = [c/float(m) for c in cov]
            [np.fill_diagonal(c, np.diagonal(c) + 1e-5) for c in cov]
        # Print stats and return
        return u, cov, p
Example #17
    def __cal_shift_integrand(self, alpha=0, beta=0, gamma=0):
        """
        calculate shift current integrand and store it in 'shift_integrand'
        all parameters in this function are in hamiltonian gauge
        :param alpha, beta, gamma: 0: x, 1: y, 2: z
        """
        fermi_energy = self.fermi_energy
        nkpts = self.nkpts
        self.calculate('eigenvalue')
        self.calculate('A_h_ind', alpha)
        self.calculate('A_h_ind', beta)
        self.calculate('A_h_ind', gamma)
        self.calculate('A_h_ind_ind', beta, alpha)
        self.calculate('A_h_ind_ind', gamma, alpha)
        for i in range(nkpts):
            A_alpha = self.kpt_data['A_h_ind'][alpha][:, :, i]
            A_beta = self.kpt_data['A_h_ind'][beta][:, :, i]
            A_gamma = self.kpt_data['A_h_ind'][gamma][:, :, i]
            A_beta_alpha = self.kpt_data['A_h_ind_ind'][beta][alpha][:, :, i]
            A_gamma_alpha = self.kpt_data['A_h_ind_ind'][gamma][alpha][:, :, i]
            fermi = np.zeros(self.num_wann, dtype='float')
            fermi[self.kpt_data['eigenvalue'][:, i] > fermi_energy] = 0
            fermi[self.kpt_data['eigenvalue'][:, i] < fermi_energy] = 1
            fermi = fermi[:, None] - fermi[None, :]
            ki = np.diagonal(A_alpha)[:, None] - np.diagonal(A_alpha)[None, :]

            self.kpt_data['shift_integrand'][alpha][beta][gamma][:, :, i] = \
                np.imag(fermi *
                        (A_beta.T * (A_gamma_alpha - 1j * ki * A_gamma) +
                         A_gamma.T * (A_beta_alpha - 1j * ki * A_beta))
                        ) / 2
Example #18
def covariance_ci(covariance, fits, parnames, sigma=1, verbose=True):
    npars = fits.size
    out = np.zeros((npars, 3))
    out[:, 0] = fits
    out[:, 1] = fits - sigma*np.sqrt(np.diagonal(covariance))
    out[:, 2] = fits + sigma*np.sqrt(np.diagonal(covariance))
    if sigma == 1:
        percent = '68%'
    if sigma == 2:
        percent = '95%'
    if verbose:
        print('covariance ' + percent + ' ci')
        hfmt = '%-12s %12s %12s %12s \n'
        s = hfmt % ('Param', 'Best-Fit', 'Lower Bound', 'Upper Bound')
        s += hfmt % ('-'*5, '-'*8, '-'*11, '-'*11)
        for name, val, lower, upper in zip(parnames, out[:, 0],
                                           out[:, 1], out[:, 2]):
            s += '%-12s %12g ' % (name, val)
            s += '%12g ' % lower
            s += '%12g \n' % upper
        print(s)
    result = {}
    for i in range(npars):
        result[parnames[i]] = out[i, [1, 2]]
    return result
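A hedged usage sketch with toy fit results (the parameter names are illustrative):

import numpy as np

fits = np.array([1.0, 2.0])
covariance = np.array([[0.04, 0.0],
                       [0.0, 0.09]])
ci = covariance_ci(covariance, fits, ['a', 'b'], sigma=1, verbose=False)
print(ci['a'])  # -> [0.8 1.2], i.e. 1.0 +/- 0.2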
Example #19
def UniformGamma(num_candidates, ranking_size, allow_repetitions):
    validDocs=ranking_size
    if not allow_repetitions:
        validDocs=min(ranking_size, num_candidates)
                
    gamma=numpy.empty((num_candidates*validDocs, num_candidates*validDocs), dtype=numpy.float64)
    if num_candidates==1:
        gamma.fill(1.0)
    else:
        #First set all the off-diagonal blocks
        if allow_repetitions:
            gamma.fill(1.0/(num_candidates*num_candidates))
        else:
            gamma.fill(1.0/(num_candidates*(num_candidates-1)))
            #Correct the diagonal of each off-diagonal block: Pairwise=0
            for p in range(1,validDocs):
                diag=numpy.diagonal(gamma, offset=p*num_candidates)
                diag.setflags(write=True)
                diag.fill(0)
                        
                diag=numpy.diagonal(gamma, offset=-p*num_candidates)
                diag.setflags(write=True)
                diag.fill(0)
                        
        #Now correct the diagonal blocks: Diagonal matrix with marginals = 1/m
        for j in range(validDocs):
            currentStart=j*num_candidates
            currentEnd=(j+1)*num_candidates
            gamma[currentStart:currentEnd, currentStart:currentEnd]=0
            numpy.fill_diagonal(gamma, 1.0/num_candidates)

    gammaInv=scipy.linalg.pinv(gamma)
    return (num_candidates, gammaInv)
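The setflags(write=True) calls above are needed because np.diagonal returns a read-only view in modern NumPy; a tiny hedged demonstration:

import numpy as np

m = np.zeros((3, 3))
d = np.diagonal(m, offset=1)
# d.fill(5.0) here would raise: the view is read-only
d.setflags(write=True)  # allowed because the base array is writeable
d.fill(5.0)
print(m[0, 1], m[1, 2])  # -> 5.0 5.0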
Example #20
    def init_amps(self, eris):
        time0 = time.process_time(), time.time()
        nocc = self.nocc()
        nvir = self.nmo() - nocc
        nkpts = self.nkpts
        t1 = numpy.zeros((nkpts, nocc, nvir), dtype=numpy.complex128)
        t2 = numpy.zeros((nkpts, nkpts, nkpts, nocc, nocc, nvir, nvir), dtype=numpy.complex128)
        woovv = numpy.empty((nkpts, nkpts, nkpts, nocc, nocc, nvir, nvir), dtype=numpy.complex128)
        self.emp2 = 0
        foo = eris.fock[:, :nocc, :nocc].copy()
        fvv = eris.fock[:, nocc:, nocc:].copy()
        eris_oovv = eris.oovv.copy()
        eia = numpy.zeros((nocc, nvir))
        eijab = numpy.zeros((nocc, nocc, nvir, nvir))

        kconserv = self.kconserv
        for ki in range(nkpts):
            for kj in range(nkpts):
                for ka in range(nkpts):
                    kb = kconserv[ki, ka, kj]
                    eia = np.diagonal(foo[ki]).reshape(-1, 1) - np.diagonal(fvv[ka])
                    ejb = np.diagonal(foo[kj]).reshape(-1, 1) - np.diagonal(fvv[kb])
                    eijab = lib.direct_sum("ia,jb->ijab", eia, ejb)
                    woovv[ki, kj, ka] = 2 * eris_oovv[ki, kj, ka] - eris_oovv[ki, kj, kb].transpose(0, 1, 3, 2)
                    t2[ki, kj, ka] = eris_oovv[ki, kj, ka] / eijab

        t2 = numpy.conj(t2)
        self.emp2 = numpy.einsum("pqrijab,pqrijab", t2, woovv).real
        self.emp2 /= nkpts
        logger.info(self, "Init t2, MP2 energy = %.15g", self.emp2)
        logger.timer(self, "init mp2", *time0)
        return self.emp2, t1, t2
Example #21
def main():
    parser = OptionParser(description='Fitting to a noisy data generated by a known function')
    parser.add_option("--npoints", type="int",   help="number of data points") 
    parser.add_option("--low",     type="float", help="smallest data point") 
    parser.add_option("--high",    type="float", help="highest data point") 
    parser.add_option("--sigma",   type="float", help="std of noise") 
    (options, args) = parser.parse_args() 

    pl.figure(1,(7,6))
    ax = pl.subplot(1,1,1)

    pl.connect('key_press_event',kevent.press)
    
    
    sigma = options.sigma    
    Ls   = np.append(np.linspace(options.low,options.high,options.npoints),46)
    nLs  = np.linspace(min(Ls),max(Ls),100)
    Mis  = HalfLog(Ls,.5,0.5)
    errs = np.random.normal(0,sigma, len(Mis))
    Mis  = Mis+errs
    pl.errorbar(Ls,Mis,errs,ls='',marker='s',color='b')
    print(sigma/Mis)

    coeff, var_matrix = curve_fit(FreeLog,Ls,Mis,(1.0,1.0,1.0))
    err = np.sqrt(np.diagonal(var_matrix))
    dof     = len(Ls) - len(coeff)
    chisq   = sum(((Mis-FreeLog(Ls,coeff[0],coeff[1],coeff[2]))/sigma)**2)
    cdf     = special.chdtrc(dof,chisq)
    print('Free:  a = %0.2f(%0.2f); b = %0.2f(%0.2f); c = %0.2f(%0.2f); p-value = %0.2f ' %(coeff[0],err[0],coeff[1],err[1],coeff[2],err[2],cdf))
    pl.plot(nLs,FreeLog(nLs,coeff[0],coeff[1],coeff[2]),label='Free',color='y')

    coeff, var_matrix = curve_fit(ZeroLog,Ls,Mis,(1.0,1.0))
    err = np.sqrt(np.diagonal(var_matrix))
    dof     = len(Ls) - len(coeff)
    chisq   = sum(((Mis-ZeroLog(Ls,coeff[0],coeff[1]))/sigma)**2)
    cdf     = special.chdtrc(dof,chisq)
    print('Zero:  a = %0.2f(%0.2f);                 c = %0.2f(%0.2f); p-value = %0.2f' %(coeff[0],err[0],coeff[1],err[1],cdf))
    pl.plot(nLs,ZeroLog(nLs,coeff[0],coeff[1]),label='Zero',color='g')
    pl.tight_layout()

    coeff, var_matrix = curve_fit(HalfLog,Ls,Mis,(1.0,1.0))
    err = np.sqrt(np.diagonal(var_matrix))
    dof     = len(Ls) - len(coeff)
    chisq   = sum(((Mis-HalfLog(Ls,coeff[0],coeff[1]))/sigma)**2)
    cdf     = special.chdtrc(dof,chisq)
    print('Half:  a = %0.2f(%0.2f);                 c = %0.2f(%0.2f); p-value = %0.2f' %(coeff[0],err[0],coeff[1],err[1],cdf))
    pl.plot(nLs,HalfLog(nLs,coeff[0],coeff[1]),label='Half',color='b')
    pl.tight_layout()

    coeff, var_matrix = curve_fit(OneLog,Ls,Mis,(1.0,1.0))
    err = np.sqrt(np.diagonal(var_matrix))
    dof     = len(Ls) - len(coeff)
    chisq   = sum(((Mis-OneLog(Ls,coeff[0],coeff[1]))/sigma)**2)
    cdf     = special.chdtrc(dof,chisq)
    print('Unity: a = %0.2f(%0.2f);                 c = %0.2f(%0.2f); p-value = %0.2f' %(coeff[0],err[0],coeff[1],err[1],cdf))
    pl.plot(nLs,OneLog(nLs,coeff[0],coeff[1]),label='Unity',color='r')
    pl.tight_layout()

    pl.legend()
    pl.show()
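The pattern repeated above (1-sigma parameter errors from the diagonal of the curve_fit covariance) in a minimal hedged sketch:

import numpy as np
from scipy.optimize import curve_fit

def line(x, a, c):
    return a * x + c

xdata = np.linspace(0, 10, 50)
ydata = 2.0 * xdata + 1.0 + np.random.normal(0, 0.1, xdata.size)
coeff, var_matrix = curve_fit(line, xdata, ydata, (1.0, 1.0))
err = np.sqrt(np.diagonal(var_matrix))
print('a = %0.2f(%0.2f); c = %0.2f(%0.2f)' % (coeff[0], err[0], coeff[1], err[1]))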
Example #22
def checkSquare(board, playerChar):
    otherPlayerChar = notPlayer(playerChar)
    lookupBoard = numpy.array(board)
    foundLocation = (INVALID, INVALID)
    
    # Rows and cols
    for i in range(0, BOARD_SIZE):
        #print("        Processing square index " + str(i))
        rowSet = lookupBoard[i, :].tolist()
        foundIndex = checkSet(rowSet, otherPlayerChar)
        if (foundIndex != INVALID):
            foundLocation = (i, foundIndex)
        
        colSet = lookupBoard[:, i].tolist()
        foundIndex = checkSet(colSet, otherPlayerChar)
        if (foundIndex >= 0):
            foundLocation = (foundIndex, i)

    # Diagonals
    foundIndex = checkSet(numpy.diagonal(lookupBoard).tolist(), otherPlayerChar) 
    if (foundIndex != INVALID):
        foundLocation = (foundIndex, foundIndex)
        
    foundIndex = checkSet(numpy.diagonal(lookupBoard[::-1]).tolist(), otherPlayerChar) 
    if (foundIndex != INVALID):
        revFoundIndex = BOARD_SIZE - foundIndex - 1
        foundLocation = (revFoundIndex, foundIndex)
                        
    #print(foundLocation)
    return foundLocation
Example #23

  def compute_distances_no_loops(self, X):
    """
    Compute the distance between each test point in X and each training point
    in self.X_train using no explicit loops.

    Input / Output: Same as compute_distances_two_loops
    """
    num_test = X.shape[0]
    num_train = self.X_train.shape[0]
    dists = np.zeros((num_test, num_train)) 
    #########################################################################
    # TODO:                                                                 #
    # Compute the l2 distance between all test points and all training      #
    # points without using any explicit loops, and store the result in      #
    # dists.                                                                #
    #                                                                       #
    # You should implement this function using only basic array operations; #
    # in particular you should not use functions from scipy.                #
    #                                                                       #
    # HINT: Try to formulate the l2 distance using matrix multiplication    #
    #       and two broadcast sums.                                         #
    #########################################################################
    dists = -2*np.dot(X, np.transpose(self.X_train))
    assert(dists.shape == (num_test, num_train))
    train_diags = np.diagonal(np.dot(self.X_train, np.transpose(self.X_train)))
    assert(train_diags.shape == (num_train,))
    test_diags = np.diagonal(np.dot(X, np.transpose(X)))
    dists = dists + train_diags # broadcast sum #1
    dists = np.transpose(np.transpose(dists) + test_diags)  # broadcast sum #2
    #########################################################################
    #                         END OF YOUR CODE                              #
    #########################################################################
    return np.sqrt(dists)
Example #24
def plot_classifier(cal,trues,falses):
    x = np.linspace(-1,1,200)
    y = np.linspace(-1,1,200)
    (X,Y) = np.meshgrid(x,y)
    XY = np.dstack([X,Y])
    Z = lr.evaluate_poly(cal,lr.dim2_deg4[:,None,None,:],XY)

    f = plt.figure()
    ax = f.add_subplot(111)
    ax.grid(True)
    ax.axis('equal')

    ax.contour(X,Y,Z,levels=np.linspace(np.min(Z),np.max(Z),20),colors='k')

    # adjust for the way the random samples were distributed
    adjustment = 1

    if len(falses)>0:
        falses = np.diagonal(falses,axis1=1,axis2=2)[:,:2] - [adjustment]*2
        ax.scatter(*falses.T,color='red')

    if len(trues)>0:
        trues = np.diagonal(trues,axis1=1,axis2=2)[:,:2] - [adjustment]*2
        ax.scatter(*trues.T,color='green')

    return f
Example #25
def test():
    # feed = DataFeed("/mnt/e/data/algae_dataset", "/mnt/e/data/algae_patches", False)
    # feed = DataFeed("/mnt/e/data/algae_dataset_cells_only", "/mnt/e/data/algae_patches_cells_only", False, False)
    feed = EqualizedDataFeed("/mnt/e/data/algae_dataset_equal_batches", False)
    sess = tf.InteractiveSession()
    batch_size = 200
    tile_size = (128,128)
    lr = 1e-3
    eps = 1.0
    a = 0.001
    net = AlexNet(batch_size, tile_size, sess, lr, eps, a)
    saver = tf.train.Saver(net.train_vars)

    saver.restore(sess, "AlexNet_4ft_1mp_2fc_1softmax_random_symmetry_201611141618_best.ckpt")

    conf_mat = np.zeros((4,4))

    for i in range(5):
        X,Y = feed.next_batch(batch_size)
        pred = net.forward(X).argmax(axis=1)
        
        targets = Y.argmax(axis=1)
        
        for row in range(4):
            for col in range(4):
                conf_mat[row, col] += ((pred==row)*(targets==col)).sum()

    print(conf_mat)
    print(np.diagonal(conf_mat).sum() * 1. / conf_mat.sum())
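The final line above is the overall accuracy: correct predictions lie on the confusion-matrix diagonal. A minimal hedged illustration:

import numpy as np

conf_mat = np.array([[50,  2,  0],
                     [ 3, 45,  1],
                     [ 0,  4, 48]])
print(np.diagonal(conf_mat).sum() * 1. / conf_mat.sum())  # -> 143/153, about 0.93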
Example #26
def MMD_Diff_Var(Kyy,Kzz,Kxy,Kxz):
    '''
    Compute the variance of the difference statistic MMDXY-MMDXZ
    See http://arxiv.org/pdf/1511.04581.pdf Appendix for derivations
    '''
    m = Kxy.shape[0]
    n = Kyy.shape[0]
    r = Kzz.shape[0]

    Kyynd = Kyy - np.diag(np.diagonal(Kyy))
    Kzznd = Kzz - np.diag(np.diagonal(Kzz))

    u_yy = np.sum(Kyynd) * (1./(n*(n-1)))
    u_zz = np.sum(Kzznd) * (1./(r*(r-1)))
    u_xy = np.sum(Kxy) / (m*n)
    u_xz = np.sum(Kxz) / (m*r)

    # compute zeta1
    t1 = (1./n**3)*np.sum(Kyynd.T.dot(Kyynd)) - u_yy**2
    t2 = (1./(n**2*m))*np.sum(Kxy.T.dot(Kxy)) - u_xy**2
    t3 = (1./(n*m**2))*np.sum(Kxy.dot(Kxy.T)) - u_xy**2
    t4 = (1./r**3)*np.sum(Kzznd.T.dot(Kzznd)) - u_zz**2
    t5 = (1./(r*m**2))*np.sum(Kxz.dot(Kxz.T)) - u_xz**2
    t6 = (1./(r**2*m))*np.sum(Kxz.T.dot(Kxz)) - u_xz**2
    t7 = (1./(n**2*m))*np.sum(Kyynd.dot(Kxy.T)) - u_yy*u_xy
    t8 = (1./(n*m*r))*np.sum(Kxy.T.dot(Kxz)) - u_xz*u_xy
    t9 = (1./(r**2*m))*np.sum(Kzznd.dot(Kxz.T)) - u_zz*u_xz

    zeta1 = t1 + t2 + t3 + t4 + t5 + t6 - 2.*(t7 + t8 + t9)

    zeta2 = (1./m/(m-1))*np.sum((Kyynd - Kzznd - Kxy.T - Kxy + Kxz + Kxz.T)**2) \
        - (u_yy - 2.*u_xy - (u_zz - 2.*u_xz))**2
    
    
    data=dict({'t1':t1,
               't2':t2,
               't3':t3,
               't4':t4,
               't5':t5,
               't6':t6,
               't7':t7,
               't8':t8,
               't9':t9,
               'zeta1':zeta1,
               'zeta2':zeta2,
                })
    #TODO more precise version for zeta2 
    #    xx=(1/m^2)*sum(sum(Kxxnd.*Kxxnd))-u_xx^2;
    # yy=(1/n^2)*sum(sum(Kyynd.*Kyynd))-u_yy^2;
    #xy=(1/(n*m))*sum(sum(Kxy.*Kxy))-u_xy^2;
    #xxy=(1/(n*m^2))*sum(sum(Kxxnd*Kxy))-u_xx*u_xy;
    #yyx=(1/(n^2*m))*sum(sum(Kyynd*Kxy'))-u_yy*u_xy;
    #zeta2=(xx+yy+xy+xy-2*(xxy+xxy +yyx+yyx))
    
    
    Var = (4.*(m-2)/(m*(m-1)))*zeta1
    Var_z2 = Var + (2./(m*(m-1)))*zeta2

    return Var, Var_z2, data
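A hedged usage sketch with Gaussian-kernel matrices; note that the zeta2 line above implicitly assumes equal sample sizes (m = n = r), or the matrix sum would not broadcast:

import numpy as np

def gauss_kernel(A, B):
    return np.exp(-0.5 * ((A[:, None, :] - B[None, :, :])**2).sum(-1))

m = 40
X, Y, Z = (np.random.randn(m, 5) for _ in range(3))
Var, Var_z2, data = MMD_Diff_Var(gauss_kernel(Y, Y), gauss_kernel(Z, Z),
                                 gauss_kernel(X, Y), gauss_kernel(X, Z))
print(Var, Var_z2)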
Example #27
    def reorder(self):
        self.Update_World()
        nspcs = len(self.allspcs)
        y = self.update_y_from_world()
        y[:]*=0
        y[:]+=1

        t = 0
        drate_per_dspc = self.drate_per_dspc
        rate_const = self.rate_const
        rate_const[:] *= 0
        rate_const[:] += 1
        exec(self.fill_drate_exp)
        inmatrix = self.drate_per_dspc
        nonzero = inmatrix[:] != 0
        density = nonzero.sum(0) + nonzero.sum(1)
        densityasc = density.argsort()
        neworder = np.concatenate([densityasc[::-1][::2][::-1], densityasc[::-1][1::2]])
        from_left = nonzero.argmax(1)
        from_right = nonzero[:, ::-1].argmax(1)
        from_bottom = nonzero.argmax(0)
        from_top = nonzero[::-1].argmax(0)
        neworder = (from_bottom - from_top).argsort()
        neworder = (from_left - from_right).argsort()
        testmatrix = inmatrix[neworder][:, neworder].copy()
        """Returns ml and mu, the lower and upper band sizes of a."""
        a = testmatrix
        nrows, ncols = a.shape
        ml = 0
        for k in range(-nrows+1, 0):
            if np.diag(a, k).any():
                ml = -k
                break
        mu = 0
        for k in range(nrows-1, 0, -1):
            if np.diag(a, k).any():
                mu = k
                break
        subd, superd = ml, mu
        self.allspcs = [self.allspcs[i] for i in neworder]
        del self.world['y']
        self.set_spcidx()
        self.set_dy_exp()
        self.set_rate_exp()
        self.ml = subd
        self.mu = superd
        return neworder, subd, superd
        
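        # NOTE: the statements below are unreachable; the method returns above.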
        drate_per_dspc = self.drate_per_dspc
        rate_const = self.rate_const
        drate_per_dspc = self.drate_per_dspc
        exec(self.fill_drate_exp)
        for d in range(subd, nspcs):
            assert((np.diagonal(drate_per_dspc, -d) == 0).all())
            
        for d in range(superd, nspcs):
            assert((np.diagonal(drate_per_dspc, d) == 0).all())
            
        return neworder, subd, superd    
Example #28
def plot_control(samples, degrees=True, filename=None):
    # get time from timestamp and sample time
    t = samples.bicycle.dt.mean() * samples.ts

    n = samples.x.shape[1] + 1
    cols = 2
    rows = math.ceil(n/cols)
    fig, axes = plt.subplots(rows, cols, sharex=True)
    axes = np.ravel(axes)

    state_cost_weight = np.diagonal(samples.lqr.Q.mean(axis=0))
    integral_cost_weight = np.diagonal(samples.lqr.Qi.mean(axis=0))
    input_cost_weight = np.diagonal(samples.lqr.R.mean(axis=0))

    for n in range(samples.x.shape[1]):
        ax = axes[n + 1]

        x_state = state_name[n]
        x_unit = unit(x_state, degrees)

        x = samples.x[:, n]
        r = hold_masked_values(samples.lqr.r[:, n])
        q = samples.lqr.q[:, n]
        if degrees and '°' in x_unit:
            x = np.rad2deg(x)
            r = np.rad2deg(r)
            q = np.rad2deg(q)

        ax.set_xlabel('{} [{}]'.format('time', unit('time')))
        ax.set_ylabel('{} [{}]'.format(x_state, x_unit))
        ax.plot(t, x, color=state_color[2*n + 1],
                label='true (q{} = {:0.2g})'.format(n, state_cost_weight[n]))
        if integral_cost_weight[n]:
            ax.plot(t, r, color=state_color[2*n],
                    label='reference (qi{} = {:0.2g})'.format(
                        n, integral_cost_weight[n]))
            scale = np.around(np.min(np.abs(ax.get_ylim())) / np.max(np.abs(q)),
                              decimals=3)
            ax.plot(t, q * scale, color=_grey_color(state_color[2*n + 1]),
                    label='integral error * {:0.2g}'.format(scale))
        ax.legend()

    ax = axes[0]
    ax.set_xlabel('{} [{}]'.format('time', unit('time')))
    ax.set_ylabel('{} [{}]'.format('torque', unit('torque')))
    ax.set_title('control signals')
    title_components = []
    for n in range(samples.u.shape[1]):
        u = samples.u[:, n]
        if not u.any():
            continue
        u_control = control_name[n]
        label = '{} (r{} = {:0.2g})'.format(u_control, n, input_cost_weight[n])
        ax.plot(t, u, color=control_color[n], label=label)
    ax.legend()

    title = 'system control'
    _set_suptitle(fig, title, filename)
    return fig, axes
Example #29
def main():
    """
    DO NOT TOUCH THIS FUNCTION. IT IS USED FOR COMPUTER EVALUATION OF YOUR CODE
    """
    results = my_info() + "\t\t"
    results += np.array_str(np.diagonal(simple_EC_classifier())) + "\t\t"
    results += np.array_str(np.diagonal(KNN()))
    print(results + "\n")
Example #30
def main():
    """
    DO NOT TOUCH THIS FUNCTION. IT IS USED FOR COMPUTER EVALUATION OF YOUR CODE
    """
    results = my_info() + '\t\t'
    results += np.array_str(np.diagonal(one_vs_all())) + '\t\t'
    results += np.array_str(np.diagonal(all_vs_all()))
    print(results + '\t\t')
Example #31
def psf_fit(data,
            fluxguess,
            spsf,
            psfctr,
            scale,
            shift,
            make="bpf",
            mask=None,
            weights=None,
            step=None,
            pos=None):
    """
    Fits a supersampled PSF to a data image. The position is fitted at
    discrete positions while the stellar and sky fluxes are fitted with
    scipy's leastsq function.

    Parameters:
    -----------
    data:  2D ndarray
           The science image we are trying to fit.

    fluxguess: 2-element tuple  [flux, sky]
           Tuple giving the starting point to fit the total star flux
           and sky flux level.

    spsf: 2D ndarray
          The supersampled PSF image.

    psfctr: 2-element tuple  [y, x]
            y, x-position of the center of the PSF.

    scale:  scalar
            Ratio of the PSF and data pixel-scales.

    shift: 2-element tuple  [yshift, xshift]
           Each element is a 1D array containing the shifts of the
           center of the PSF to the center of the image at which the
           fit will be evaluated.

    mask : ndarray
           Mask of bad pixel values, same shape as data. Good pixels
           have value 1; bad pixels have value 0, and will not be
           considered in the fit.

    weights: ndarray
             Weights for the minimization, for scientific data the
             weights should be 1/sqrt(variance). Same shape as data.

    step : scalar
           The initial step of the number of elements to jump when
           evaluating shift.

    pos : 2-element list
          The index of the elements in shift where to start the
          evaluation.

    Example:
    --------

    >>> import psf_fit as pf
    >>> import sys, os, time
    >>> import numpy as np
    >>> sys.path.append('/home/esp01/events/wa008b-patricio/wa008bs41/lib/')
    >>> sys.path.append('/home/patricio/ast/esp01/convert/lib/python/gaussian/')
    >>> import manageevent as me
    >>> import pyfits      as pyf

    >>> # Example #1:
    >>> # Using a Spitzer supplied PSF and make_psf_interp:

    >>> # Get a PSF and its center:
    >>> e = me.loadevent('/home/esp01/events/wa008b-patricio/wa008bs41/run/fgc/wa008bs41_ctr', load=['data','uncd','mask'])
    >>> sst_psf = np.copy(e.psfim)
    >>> psfctr = np.copy(e.psfctr)

    >>> # The scale factor:
    >>> scale = 5.0

    >>> # Let's create an image to fit:
    >>> # The image size will be 21 by 21:
    >>> shape = np.array([21,21])

    >>> # Define the position of the center of the PSF, and fluxes:
    >>> params = [1.75, 0.5, 5e4, 2e2]
    >>> # Make the image:
    >>> image, center = pf.make_psf_interp(sst_psf, shape, scale, params, psfctr)
    >>> # Add some noise:
    >>> noise = np.sqrt(image) * np.random.randn(21,21)
    >>> # The image to fit:
    >>> y = image + noise
    >>> var = np.abs(image)

    >>> # Let's say our prior guess lies within 1 pixel from the given position:
    >>> yguess = params[0] + 2*(np.random.rand()-0.5)
    >>> xguess = params[1] + 2*(np.random.rand()-0.5)

    >>> # Array of Y,X shifts around our guess where to search:
    >>> noffset = 201
    >>> offsetrad = 1.0  # search within a 1 pixel radius:
    >>> offset = offsetrad * np.linspace(-1.0, 1.0, noffset)

    >>> # The shifts are relative to the center of the image:
    >>> yshift = yguess + offset
    >>> xshift = xguess + offset
    >>> shift = (yshift, xshift)

    >>> # Starting point, guess for the fluxes:
    >>> fluxguess = (0.1e5, 80)

    >>> # Find the best fit:
    >>> pos, bestp, chisq = pf.psf_fit(y, fluxguess, sst_psf, psfctr, scale, shift, mask=None, weights=1/var, make='ipf')
    >>> # Best position:
    >>> print(pos)
    >>> # Best flux fit:
    >>> print(bestp)

    >>> # Example #2:
    >>> # Using a Tiny Tim supplied PSF and make_psf_binning:

    >>> # Get a PSF and its center:
    >>> ttpsf = pyf.getdata('/home/esp01/events/wa008b-patricio/Tiny_tim/irac4_5600K_100x.fits')
    >>> psfctr = np.asarray(np.shape(ttpsf))/2
    >>> # The scale factor:
    >>> scale = 100

    >>> # Create an image to fit:
    >>> shape = np.array([21,21])
    >>> params = [1043, 915, 5e5, 200]
    >>> image, center = pf.make_psf_binning(ttpsf, shape, scale, params, psfctr)
    >>> # Add some noise:
    >>> noise = np.sqrt(image) * np.random.randn(21,21)
    >>> # The image to fit:
    >>> y = image + noise
    >>> var = np.abs(image)

    >>> # Let's say our guess is within 1 pixel from the given position:
    >>> yguess = params[0] + np.random.randint(-scale,scale)
    >>> xguess = params[1] + np.random.randint(-scale,scale)

    >>> # Array of Y,X shifts around our guess where to search:
    >>> offsetrad = 1.0  # search within a 1 pixel radius:
    >>> noffset = int(2*scale*offsetrad + 1)
    >>> offset = np.arange(noffset) - noffset/2

    >>> # The shifts are relative to the position of the PSF:
    >>> yshift = yguess + offset
    >>> xshift = xguess + offset
    >>> shift = (yshift, xshift)

    >>> # Starting point, guess for the fluxes:
    >>> fluxguess = (1e4, 80)

    >>> # Find the best fit:
    >>> tini = time.time()
    >>> pos, bestp, chisq = pf.psf_fit(y, fluxguess, ttpsf, psfctr, scale, shift, mask=None, weights=1/var, make='bpf')
    >>> print(time.time()-tini)
    >>> # Best position:
    >>> print(pos)
    >>> # Best flux fit:
    >>> print(bestp)

    Modification History:
    ---------------------
    2011-05-21  patricio  Initial version.  [email protected]
    2011-05-27  patricio  Include gradient parameter in leastsq.
    2011-07-26  patricio  Unified both make_psf.
  """
    shape = np.shape(data)

    # Default mask: all good
    if mask is None:
        mask = np.ones(shape)

    # Default weights: no weighting
    if weights is None:
        weights = np.ones(shape)

    # Unpack shift
    y, x = shift
    # Lengths of the dependent variables:
    ny = len(y)
    nx = len(x)

    # Default initial step:
    if step is None:
        step = int(ny / 2)

    # Default initial position:
    if pos is None:
        pos = [int(ny / 2), int(nx / 2)]

    # Allocate space for subpsf in make_psf_bin outside the loop:
    ns = (np.asarray(shape, float) * scale).astype(int)
    subpsf = np.zeros(ns)

    # Define PSF constructor:
    if make == "ipf":
        maker = make_psf_interp
        # Discard values on the edge of the mask:
        j = 2
        mask[0:j, :] = mask[:, 0:j] = mask[-j:, :] = mask[:, -j:] = 0

    elif make == "bpf":
        maker = make_psf_binning
    else:
        print("Unacceptable PSF constructor. Must be 'ipf' or 'bpf'")
        return

    # Initialize a chi-square grid:
    chisq = -np.ones((ny, nx))

    # goodratio = np.sum(mask)/np.size(mask)
    # print(goodratio)

    while (step > 0):
        # Calculate chisq in the surrounding:
        for shifty in np.arange(-1, 2):
            # y position to evaluate:
            posy = np.clip(pos[0] + shifty * step, 0, ny - 1)
            for shiftx in np.arange(-1, 2):
                # x position to evaluate:
                posx = np.clip(pos[1] + shiftx * step, 0, nx - 1)
                if chisq[posy, posx] == -1:
                    # Make a psf model for given y,x position:
                    model, center = maker(
                        spsf, shape, scale,
                        [int(y[posy]),
                         int(x[posx]),
                         int(1.0),
                         int(0.0)], psfctr, subpsf)

                    # Weighted, masked values:
                    mmodel = model[np.where(mask)]
                    mdata = data[np.where(mask)]
                    mweights = weights[np.where(mask)]
                    args = (mdata, mmodel, mweights)
                    # The fitting:
                    p, cov, info, msg, flag = so.leastsq(residuals,
                                                         fluxguess,
                                                         args,
                                                         Dfun=gradient,
                                                         full_output=True,
                                                         col_deriv=1)
                    err = np.sqrt(np.diagonal(cov))
                    # Chi-square per degree of freedom:
                    cspdof = (np.sum((info['fvec'])**2.0) /
                              (len(info["fvec"]) - len(fluxguess)))
                    chisq[posy, posx] = cspdof

        # Is the current position the minimum chi-square?
        # Minimum chi-square position:
        mcp = np.where(chisq == np.amin(chisq[np.where(chisq >= 0)]))

        # If it is, then reduce the step size:
        if pos[0] == mcp[0][0] and pos[1] == mcp[1][0]:
            step = int(np.round(step / 2.0))
        # If not, then move to the position of min. chi-square:
        else:
            pos[0] = mcp[0][0]
            pos[1] = mcp[1][0]

    # The best fitting parameters at the best position:
    model, center = maker(
        spsf, shape, scale,
        [int(y[pos[0]]), int(x[pos[1]]), 1, 0], psfctr, subpsf)

    # This is the fix I need to do:
    mmodel = model[np.where(mask)]
    mdata = data[np.where(mask)]
    mweights = weights[np.where(mask)]
    args = (mdata, mmodel, mweights)
    p, cov, info, msg, flag = so.leastsq(residuals,
                                         fluxguess,
                                         args,
                                         Dfun=gradient,
                                         full_output=True,
                                         col_deriv=1)
    err = np.sqrt(np.diagonal(cov))

    # Return the position of min chisq, the best parameters, and the chisq grid:
    return center, p, chisq
Example #32
def main(options):
    Log.CreatePipeOutput(options)
    #VC.OptionsCheck(options)
    Log.PrintMainHeader(options)

    # glob returns an empty list (it does not raise) when nothing matches
    fdf = glob.glob(options.onlyTSdir + '/RUN.fdf')
    if fdf:
        TSrun = True
    else:
        fdf = glob.glob(options.FCwildcard +
                        '/RUN.fdf')  # This should be made an input flag
        TSrun = False
    SCDM = Supercell_DynamicalMatrix(fdf, TSrun)

    # Write high-symmetry path
    WritePath(options.DestDir + '/symmetry-path', SCDM.Sym.path, options.steps)

    # Write mesh
    k1, k2, k3 = ast.literal_eval(options.mesh)
    rvec = 2 * N.pi * N.array([SCDM.Sym.b1, SCDM.Sym.b2, SCDM.Sym.b3])
    import Inelastica.physics.mesh as Kmesh
    # Full mesh
    kmesh = Kmesh.kmesh(2**k1,
                        2**k2,
                        2**k3,
                        meshtype=['LIN', 'LIN', 'LIN'],
                        invsymmetry=False)
    WriteKpoints(options.DestDir + '/mesh_%ix%ix%i' % tuple(kmesh.Nk),
                 N.dot(kmesh.k, rvec))
    # Mesh reduced by inversion symmetry
    kmesh = Kmesh.kmesh(2**k1,
                        2**k2,
                        2**k3,
                        meshtype=['LIN', 'LIN', 'LIN'],
                        invsymmetry=True)
    WriteKpoints(options.DestDir + '/mesh_%ix%ix%i_invsym' % tuple(kmesh.Nk),
                 N.dot(kmesh.k, rvec))

    # Evaluate electron k-points
    if options.kfile:
        # Prepare Hamiltonian etc in Gamma for whole supercell
        natoms = SIO.GetFDFlineWithDefault(fdf[0], 'NumberOfAtoms', int, -1,
                                           'Error')
        SCDM.PrepareGradients(options.onlySdir,
                              N.array([0., 0., 0.]),
                              1,
                              natoms,
                              AbsEref=False,
                              atype=complex,
                              TSrun=TSrun)
        SCDM.nao = SCDM.h0.shape[-1]
        SCDM.FirstOrb = SCDM.OrbIndx[0][0]  # First atom = 1
        SCDM.LastOrb = SCDM.OrbIndx[SCDM.Sym.basis.NN -
                                    1][1]  # Last atom = Sym.NN
        SCDM.rednao = SCDM.LastOrb + 1 - SCDM.FirstOrb
        # Read kpoints
        kpts, dk, klabels, kticks = ReadKpoints(options.kfile)
        if klabels:
            # Only write ascii if labels exist
            WriteKpoints(options.DestDir + '/kpoints', kpts, klabels)
        # Prepare netcdf
        ncfn = options.DestDir + '/Electrons.nc'
        ncf = NC4.Dataset(ncfn, 'w')
        # Grid
        ncf.createDimension('gridpts', len(kpts))
        ncf.createDimension('vector', 3)
        grid = ncf.createVariable('grid', 'd', ('gridpts', 'vector'))
        grid[:] = kpts
        grid.units = '1/Angstrom'
        # Geometry
        ncf.createDimension('atoms', SCDM.Sym.basis.NN)
        xyz = ncf.createVariable('xyz', 'd', ('atoms', 'vector'))
        xyz[:] = SCDM.Sym.basis.xyz
        xyz.units = 'Angstrom'
        pbc = ncf.createVariable('pbc', 'd', ('vector', 'vector'))
        pbc.units = 'Angstrom'
        pbc[:] = [SCDM.Sym.a1, SCDM.Sym.a2, SCDM.Sym.a3]
        rvec1 = ncf.createVariable('rvec', 'd', ('vector', 'vector'))
        rvec1.units = '1/Angstrom (incl. factor 2pi)'
        rvec1[:] = rvec
        ncf.sync()
        # Loop over kpoints
        for i, k in enumerate(kpts):
            if i < 100:  # Print only for the first 100 points
                ev, evec = SCDM.ComputeElectronStates(k,
                                                      verbose=True,
                                                      TSrun=TSrun)
            else:
                ev, evec = SCDM.ComputeElectronStates(k,
                                                      verbose=False,
                                                      TSrun=TSrun)
                # otherwise something simple
                if i % 100 == 0:
                    print('%i out of %i k-points computed' % (i, len(kpts)))
            if i == 0:
                ncf.createDimension('nspin', SCDM.nspin)
                ncf.createDimension('orbs', SCDM.rednao)
                if options.nbands and options.nbands < SCDM.rednao:
                    nbands = options.nbands
                else:
                    nbands = SCDM.rednao
                ncf.createDimension('bands', nbands)
                evals = ncf.createVariable('eigenvalues', 'd',
                                           ('gridpts', 'nspin', 'bands'))
                evals.units = 'eV'
                evecsRe = ncf.createVariable(
                    'eigenvectors.re', 'd',
                    ('gridpts', 'nspin', 'orbs', 'bands'))
                evecsIm = ncf.createVariable(
                    'eigenvectors.im', 'd',
                    ('gridpts', 'nspin', 'orbs', 'bands'))
                # Check eigenvectors
                print('SupercellPhonons: Checking eigenvectors at', k)
                for j in range(SCDM.nspin):
                    ev2 = N.diagonal(
                        MM.mm(MM.dagger(evec[j]), SCDM.h0_k[j], evec[j]))
                    print(' ... spin %i: Allclose=' % j,
                          N.allclose(ev[j], ev2, atol=1e-5, rtol=1e-3))
                ncf.sync()
            # Write to NetCDF
            evals[i, :] = ev[:, :nbands]
            evecsRe[i, :] = evec[:, :, :nbands].real
            evecsIm[i, :] = evec[:, :, :nbands].imag
        ncf.sync()
        # Include basis orbitals in netcdf file
        if SCDM.Sym.basis.NN == len(SCDM.OrbIndx):
            lasto = N.zeros(SCDM.Sym.basis.NN + 1, float)
            lasto[:SCDM.Sym.basis.NN] = SCDM.OrbIndx[:SCDM.Sym.basis.NN, 0]
            lasto[SCDM.Sym.basis.NN] = SCDM.OrbIndx[SCDM.Sym.basis.NN - 1,
                                                    1] + 1
        else:
            lasto = SCDM.OrbIndx[:SCDM.Sym.basis.NN + 1, 0]
        orbbasis = SIO.BuildBasis(fdf[0], 1, SCDM.Sym.basis.NN, lasto)
        # Note that the above basis is for the geometry with an atom FC-moved in z.
        #print dir(orbbasis)
        #print orbbasis.xyz # Hence, this is not the correct geometry of the basis atoms!
        center = ncf.createVariable('orbcenter', 'i', ('orbs', ))
        center[:] = N.array(orbbasis.ii - 1, dtype='int32')
        center.description = 'Atom index (counting from 0) of the orbital center'
        nn = ncf.createVariable('N', 'i', ('orbs', ))
        nn[:] = N.array(orbbasis.N, dtype='int32')
        ll = ncf.createVariable('L', 'i', ('orbs', ))
        ll[:] = N.array(orbbasis.L, dtype='int32')
        mm = ncf.createVariable('M', 'i', ('orbs', ))
        mm[:] = N.array(orbbasis.M, dtype='int32')
        # Cutoff radius and delta
        Rc = ncf.createVariable('Rc', 'd', ('orbs', ))
        Rc[:] = orbbasis.coff
        Rc.units = 'Angstrom'
        delta = ncf.createVariable('delta', 'd', ('orbs', ))
        delta[:] = orbbasis.delta
        delta.units = 'Angstrom'
        # Radial components of the orbitals
        ntb = len(orbbasis.orb[0])
        ncf.createDimension('ntb', ntb)
        rii = ncf.createVariable('rii', 'd', ('orbs', 'ntb'))
        rii[:] = N.outer(orbbasis.delta, N.arange(ntb))
        rii.units = 'Angstrom'
        radialfct = ncf.createVariable('radialfct', 'd', ('orbs', 'ntb'))
        radialfct[:] = orbbasis.orb
        # Sort eigenvalues to connect crossing bands?
        if options.sorting:
            for i in range(SCDM.nspin):
                evals[:, i, :] = SortBands(evals[:, i, :])
        # Produce nice plots if labels exist
        if klabels:
            if SCDM.nspin == 1:
                PlotElectronBands(options.DestDir + '/Electrons.agr', dk,
                                  evals[:, 0, :], kticks)
            elif SCDM.nspin == 2:
                PlotElectronBands(options.DestDir + '/Electrons.UP.agr', dk,
                                  evals[:, 0, :], kticks)
                PlotElectronBands(options.DestDir + '/Electrons.DOWN.agr', dk,
                                  evals[:, 1, :], kticks)
        ncf.close()

    if TSrun:  # only electronic calculation
        return SCDM.Sym.path

    # Compute phonon eigenvalues
    if options.qfile:
        SCDM.SymmetrizeFC(options.radius)
        SCDM.SetMasses()
        qpts, dq, qlabels, qticks = ReadKpoints(options.qfile)
        if qlabels:
            # Only write ascii if labels exist
            WriteKpoints(options.DestDir + '/qpoints', qpts, qlabels)
        # Prepare netcdf
        ncfn = options.DestDir + '/Phonons.nc'
        ncf = NC4.Dataset(ncfn, 'w')
        # Grid
        ncf.createDimension('gridpts', len(qpts))
        ncf.createDimension('vector', 3)
        grid = ncf.createVariable('grid', 'd', ('gridpts', 'vector'))
        grid[:] = qpts
        grid.units = '1/Angstrom'
        # Geometry
        ncf.createDimension('atoms', SCDM.Sym.basis.NN)
        xyz = ncf.createVariable('xyz', 'd', ('atoms', 'vector'))
        xyz[:] = SCDM.Sym.basis.xyz
        xyz.units = 'Angstrom'
        pbc = ncf.createVariable('pbc', 'd', ('vector', 'vector'))
        pbc.units = 'Angstrom'
        pbc[:] = [SCDM.Sym.a1, SCDM.Sym.a2, SCDM.Sym.a3]
        rvec1 = ncf.createVariable('rvec', 'd', ('vector', 'vector'))
        rvec1.units = '1/Angstrom (incl. factor 2pi)'
        rvec1[:] = rvec
        ncf.sync()
        # Loop over q
        for i, q in enumerate(qpts):
            if i < 100:  # Print only for the first 100 points
                hw, U = SCDM.ComputePhononModes_q(q, verbose=True)
            else:
                hw, U = SCDM.ComputePhononModes_q(q, verbose=False)
                # otherwise just report progress occasionally
                if i % 100 == 0:
                    print('%i out of %i q-points computed' % (i, len(qpts)))
            if i == 0:
                ncf.createDimension('bands', len(hw))
                ncf.createDimension('displ', len(hw))
                evals = ncf.createVariable('eigenvalues', 'd',
                                           ('gridpts', 'bands'))
                evals.units = 'eV'
                evecsRe = ncf.createVariable('eigenvectors.re', 'd',
                                             ('gridpts', 'bands', 'displ'))
                evecsIm = ncf.createVariable('eigenvectors.im', 'd',
                                             ('gridpts', 'bands', 'displ'))
                # Check eigenvectors
                print('SupercellPhonons: Checking eigenvectors at', q)
                tmp = MM.mm(N.conjugate(U), SCDM.FCtilde, N.transpose(U))
                const = PC.hbar2SI * (1e20 / (PC.eV2Joule * PC.amu2kg))**0.5
                hw2 = const * N.diagonal(tmp)**0.5  # Units in eV
                print(' ... Allclose=',
                      N.allclose(hw, N.absolute(hw2), atol=1e-5, rtol=1e-3))
                ncf.sync()
            evals[i] = hw
            evecsRe[i] = U.real
            evecsIm[i] = U.imag
        ncf.sync()
        # Sort eigenvalues to connect crossing bands?
        if options.sorting:
            evals = SortBands(evals)
        # Produce nice plots if labels exist
        if qlabels:
            PlotPhononBands(options.DestDir + '/Phonons.agr', dq,
                            N.array(evals[:]), qticks)
        ncf.close()

    # Compute e-ph couplings
    if options.kfile and options.qfile:
        SCDM.ReadGradients(AbsEref=False)
        ncf = NC4.Dataset(options.DestDir + '/EPH.nc', 'w')
        ncf.createDimension('kpts', len(kpts))
        ncf.createDimension('qpts', len(qpts))
        ncf.createDimension('modes', len(hw))
        ncf.createDimension('nspin', SCDM.nspin)
        ncf.createDimension('bands', SCDM.rednao)
        ncf.createDimension('vector', 3)
        kgrid = ncf.createVariable('kpts', 'd', ('kpts', 'vector'))
        kgrid[:] = kpts
        qgrid = ncf.createVariable('qpts', 'd', ('qpts', 'vector'))
        qgrid[:] = qpts
        evalfkq = ncf.createVariable('evalfkq', 'd',
                                     ('kpts', 'qpts', 'nspin', 'bands'))
        # First (second) band index n (n') is the initial (final) state, i.e.,
        # Mkq(k,q,mode,spin,n,n') := < n',k+q | dV_q(mode) | n,k >
        MkqAbs = ncf.createVariable(
            'Mkqabs', 'd',
            ('kpts', 'qpts', 'modes', 'nspin', 'bands', 'bands'))
        GkqAbs = ncf.createVariable(
            'Gkqabs', 'd',
            ('kpts', 'qpts', 'modes', 'nspin', 'bands', 'bands'))
        ncf.sync()
        # Loop over k-points
        for i, k in enumerate(kpts):
            kpts[i] = k
            # Compute initial electronic states
            evi, eveci = SCDM.ComputeElectronStates(k, verbose=True)
            # Loop over q-points
            for j, q in enumerate(qpts):
                # Compute phonon modes
                hw, U = SCDM.ComputePhononModes_q(q, verbose=True)
                # Compute final electronic states
                evf, evecf = SCDM.ComputeElectronStates(k + q, verbose=True)
                evalfkq[i, j, :] = evf
                # Compute electron-phonon couplings
                m, g = SCDM.ComputeEPHcouplings_kq(
                    k, q)  # (modes,nspin,bands,bands)
                # Data to file
                # M (modes,spin,i,l) = m(modes,k,j) init(i,j) final(k,l)
                #                            0 1 2       0,1        0 1
                #                                ^-------^
                #                              ^----------------------^
                for ispin in range(SCDM.nspin):
                    evecfd = MM.dagger(evecf[ispin])  # (bands,bands)
                    M = N.tensordot(N.tensordot(m[:, ispin],
                                                eveci[ispin],
                                                axes=[2, 0]),
                                    evecfd,
                                    axes=[1, 1])
                    G = N.tensordot(N.tensordot(g[:, ispin],
                                                eveci[ispin],
                                                axes=[2, 0]),
                                    evecfd,
                                    axes=[1, 1])
                    MkqAbs[i, j, :, ispin] = N.absolute(M)
                    GkqAbs[i, j, :, ispin] = N.absolute(G)
                ncf.sync()
        ncf.close()
    return SCDM.Sym.path
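
The two Allclose checks above verify eigenpairs by transforming back: the diagonal of V†·H·V must reproduce the eigenvalues. A minimal numpy-only sketch of the same idea, using a made-up 2x2 Hermitian matrix in place of SCDM.h0_k:

import numpy as np

H = np.array([[2.0, 1.0],
              [1.0, 3.0]])  # made-up Hermitian matrix
ev, V = np.linalg.eigh(H)
# Diagonal of V^dagger H V should reproduce the eigenvalues
ev2 = np.diagonal(V.conj().T @ H @ V)
print(np.allclose(ev, ev2))  # True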
Example #33
import numpy as np

N = 4
M = 5

V = np.random.randint(low=-9, high=10, size=(N, M))
print("Матрица:\r\n{}\n".format(V))

a = np.diagonal(V, 1)
a_sum = a.sum()
print("Элементы которые выше главной диагонали: \n" + str(a) + "\nИх сумма = " + str(a_sum))
b = np.diagonal(V, -1)
b_sum = b.sum()
print("Элементы которые ниже главной диагонали: \n" + str(b) + "\nИх сумма = " + str(a_sum))
Example #34
def coherence_f(pat_gData, lowcut, highcut, fs=400):

   n_sample=pat_gData.shape[0] #40 = 600s/15s
   L_segment=pat_gData.shape[1]
   n_channel=pat_gData.shape[2]


   n_segment=10
   l_segment = L_segment // n_segment  # np.int is removed in modern numpy

   for m in range(0,n_sample):
       for channel in range(0,n_channel):
           y= pat_gData[m,:,channel]
           y_mean=sum(y)/len(y)
           y= y-y_mean  # mean centralization
           y_sigma=0 
           if sum(y*y)>0:
              y_sigma=np.sqrt(sum(y*y)/len(y)) 
              y= y/y_sigma  # normalization
           pat_gData[m,:,channel]=y

   freq = np.fft.rfftfreq(L_segment, d=1./fs)

#   f_y, Sf_y = signal.periodogram(y, fs)
#   plt.semilogy(f_y,Sf_y)

   fl=np.fft.rfft(y)
#   yy=np.fft.irfft(fl)
#   print(yy-y)

#   f_y, Sf_y = signal.periodogram(yy, fs)
#   plt.semilogy(f_y,Sf_y)

   fl2=fl*(freq>=lowcut)*(freq<=highcut)
#   print(len(fl2))

#   print(fl)
#   print(fl2)
   yy2=np.fft.irfft(fl2)
#   print(yy2)

#   f_y, Sf_y = signal.periodogram(yy2, fs)
#   plt.semilogy(f_y,Sf_y)
#   plt.savefig('bandpass_sf.png')
#   plt.close()

   freq = np.fft.rfftfreq(l_segment, d=1./fs)
   s_range= np.arange(len(freq))

   freq_ = freq[(freq>=lowcut)*(freq<=highcut)]
   s_range_ = s_range[(freq>=lowcut)*(freq<=highcut)]

   for m in range(n_sample):
       for i in range(n_segment):   
             pat_f=np.fft.rfft(pat_gData[m,i*l_segment:(i+1)*l_segment,:], axis=0)  # Fourier transform of one segment of a sample
             for s in s_range_:
                 if s==s_range_[0]:
                    x=np.outer(np.conjugate(pat_f[s,:]),pat_f[s,:]).flatten()
                 else:
                    x=np.c_[x,np.outer(np.conjugate(pat_f[s,:]),pat_f[s,:]).flatten()]
             if i==0:
                X=x
             else:
                X=X+x
       X=np.absolute(X/n_segment)

       for s in range(len(freq_)):
             y_=X[:,s].reshape(n_channel,n_channel)
             y=np.diagonal(y_)
             Y=np.outer(np.sqrt(y),np.sqrt(y))
             z=np.divide(y_,Y)
             if s==0:
                XX=z
             else:
                XX=XX+z                     
       XX=XX/len(freq_)
       XX[np.isnan(XX)] = 0
       w, v = np.linalg.eig(XX)
       #w = np.absolute(w)
       idx = w.argsort()[::-1]   
       w = w[idx]
       v = v[:,idx]

       u = col_row_max(XX)
       XX = upper_right_triangle(XX)
       if m==0:
          c=XX
          W=w
          V=v[0]
          U=u
       else:
          c=np.c_[c,XX]
          W=np.c_[W,w]
          V=np.c_[V,v[0]]
          U=np.c_[U,u]

   dict_return={'corr_y': c.T,
       'corr_max': U.T,
       'e_values': W.T,
       'e_vector': V.T
   }


   return dict_return
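
The normalisation step inside the loop above divides the averaged cross-spectral matrix by the outer product of the square roots of its diagonal, giving coherence values with ones on the diagonal. A tiny sketch with a made-up 2x2 input:

import numpy as np

y_ = np.array([[4.0, 2.0],
               [2.0, 9.0]])  # made-up cross-spectral matrix
y = np.diagonal(y_)
Y = np.outer(np.sqrt(y), np.sqrt(y))
print(np.divide(y_, Y))  # diagonal entries are exactly 1.0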
Example #35
    def dirichletAllocate(self):  ###########################  GIBBSSAMP
        oo = self

        signal.signal(signal.SIGINT, signal_handler)

        ooTR = oo.TR
        ook = oo.k
        ooN = oo.N

        runTO = oo.ITERS - 1
        oo.allocateSmp(runTO + 1, Bsmpx=oo.doBsmpx)
        #oo.allocateSmp(oo.burn + oo.NMC)
        oo.x00 = _N.array(oo.smpx[:, 2])
        oo.V00 = _N.zeros((ooTR, ook, ook))

        _kfar.init(oo.N, oo.k, oo.TR)

        if oo.dohist:
            oo.loghist = _N.zeros(oo.Hbf.shape[0])
        else:
            print("fixed hist is")
            print(oo.loghist)

        ARo = _N.zeros((ooTR, ooN + 1))

        kpOws = _N.empty((ooTR, ooN + 1))
        lv_f = _N.zeros((ooN + 1, ooN + 1))
        lv_u = _N.zeros((ooTR, ooTR))
        Bii = _N.zeros((ooN + 1, ooN + 1))

        #alpC.reverse()
        #  F_alfa_rep = alpR + alpC  already in right order, no?

        Wims = _N.empty((ooTR, ooN + 1, ooN + 1))
        Oms = _N.empty((ooTR, ooN + 1))
        smWimOm = _N.zeros(ooN + 1)
        smWinOn = _N.zeros(ooTR)
        bConstPSTH = False
        D_f = _N.diag(_N.ones(oo.B.shape[0]) * oo.s2_a)  #  spline
        iD_f = _N.linalg.inv(D_f)
        D_u = _N.diag(_N.ones(oo.TR) * oo.s2_u)  #  This should
        iD_u = _N.linalg.inv(D_u)
        iD_u_u_u = _N.dot(iD_u, _N.ones(oo.TR) * oo.u_u)
        BDB = _N.dot(oo.B.T, _N.dot(D_f, oo.B))
        DB = _N.dot(D_f, oo.B)
        BTua = _N.dot(oo.B.T, oo.u_a)

        it = 0

        ###############################  MCMC LOOP  ########################
        ###  need pointer to oo.us, but reshaped for broadcasting to work
        ###############################  MCMC LOOP  ########################
        oous_rs = oo.us.reshape((ooTR, 1))  #  done for broadcasting rules

        sd01 = _N.zeros((oo.nStates, oo.TR, oo.TR))
        _N.fill_diagonal(sd01[0], oo.s[0])
        _N.fill_diagonal(sd01[1], oo.s[1])

        smpx01 = _N.zeros((oo.nStates, oo.TR, oo.N + 1))
        zsmpx = _N.empty((oo.TR, oo.N + 1))

        #  zsmpx created
        #  PG

        zd = _N.zeros((oo.TR, oo.TR))
        izd = _N.zeros((oo.TR, oo.TR))
        ll = _N.zeros(oo.nStates)
        Bp = _N.empty((oo.nStates, oo.N + 1))

        for m in range(ooTR):
            oo.f_V[m, 0] = oo.s2_x00
            oo.f_V[m, 1] = oo.s2_x00

        THR = _N.empty(oo.TR)
        dirArgs = _N.empty(oo.nStates)  #  dirichlet distribution args
        expT = _N.empty(ooN + 1)
        BaS = _N.dot(oo.B.T, oo.aS)

        alpR = oo.F_alfa_rep[0:oo.R]
        alpC = oo.F_alfa_rep[oo.R:]

        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        print(oo.F_alfa_rep)
        print("*****************************")
        print(alpR)
        print(alpC)

        oo.nSMP_smpxC = 0

        if oo.mcmcRunDir is None:
            oo.mcmcRunDir = ""
        elif (len(oo.mcmcRunDir) > 0) and (oo.mcmcRunDir[-1] != "/"):
            oo.mcmcRunDir += "/"

        #  H shape    100 x 9
        Hbf = oo.Hbf

        RHS = _N.empty((oo.histknots, 1))

        cInds = _N.arange(oo.iHistKnotBeginFixed, oo.histknots)
        vInds = _N.arange(0, oo.iHistKnotBeginFixed)
        RHS[cInds, 0] = 0

        Msts = []
        for m in range(ooTR):
            Msts.append(_N.where(oo.y[m] == 1)[0])
        HcM = _N.empty((len(vInds), len(vInds)))

        HbfExpd = _N.empty((oo.histknots, ooTR, oo.N + 1))
        #  HbfExpd is 11 x M x 1200
        #  find the mean.  For the HISTORY TERM
        for i in range(oo.histknots):
            for m in range(oo.TR):
                sts = Msts[m]
                HbfExpd[i, m, 0:sts[0]] = 0
                for iss in range(len(sts) - 1):
                    t0 = sts[iss]
                    t1 = sts[iss + 1]
                    HbfExpd[i, m, t0 + 1:t1 + 1] = Hbf[0:t1 - t0, i]
                HbfExpd[i, m, sts[-1] + 1:] = 0

        _N.dot(oo.B.T, oo.aS, out=BaS)
        if oo.hS is None:
            oo.hS = _N.zeros(oo.histknots)

        _N.dot(Hbf, oo.hS, out=oo.loghist)
        oo.stitch_Hist(ARo, oo.loghist, Msts)

        K = _N.empty((oo.TR, oo.N + 1, oo.k))  #  kalman gain

        iterBLOCKS = oo.ITERS // oo.peek
        smpx_tmp = _N.empty((oo.TR, oo.N + 1, oo.k))

        ##  ORDER OF SAMPLING
        ##  f_xx, f_V
        ##  BINARY state
        ##  DA:  PG, kpOws
        ##  history, build ARo
        ##  psth
        ##  offset
        ##  DA:  latent state
        ##  AR coefficients
        ##  q2
        oo.gau_var = _N.array(oo.ws)

        #iterBLOCKS = 1
        #oo.peek = 1
        for itrB in range(iterBLOCKS):
            it = itrB * oo.peek
            if it > 0:
                print("it: %(it)d    mnStd  %(mnstd).3f   m %(m).3f" % {
                    "it": itrB * oo.peek,
                    "mnstd": oo.mnStds[it - 1],
                    "m": oo.m[0]
                })

            #tttA = _tm.time()
            if interrupted:
                break
            for it in range(itrB * oo.peek, (itrB + 1) * oo.peek):

                lowsts = _N.where(oo.Z[:, 0] == 1)
                #print "lowsts   %s" % str(lowsts)
                t1 = _tm.time()
                oo.f_x[:, 0] = oo.x00
                if it == 0:
                    for m in range(ooTR):
                        oo.f_V[m, 0] = oo.s2_x00
                else:
                    oo.f_V[:, 0] = _N.mean(oo.f_V[:, 1:], axis=1)

                    t2 = _tm.time()
                t3 = _tm.time()

                ######  PG generate
                for m in range(ooTR):
                    lw.rpg_devroye(oo.rn,
                                   oo.smpx[m, 2:, 0] + oo.us[m] + BaS +
                                   ARo[m] + oo.knownSig[m],
                                   out=oo.ws[m])  ######  devryoe
                    #oo.smpx[m, 2:, 0] + oo.us[m] + BaS + ARo[m] + oo.knownSig[m], out=oo.ws[m])  ######  devryoe
                    #lw.rpg_devroye(oo.rn, zsmpx[m] + oo.us[m] + BaS + ARo[m] + oo.knownSig[m], out=oo.ws[m])  ######  devryoe  ####TRD change

                _N.divide(oo.kp, oo.ws, out=kpOws)

                if oo.dohist:
                    O = kpOws - oo.smpx[..., 2:, 0] - oo.us.reshape(
                        (ooTR, 1)) - BaS - oo.knownSig
                    #O = kpOws - zsmpx - oo.us.reshape((ooTR, 1)) - BaS -  oo.knownSig

                    for ii in range(len(vInds)):
                        #print("i   %d" % i)
                        #print(_N.sum(HbfExpd[i]))
                        i = vInds[ii]
                        for jj in range(len(vInds)):
                            j = vInds[jj]
                            #print("j   %d" % j)
                            #print(_N.sum(HbfExpd[j]))
                            HcM[ii,
                                jj] = _N.sum(oo.ws * HbfExpd[i] * HbfExpd[j])

                        RHS[ii, 0] = _N.sum(oo.ws * HbfExpd[i] * O)
                        for cj in cInds:
                            RHS[ii, 0] -= _N.sum(
                                oo.ws * HbfExpd[i] * HbfExpd[cj]) * RHS[cj, 0]

                    # print HbfExpd
                    # print HcM
                    # print RHS[vInds]
                    vm = _N.linalg.solve(HcM, RHS[vInds])
                    Cov = _N.linalg.inv(HcM)
                    cfs = _N.random.multivariate_normal(vm[:, 0], Cov, size=1)

                    RHS[vInds, 0] = cfs[0]
                    oo.smp_hS[:, it] = RHS[:, 0]

                    #RHS[2:6, 0] = vm[:, 0]
                    #print HcM
                    #vv = _N.dot(Hbf, RHS)
                    #print vv.shape
                    #print oo.loghist.shape
                    _N.dot(Hbf, RHS[:, 0], out=oo.loghist)
                    oo.smp_hist[:, it] = oo.loghist
                    oo.stitch_Hist(ARo, oo.loghist, Msts)

                ########     PSTH sample  Do PSTH after we generate zs
                if oo.bpsth:
                    Oms = kpOws - oo.smpx[..., 2:,
                                          0] - ARo - oous_rs - oo.knownSig
                    #Oms  = kpOws - zsmpx - ARo - oous_rs - oo.knownSig
                    _N.einsum("mn,mn->n", oo.ws, Oms, out=smWimOm)  #  sum over
                    ilv_f = _N.diag(_N.sum(oo.ws, axis=0))
                    _N.fill_diagonal(lv_f, 1. / _N.diagonal(ilv_f))
                    lm_f = _N.dot(lv_f, smWimOm)  #  nondiag of 1./Bi are inf
                    #  now sample
                    iVAR = _N.dot(oo.B, _N.dot(ilv_f, oo.B.T)) + iD_f
                    VAR = _N.linalg.inv(iVAR)  #  knots x knots
                    #iBDBW = _N.linalg.inv(BDB + lv_f)   # BDB not diag
                    #Mn    = oo.u_a + _N.dot(DB, _N.dot(iBDBW, lm_f - BTua))

                    Mn = oo.u_a + _N.dot(
                        DB, _N.linalg.solve(BDB + lv_f, lm_f - BTua))
                    oo.aS = _N.random.multivariate_normal(Mn, VAR,
                                                          size=1)[0, :]
                    oo.smp_aS[it, :] = oo.aS

                    #iBDBW = _N.linalg.inv(BDB + lv_f)   # BDB not diag
                    #Mn    = oo.u_a + _N.dot(DB, _N.dot(iBDBW, lm_f - BTua))
                    #oo.aS   = _N.random.multivariate_normal(Mn, VAR, size=1)[0, :]
                    #oo.smp_aS[it, :] = oo.aS
                else:
                    oo.aS[:] = 0
                _N.dot(oo.B.T, oo.aS, out=BaS)

                ########     per trial offset sample
                #Ons  = kpOws - zsmpx - ARo - BaS - oo.knownSig
                Ons = kpOws - oo.smpx[..., 2:, 0] - ARo - BaS - oo.knownSig

                #  solve for the mean of the distribution
                H = _N.ones((oo.TR - 1, oo.TR - 1)) * _N.sum(oo.ws[0])
                uRHS = _N.empty(oo.TR - 1)
                for dd in range(1, oo.TR):
                    H[dd - 1, dd - 1] += _N.sum(oo.ws[dd])
                    uRHS[dd - 1] = _N.sum(oo.ws[dd] * Ons[dd] -
                                          oo.ws[0] * Ons[0])

                MM = _N.linalg.solve(H, uRHS)
                Cov = _N.linalg.inv(H)

                oo.us[1:] = _N.random.multivariate_normal(MM, Cov, size=1)
                oo.us[0] = -_N.sum(oo.us[1:])
                oo.smp_u[:, it] = oo.us

                t4 = _tm.time()
                ####  Sample latent state
                oo.gau_obs = kpOws - BaS - ARo - oous_rs - oo.knownSig
                #oo.gau_obs = _N.dot(izd, kpOws - BaS - ARo - oous_rs - oo.knownSig)
                #oo.copyParams(oo.F0, oo.q2)
                #  (MxM)  (MxN) = (MxN)  (Rv is MxN)
                #_N.dot(_N.dot(izd, izd), 1. / oo.ws, out=oo.gau_var)
                oo.gau_var = 1 / oo.ws

                t5 = _tm.time()

                _kfar.armdl_FFBS_1itrMP(oo.gau_obs, oo.gau_var, oo.Fs,
                                        _N.linalg.inv(oo.Fs), oo.q2, oo.Ns,
                                        oo.ks, oo.f_x, oo.f_V, oo.p_x, oo.p_V,
                                        smpx_tmp, K)

                oo.smpx[:, 2:] = smpx_tmp
                oo.smpx[:, 1, 0:ook - 1] = oo.smpx[:, 2, 1:]
                oo.smpx[:, 0, 0:ook - 2] = oo.smpx[:, 2, 2:]

                if oo.doBsmpx and (it % oo.BsmpxSkp == 0):
                    oo.Bsmpx[:, it // oo.BsmpxSkp, 2:] = oo.smpx[:, 2:, 0]

                stds = _N.std(oo.smpx[:, 2:, 0], axis=1)
                oo.mnStds[it] = _N.mean(stds, axis=0)
                ###
                _arcfs.ARcfSmpl(ooN + 1,
                                ook,
                                oo.AR2lims,
                                oo.smpx[:, 1:, 0:ook],
                                oo.smpx[:, 0:, 0:ook - 1],
                                oo.q2,
                                oo.R,
                                oo.Cs,
                                oo.Cn,
                                alpR,
                                alpC,
                                oo.TR,
                                prior=oo.use_prior,
                                accepts=8,
                                aro=oo.ARord,
                                sig_ph0L=oo.sig_ph0L,
                                sig_ph0H=oo.sig_ph0H)
                oo.F_alfa_rep = alpR + alpC  #  new constructed
                prt, rank, f, amp = ampAngRep(oo.F_alfa_rep, f_order=True)
                #ut, wt = FilteredTimeseries(ooN+1, ook, oo.smpx[:, 1:, 0:ook], oo.smpx[:, :, 0:ook-1], oo.q2, oo.R, oo.Cs, oo.Cn, alpR, alpC, oo.TR)
                #ranks[it]    = rank
                oo.allalfas[it] = oo.F_alfa_rep

                for m in range(ooTR):
                    #oo.wts[m, it, :, :]   = wt[m, :, :, 0]
                    #oo.uts[m, it, :, :]   = ut[m, :, :, 0]
                    if not oo.bFixF:
                        oo.amps[it, :] = amp
                        oo.fs[it, :] = f

                oo.F0 = (-1 * _Npp.polyfromroots(oo.F_alfa_rep)[::-1].real)[1:]
                for tr in range(oo.TR):
                    oo.Fs[tr, 0] = oo.F0[:]

                #print "len(lwsts) %(l)d   len(hists) %(h)d" % {"l" : len(lwsts), "h" : len(hists)}
                # sts2chg = hists
                # #if (it > oo.startZ) and oo.doS and len(sts2chg) > 0:
                # if oo.doS and len(sts2chg) > 0:
                #     AL = 0.5*_N.sum(oo.smpx[sts2chg, 2:, 0]*oo.smpx[sts2chg, 2:, 0]*oo.ws[sts2chg])
                #     BRL = kpOws[sts2chg] - BaS - oous_rs[sts2chg] - ARo[sts2chg] - oo.knownSig[sts2chg]
                #     BL = _N.sum(oo.ws[sts2chg]*BRL*oo.smpx[sts2chg, 2:, 0])
                #     UL = BL / (2*AL)
                #     #sgL= 1/_N.sqrt(2*AL)
                #     sg2= 1./(2*AL)

                #     q2_pr = 0.0025  # 0.05**2
                #     u_pr  = 1.
                #     U = (u_pr * sg2 + UL * q2_pr) / (sg2 + q2_pr)
                #     sg= _N.sqrt((sg2*q2_pr) / (sg2 + q2_pr))

                #     #print "U  %(U).4f    UL %(UL).4f s  %(s).3f" % {"U" : U, "s" : sg, "UL" : UL}
                #     if _N.isnan(U):
                #         print("U is nan  UL %.4f" % UL)
                #         print("U is nan  AL %.4f" % AL)
                #         print("U is nan  BL %.4f" % BL)
                #         print("U is nan  BaS ")
                #         print("hists")
                #         print(hists)
                #         print("lwsts")
                #         print(lwsts)

                #     oo.s[1] = 1#U + sg*_N.random.randn()

                #     _N.fill_diagonal(sd01[0], oo.s[0])
                #     _N.fill_diagonal(sd01[1], oo.s[1])
                #     #print oo.s[1]
                #     oo.smp_ss[it] = oo.s[1]

                oo.a2 = oo.a_q2 + 0.5 * (ooTR * ooN + 2)  #  N + 1 - 1
                BB2 = oo.B_q2
                for m in range(ooTR):
                    #   set x00
                    #oo.x00[m]      = oo.smpx[m, 2]*0.1
                    oo.x00[m] = oo.smpx[m, 2] * 0.1

                    #####################    sample q2
                    rsd_stp = oo.smpx[m, 3:, 0] - _N.dot(
                        oo.smpx[m, 2:-1], oo.F0).T
                    BB2 += 0.5 * _N.dot(rsd_stp, rsd_stp.T)
                oo.q2[:] = _ss.invgamma.rvs(oo.a2, scale=BB2)

                oo.smp_q2[:, it] = oo.q2
                t7 = _tm.time()
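
The last step of each iteration draws the innovation variance q2 from its inverse-gamma conditional via `_ss.invgamma.rvs(oo.a2, scale=BB2)`. A one-line sketch with made-up shape and scale values:

import scipy.stats as _ss

# Hypothetical a2 (shape) and BB2 (scale); in the sampler these are
# accumulated from the residuals of the AR recursion.
q2_draw = _ss.invgamma.rvs(a=5.0, scale=2.0)
print(q2_draw)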
Example #36
def accuracy(C):
    ''' Compute accuracy given Numpy array confusion matrix C. Returns a floating point value '''

    return sum(np.diagonal(C)) / sum(sum(C))
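
A quick sanity check for `accuracy`, using a made-up 2x2 confusion matrix whose rows are true labels and columns are predictions:

import numpy as np

C = np.array([[50, 10],
              [5, 35]])  # hypothetical confusion matrix
# Correct predictions sit on the diagonal, so accuracy = trace / total
print(accuracy(C))  # (50 + 35) / 100 = 0.85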
Example #37
def get_cbnepath(params, rpath, wpath):

    S, T, beta, sigma, chi_n_vec, b_ellip, mu, l_tilde, bvec1, b_ss, n_ss, TPI_tol, EulDiff = params
    cpath = np.zeros((S, T + S - 2))
    bpath = np.append(bvec1.reshape((S - 1, 1)),
                      np.zeros((S - 1, T + S - 3)),
                      axis=1)
    npath = np.zeros((S, T + S - 2))
    EulErrPath_inter = np.zeros((S - 1, T + S - 2))
    EulErrPath_intra = np.zeros((S, T + S - 2))
    # Solve the incomplete remaining lifetime decisions of agents alive
    # in period t=1 but not born in period t=1
    #    cpath[S - 1, 0] = ((1 + rpath[0]) * bvec1[S - 2] +\
    #                       wpath[0] * nvec[S - 1])
    # get the labor saving decision for the oldest agent

    b_last = bpath[-1, 0]
    w0 = wpath[0]
    r0 = rpath[0]
    n_err_last_params = (sigma, chi_n_vec, b_ellip, mu, l_tilde)
    n_last = opt.fsolve(n_err_last,
                        x0=n_ss[-1],
                        args=(n_err_last_params, b_last, w0, r0),
                        xtol=TPI_tol)
    #    print(n_last)
    cpath[S - 1, 0] = w0 * n_last + (1 + r0) * b_last
    npath[-1, 0] = n_last
    print(cpath[-1, 0], w0, n_last, r0, b_last)
    pl_params = (S, beta, sigma, chi_n_vec, b_ellip, mu, l_tilde, TPI_tol,
                 EulDiff)
    for p in range(2, S):
        b_guess = np.diagonal(bpath[S - p:, :p - 1])
        n_guess = np.append([n_ss[S - p]],
                            np.diagonal(npath[S - p + 1:, :p - 1]))
        #        n_guess = 0.5 * n_ss[S - p: ] + 0.5
        #        n_guess = n_ss[S - p:]
        #        print(n_guess)
        bveclf, nveclf, cveclf, b_err_veclf, n_err_veclf = paths_life(
            pl_params, S - p + 1, bvec1[S - p - 1], rpath[:p], wpath[:p],
            b_guess, n_guess)
        #        print(len(wpath[:p]), len(nveclf), len(bveclf))
        # Insert the vector lifetime solutions diagonally (twist donut)
        # into the cpath, bpath, and EulErrPath matrices
        DiagMaskb = np.eye(p - 1, dtype=bool)
        DiagMaskc = np.eye(p, dtype=bool)
        bpath[S - p:, 1:p] = DiagMaskb * bveclf + bpath[S - p:, 1:p]
        cpath[S - p:, :p] = DiagMaskc * cveclf + cpath[S - p:, :p]
        #        print(cpath[-1, 0])
        npath[S - p:, :p] = DiagMaskc * nveclf + npath[S - p:, :p]
        EulErrPath_inter[S - p:, 1:p] = (DiagMaskb * b_err_veclf +
                                         EulErrPath_inter[S - p:, 1:p])
        EulErrPath_intra[S - p:, :p] = (DiagMaskc * n_err_veclf +
                                        EulErrPath_intra[S - p:, :p])
    # Solve for complete lifetime decisions of agents born in periods
    # 1 to T and insert the vector lifetime solutions diagonally (twist
    # donut) into the cpath, bpath, and EulErrPath matrices
    DiagMaskb = np.eye(S - 1, dtype=bool)
    DiagMaskc = np.eye(S, dtype=bool)
    for t in range(1, T):  # Go from periods 1 to T-1
        b_guess = np.diagonal(bpath[:, t - 1:t + S - 2])
        if t == 1:
            n_guess = np.append(n_ss[0], np.diagonal(npath[1:,
                                                           t - 1:t + S - 2]))
        else:
            n_guess = np.diagonal(npath[:, t - 2:t + S - 2])
        bveclf, nveclf, cveclf, b_err_veclf, n_err_veclf = paths_life(
            pl_params, 1, 0, rpath[t - 1:t + S - 1], wpath[t - 1:t + S - 1],
            b_guess, n_guess)
        # Insert the vector lifetime solutions diagonally (twist donut)
        # into the cpath, bpath, and EulErrPath matrices
        bpath[:, t:t + S - 1] = (DiagMaskb * bveclf + bpath[:, t:t + S - 1])
        cpath[:, t - 1:t + S - 1] = (DiagMaskc * cveclf +
                                     cpath[:, t - 1:t + S - 1])
        npath[:, t - 1:t + S - 1] = (DiagMaskc * nveclf +
                                     npath[:, t - 1:t + S - 1])
        EulErrPath_inter[:, t:t + S - 1] = (DiagMaskb * b_err_veclf +
                                            EulErrPath_inter[:, t:t + S - 1])
        EulErrPath_intra[:, t - 1:t + S -
                         1] = (DiagMaskc * n_err_veclf +
                               EulErrPath_intra[:, t - 1:t + S - 1])

    return cpath, bpath, npath, EulErrPath_inter, EulErrPath_intra
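
A minimal sketch, with made-up sizes, of the "twist donut" insertion used above: a boolean identity mask scatters a lifetime-decision vector along a diagonal of a path matrix without disturbing existing entries:

import numpy as np

S = 4
path = np.zeros((S, S))
lifetime = np.array([1.0, 2.0, 3.0, 4.0])  # hypothetical lifetime solution
DiagMask = np.eye(S, dtype=bool)
path = DiagMask * lifetime + path  # same pattern as the cpath/bpath updates
print(np.diagonal(path))  # [1. 2. 3. 4.]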
Example #38
def evaluateclassifier(features,
                       class_names,
                       n_exp,
                       classifier_name,
                       Params,
                       parameterMode,
                       perTrain=0.90):
    '''
    ARGUMENTS:
        features:     a list ([numOfClasses x 1]) whose elements contain numpy matrices of features.
                each matrix features[i] of class i is [n_samples x numOfDimensions]
        class_names:    list of class names (strings)
        n_exp:        number of cross-validation experiments
        classifier_name: svm or knn or randomforest
        Params:        list of classifier parameters (for parameter tuning during cross-validation)
        parameterMode:    0: choose parameters that lead to maximum overall classification ACCURACY
                1: choose parameters that lead to maximum overall f1 MEASURE
    RETURNS:
         bestParam:    the value of the input parameter that optimizes the selected performance measure
    '''

    # feature normalization:
    (features_norm, MEAN, STD) = normalizeFeatures(features)
    #features_norm = features;
    n_classes = len(features)
    ac_all = []
    f1_all = []
    precision_classes_all = []
    recall_classes_all = []
    f1_classes_all = []
    cms_all = []

    # compute total number of samples:
    n_samples_total = 0
    for f in features:
        n_samples_total += f.shape[0]
    if n_samples_total > 1000 and n_exp > 50:
        n_exp = 50
        print(
            "Number of training experiments changed to 50 due to high number of samples"
        )
    if n_samples_total > 2000 and n_exp > 10:
        n_exp = 10
        print(
            "Number of training experiments changed to 10 due to high number of samples"
        )

    for Ci, C in enumerate(Params):
        # for each param value
        cm = numpy.zeros((n_classes, n_classes))
        for e in range(n_exp):
            # for each cross-validation iteration:
            print("Param = {0:.5f} - classifier Evaluation "
                  "Experiment {1:d} of {2:d}".format(C, e + 1, n_exp))
            # split features:
            f_train, f_test = randSplitFeatures(features_norm, perTrain)
            # train multi-class svms:
            if classifier_name == "svm":
                classifier = trainSVM(f_train, C)
            elif classifier_name == "svm_rbf":
                classifier = trainSVM_RBF(f_train, C)
            elif classifier_name == "knn":
                classifier = trainKNN(f_train, C)
            elif classifier_name == "randomforest":
                classifier = trainRandomForest(f_train, C)
            elif classifier_name == "gradientboosting":
                classifier = trainGradientBoosting(f_train, C)
            elif classifier_name == "extratrees":
                classifier = trainExtraTrees(f_train, C)

            cmt = numpy.zeros((n_classes, n_classes))
            for c1 in range(n_classes):
                n_test_samples = len(f_test[c1])
                res = numpy.zeros((n_test_samples, 1))
                for ss in range(n_test_samples):
                    [res[ss],
                     _] = classifierWrapper(classifier, classifier_name,
                                            f_test[c1][ss])
                for c2 in range(n_classes):
                    cmt[c1][c2] = float(len(numpy.nonzero(res == c2)[0]))
            cm = cm + cmt
        cm = cm + 0.0000000010
        rec = numpy.zeros((cm.shape[0], ))
        pre = numpy.zeros((cm.shape[0], ))

        for ci in range(cm.shape[0]):
            rec[ci] = cm[ci, ci] / numpy.sum(cm[ci, :])
            pre[ci] = cm[ci, ci] / numpy.sum(cm[:, ci])
        precision_classes_all.append(pre)
        recall_classes_all.append(rec)
        f1 = 2 * rec * pre / (rec + pre)
        f1_classes_all.append(f1)
        ac_all.append(numpy.sum(numpy.diagonal(cm)) / numpy.sum(cm))

        cms_all.append(cm)
        f1_all.append(numpy.mean(f1))

    print("\t\t, end=" "")
    for i, c in enumerate(class_names):
        if i == len(class_names) - 1:
            print("{0:s}\t\t".format(c), end="")
        else:
            print("{0:s}\t\t\t".format(c), end="")
    print("OVERALL")
    print("\tC", end="")
    for c in class_names:
        print("\tPRE\tREC\tf1", end="")
    print("\t{0:s}\t{1:s}".format("ACC", "f1"))
    best_ac_ind = numpy.argmax(ac_all)
    best_f1_ind = numpy.argmax(f1_all)
    for i in range(len(precision_classes_all)):
        print("\t{0:.3f}".format(Params[i]), end="")
        for c in range(len(precision_classes_all[i])):
            print("\t{0:.1f}\t{1:.1f}\t{2:.1f}".format(
                100.0 * precision_classes_all[i][c],
                100.0 * recall_classes_all[i][c],
                100.0 * f1_classes_all[i][c]),
                  end="")
        print("\t{0:.1f}\t{1:.1f}".format(100.0 * ac_all[i],
                                          100.0 * f1_all[i]),
              end="")
        if i == best_f1_ind:
            print("\t best f1", end="")
        if i == best_ac_ind:
            print("\t best Acc", end="")
        print("")

    if parameterMode == 0:  # keep parameters that maximize overall classification accuracy:
        print("Confusion Matrix:")
        printConfusionMatrix(cms_all[best_ac_ind], class_names)
        return Params[best_ac_ind]
    elif parameterMode == 1:  # keep parameters that maximize overall f1 measure:
        print("Confusion Matrix:")
        printConfusionMatrix(cms_all[best_f1_ind], class_names)
        return Params[best_f1_ind]
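
A toy illustration, with a hypothetical 2-class confusion matrix, of the per-class metrics computed in the loop above: recall normalises each diagonal entry by its row sum, precision by its column sum:

import numpy as np

cm = np.array([[40.0, 10.0],
               [5.0, 45.0]])
rec = np.diagonal(cm) / cm.sum(axis=1)   # [0.8, 0.9]
pre = np.diagonal(cm) / cm.sum(axis=0)   # [0.889, 0.818]
acc = np.diagonal(cm).sum() / cm.sum()   # 0.85
print(rec, pre, acc)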
Example #39
    def diagonal_err(self, cov=None):
        return np.sqrt(np.diagonal(cov))
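
A tiny illustration of `diagonal_err`: the square root of a covariance matrix's diagonal recovers the per-parameter standard deviations (made-up 2x2 input):

import numpy as np

cov = np.array([[4.0, 0.5],
                [0.5, 9.0]])  # hypothetical covariance matrix
print(np.sqrt(np.diagonal(cov)))  # [2. 3.]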
Example #40
def next_move_by_policy(board_state, winning_length, side, expert=True):
    from datetime import datetime
    random.seed(datetime.now().timestamp())  # seed must be a number/str in Python 3.11+
    if len(list(available_moves(board_state))) == len(board_state[0]) * len(
            board_state[0]):
        move = [
            random.randint(
                math.floor(float((len(board_state[0]) - 1) / 2.0)) - 1,
                math.ceil(float((len(board_state[0]) - 1) / 2.0)) + 1),
            random.randint(
                math.floor(float((len(board_state[0]) - 1) / 2.0)) - 1,
                math.ceil(float((len(board_state[0]) - 1) / 2.0)) + 1)
        ]
        return move
    elif len(list(available_moves(
            board_state))) == len(board_state[0]) * len(board_state[0]) - 1:
        while True:
            move = [
                random.randint(
                    math.floor(float((len(board_state[0]) - 1) / 2.0)) - 1,
                    math.ceil(float((len(board_state[0]) - 1) / 2.0)) + 1),
                random.randint(
                    math.floor(float((len(board_state[0]) - 1) / 2.0)) - 1,
                    math.ceil(float((len(board_state[0]) - 1) / 2.0)) + 1)
            ]
            if tuple(move) in list(available_moves(board_state)):
                return move

    original_state = np.copy(board_state)
    board_state = board_state[0, :, :, 0]
    board_width = len(board_state)
    board_height = len(board_state[0])
    new_length = 0
    new_length_b = 0
    new_position = []
    new_position_b = []

    # check rows
    for x in range(board_width):
        max_length, position = _possible_move(board_state[x, :],
                                              winning_length, side)
        max_length_b, position_b = _possible_move(board_state[x, :],
                                                  winning_length, -side)

        if max_length > new_length:
            new_length = max_length
            new_position.clear()
            new_position.append([x, position])
        elif max_length == new_length:
            new_position.append([x, position])

        if max_length_b > new_length_b:
            new_length_b = max_length_b
            new_position_b.clear()
            new_position_b.append([x, position_b])
        elif max_length_b == new_length_b:
            new_position_b.append([x, position_b])

    # check columns
    for y in range(board_height):
        max_length, position = _possible_move(board_state[:, y],
                                              winning_length, side)
        max_length_b, position_b = _possible_move(board_state[:, y],
                                                  winning_length, -side)

        if max_length > new_length:
            new_length = max_length
            new_position.clear()
            new_position.append([position, y])
        elif max_length == new_length:
            new_position.append([position, y])

        if max_length_b > new_length_b:
            new_length_b = max_length_b
            new_position_b.clear()
            new_position_b.append([position_b, y])
        elif max_length_b == new_length_b:
            new_position_b.append([position_b, y])

    # Check diagonals
    for d in range(0, (board_height - winning_length + 1)):
        max_length, position = _possible_move(np.diagonal(board_state, d),
                                              winning_length, side)
        max_length_b, position_b = _possible_move(np.diagonal(board_state, d),
                                                  winning_length, -side)

        if max_length > new_length:
            new_length = max_length
            new_position.clear()
            new_position.append([position, position + d])
        elif max_length == new_length:
            new_position.append([position, position + d])

        if max_length_b > new_length_b:
            new_length_b = max_length_b
            new_position_b.clear()
            new_position_b.append([position_b, position_b + d])
        elif max_length_b == new_length_b:
            new_position_b.append([position_b, position_b + d])

    for d in range(1, (board_height - winning_length + 1)):
        max_length, position = _possible_move(np.diagonal(board_state, -d),
                                              winning_length, side)
        max_length_b, position_b = _possible_move(np.diagonal(board_state, -d),
                                                  winning_length, -side)

        if max_length > new_length:
            new_length = max_length
            new_position.clear()
            new_position.append([position + d, position])
        elif max_length == new_length:
            new_position.append([position + d, position])

        if max_length_b > new_length_b:
            new_length_b = max_length_b
            new_position_b.clear()
            new_position_b.append([position_b + d, position_b])
        elif max_length_b == new_length_b:
            new_position_b.append([position_b + d, position_b])

    for d in range(0, (board_height - winning_length + 1)):
        max_length, position = _possible_move(
            np.diagonal(np.fliplr(board_state), d), winning_length, side)
        max_length_b, position_b = _possible_move(
            np.diagonal(np.fliplr(board_state), d), winning_length, -side)

        if max_length > new_length:
            new_length = max_length
            new_position.clear()
            new_position.append(
                [position, len(board_state[0]) - 1 - position - d])
        elif max_length == new_length:
            new_position.append(
                [position, len(board_state[0]) - 1 - position - d])

        if max_length_b > new_length_b:
            new_length_b = max_length_b
            new_position_b.clear()
            # print(np.diagonal(np.fliplr(board_state), d))
            # print('max_length_b', max_length_b, 'position_b', position_b)
            new_position_b.append(
                [position_b,
                 len(board_state[0]) - 1 - position_b - d])
        elif max_length_b == new_length_b:
            new_position_b.append(
                [position_b,
                 len(board_state[0]) - 1 - position_b - d])

    for d in range(1, (board_height - winning_length + 1)):
        max_length, position = _possible_move(
            np.diagonal(np.fliplr(board_state), -d), winning_length, side)
        max_length_b, position_b = _possible_move(
            np.diagonal(np.fliplr(board_state), -d), winning_length, -side)

        if max_length > new_length:
            new_length = max_length
            new_position.clear()
            new_position.append(
                [position + d,
                 len(board_state[0]) - 1 - position])
        elif max_length == new_length:
            new_position.append(
                [position + d,
                 len(board_state[0]) - 1 - position])

        if max_length_b > new_length_b:
            new_length_b = max_length_b
            new_position_b.clear()
            new_position_b.append(
                [position_b + d,
                 len(board_state[0]) - 1 - position_b])
        elif max_length_b == new_length_b:
            new_position_b.append(
                [position_b + d,
                 len(board_state[0]) - 1 - position_b])

    if new_length == winning_length - 1:
        return random.choice(new_position)

    if expert:
        if new_length_b >= winning_length - 2:
            return random.choice(new_position_b)

    if new_length != 0:
        return random.choice(new_position)
    else:
        if new_length_b != 0:
            return random.choice(new_position_b)
        else:
            return random.choice(list(available_moves(original_state)))
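
A quick sketch of the diagonal scanning used above: `np.diagonal` with a positive or negative offset walks the super- and sub-diagonals, and flipping the board left-right exposes the anti-diagonals:

import numpy as np

board = np.arange(16).reshape(4, 4)       # made-up 4x4 board
print(np.diagonal(board, 1))              # [ 1  6 11]
print(np.diagonal(np.fliplr(board), -1))  # [ 7 10 13], an anti-diagonal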
Example #41
def silhouette_samples(X, labels, *, metric='euclidean', **kwds):
    """Compute the Silhouette Coefficient for each sample.

    The Silhouette Coefficient is a measure of how well samples are clustered
    with samples that are similar to themselves. Clustering models with a high
    Silhouette Coefficient are said to be dense, where samples in the same
    cluster are similar to each other, and well separated, where samples in
    different clusters are not very similar to each other.

    The Silhouette Coefficient is calculated using the mean intra-cluster
    distance (``a``) and the mean nearest-cluster distance (``b``) for each
    sample.  The Silhouette Coefficient for a sample is ``(b - a) / max(a,
    b)``.
    Note that Silhouette Coefficient is only defined if number of labels
    is 2 <= n_labels <= n_samples - 1.

    This function returns the Silhouette Coefficient for each sample.

    The best value is 1 and the worst value is -1. Values near 0 indicate
    overlapping clusters.

    Read more in the :ref:`User Guide <silhouette_coefficient>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.

    labels : array, shape = [n_samples]
             label values for each sample

    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
        the distance array itself, use "precomputed" as the metric. Precomputed
        distance matrices must have 0 along the diagonal.

    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a ``scipy.spatial.distance`` metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Returns
    -------
    silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each samples.

    References
    ----------

    .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Computational
       and Applied Mathematics 20: 53-65.
       <https://www.sciencedirect.com/science/article/pii/0377042787901257>`_

    .. [2] `Wikipedia entry on the Silhouette Coefficient
       <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_

    """
    X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])

    # Check for non-zero diagonal entries in precomputed distance matrix
    if metric == 'precomputed':
        atol = np.finfo(X.dtype).eps * 100
        if np.any(np.abs(np.diagonal(X)) > atol):
            raise ValueError(
                'The precomputed distance matrix contains non-zero '
                'elements on the diagonal. Use np.fill_diagonal(X, 0).')

    le = LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples = len(labels)
    label_freqs = np.bincount(labels)
    check_number_of_labels(len(le.classes_), n_samples)

    kwds['metric'] = metric
    reduce_func = functools.partial(_silhouette_reduce,
                                    labels=labels,
                                    label_freqs=label_freqs)
    results = zip(
        *pairwise_distances_chunked(X, reduce_func=reduce_func, **kwds))
    intra_clust_dists, inter_clust_dists = results
    intra_clust_dists = np.concatenate(intra_clust_dists)
    inter_clust_dists = np.concatenate(inter_clust_dists)

    denom = (label_freqs - 1).take(labels, mode='clip')
    with np.errstate(divide="ignore", invalid="ignore"):
        intra_clust_dists /= denom

    sil_samples = inter_clust_dists - intra_clust_dists
    with np.errstate(divide="ignore", invalid="ignore"):
        sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
    # nan values are for clusters of size 1, and should be 0
    return np.nan_to_num(sil_samples)
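
A hedged usage sketch with toy data, two tight and well-separated clusters, so the per-sample silhouette values should approach 1 (assuming the scikit-learn helpers used above are importable):

import numpy as np

X = np.array([[0.0, 0.0], [0.1, 0.0],
              [5.0, 5.0], [5.1, 5.0]])
labels = np.array([0, 0, 1, 1])
print(silhouette_samples(X, labels))  # four values close to 1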
Example #42
def evaluateClassifier(features,
                       ClassNames,
                       nExp,
                       ClassifierName,
                       Params,
                       parameterMode,
                       perTrain=0.90):
    '''
    ARGUMENTS:
        features:     a list ([numOfClasses x 1]) whose elements contain numpy matrices of features.
                each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        ClassNames:    list of class names (strings)
        nExp:        number of cross-validation experiments
        ClassifierName: svm or knn or randomforest
        Params:        list of classifier parameters (for parameter tuning during cross-validation)
        parameterMode:    0: choose parameters that lead to maximum overall classification ACCURACY
                1: choose parameters that lead to maximum overall F1 MEASURE
    RETURNS:
         bestParam:    the value of the input parameter that optimizes the selected performance measure
    '''

    # feature normalization:
    (featuresNorm, MEAN, STD) = normalizeFeatures(features)
    #featuresNorm = features;
    nClasses = len(features)
    CAll = []
    acAll = []
    F1All = []
    PrecisionClassesAll = []
    RecallClassesAll = []
    ClassesAll = []
    F1ClassesAll = []
    CMsAll = []

    # compute total number of samples:
    nSamplesTotal = 0
    for f in features:
        nSamplesTotal += f.shape[0]
    if nSamplesTotal > 1000 and nExp > 50:
        nExp = 50
        print(
            "Number of training experiments changed to 50 due to high number of samples"
        )
    if nSamplesTotal > 2000 and nExp > 10:
        nExp = 10
        print(
            "Number of training experiments changed to 10 due to high number of samples"
        )

    for Ci, C in enumerate(Params):  # for each param value
        CM = numpy.zeros((nClasses, nClasses))
        for e in range(nExp):  # for each cross-validation iteration:
            print(
                "Param = {0:.5f} - Classifier Evaluation Experiment {1:d} of {2:d}"
                .format(C, e + 1, nExp))
            # split features:
            featuresTrain, featuresTest = randSplitFeatures(
                featuresNorm, perTrain)
            # train multi-class svms:
            if ClassifierName == "svm":
                Classifier = trainSVM(featuresTrain, C)
            elif ClassifierName == "svm_rbf":
                Classifier = trainSVM_RBF(featuresTrain, C)
            elif ClassifierName == "knn":
                Classifier = trainKNN(featuresTrain, C)
            elif ClassifierName == "randomforest":
                Classifier = trainRandomForest(featuresTrain, C)
            elif ClassifierName == "gradientboosting":
                Classifier = trainGradientBoosting(featuresTrain, C)
            elif ClassifierName == "extratrees":
                Classifier = trainExtraTrees(featuresTrain, C)

            CMt = numpy.zeros((nClasses, nClasses))
            for c1 in range(nClasses):
                #Results = Classifier.pred(featuresTest[c1])
                nTestSamples = len(featuresTest[c1])
                Results = numpy.zeros((nTestSamples, 1))
                for ss in range(nTestSamples):
                    [Results[ss],
                     _] = classifierWrapper(Classifier, ClassifierName,
                                            featuresTest[c1][ss])
                for c2 in range(nClasses):
                    CMt[c1][c2] = float(len(numpy.nonzero(Results == c2)[0]))
            CM = CM + CMt
        CM = CM + 0.0000000010
        Rec = numpy.zeros((CM.shape[0], ))
        Pre = numpy.zeros((CM.shape[0], ))

        for ci in range(CM.shape[0]):
            Rec[ci] = CM[ci, ci] / numpy.sum(CM[ci, :])
            Pre[ci] = CM[ci, ci] / numpy.sum(CM[:, ci])
        PrecisionClassesAll.append(Pre)
        RecallClassesAll.append(Rec)
        F1 = 2 * Rec * Pre / (Rec + Pre)
        F1ClassesAll.append(F1)
        acAll.append(numpy.sum(numpy.diagonal(CM)) / numpy.sum(CM))

        CMsAll.append(CM)
        F1All.append(numpy.mean(F1))
        # print "{0:6.4f}{1:6.4f}{2:6.1f}{3:6.1f}".format(nu, g, 100.0*acAll[-1], 100.0*F1All[-1])

    print("\t\t, end=" "")
    for i, c in enumerate(ClassNames):
        if i == len(ClassNames) - 1:
            print("{0:s}\t\t".format(c), end="")
        else:
            print("{0:s}\t\t\t".format(c), end="")
    print("OVERALL")
    print("\tC", end="")
    for c in ClassNames:
        print("\tPRE\tREC\tF1", end="")
    print("\t{0:s}\t{1:s}".format("ACC", "F1"))
    bestAcInd = numpy.argmax(acAll)
    bestF1Ind = numpy.argmax(F1All)
    for i in range(len(PrecisionClassesAll)):
        print("\t{0:.3f}".format(Params[i]), end="")
        for c in range(len(PrecisionClassesAll[i])):
            print("\t{0:.1f}\t{1:.1f}\t{2:.1f}".format(
                100.0 * PrecisionClassesAll[i][c],
                100.0 * RecallClassesAll[i][c], 100.0 * F1ClassesAll[i][c]),
                  end="")
        print("\t{0:.1f}\t{1:.1f}".format(100.0 * acAll[i], 100.0 * F1All[i]),
              end="")
        if i == bestF1Ind:
            print("\t best F1", end="")
        if i == bestAcInd:
            print("\t best Acc", end="")
        print("")

    if parameterMode == 0:  # keep parameters that maximize overall classification accuracy:
        print("Confusion Matrix:")
        printConfusionMatrix(CMsAll[bestAcInd], ClassNames)
        return Params[bestAcInd]
    elif parameterMode == 1:  # keep parameters that maximize overall F1 measure:
        print("Confusion Matrix:")
        printConfusionMatrix(CMsAll[bestF1Ind], ClassNames)
        return Params[bestF1Ind]
Example #43
def population_vector_correlation(stack_0, stack_1, **kwargs):
    """Calculates the bin-wise correlation between two stacks of rate maps
    
    Each stack corresponds to a separate Task, or trial. Each layer is the 
    ratemap for a single cell from that Task. The same units should be given in
    the same order in each stack.
    
    Take a single column through the stack (i.e. 1 single bin/location in
    arena, with a firing rate for each cell), from each stack
    
    In the original MatLab implementation, three output modes were supported
        * 1D: (`numYbins`) - iterate over `i`
            1) Take a 2D slice from each stack - all cells at all `X` positions at a
              single `Y` position `i`
            2) Reshape from 2D to 1D 
            3) Calculate the Pearson correlation coefficient between the two 1D
              arrays
            4) The value of `pv_corr_1d[i]` is the Pearson correlation coefficient
              arising from `Y` position `i`
        * 2D (`numXbins` x `numYbins`) - iterate over `i`
            1) Take a 2D slice from each stack - all cells at all `X` positions at a
              single `Y` position `i`
            2) Calculate the 2D array (`numXbins` x `numYbins`) where the `[j,k]`th
              value is the Pearson correlation coefficient between all
              observations at the `j`'th `X` location in `stack_left` and the `k`'th
              location in `stack_right`
            3) The `i`'th row of `pv_corr_2d` is the DIAGONAL of the correlation matrix
              i.e. where `j==k`, i.e. the correlation of the SAME location in
              each stack for all observations (`numCells`)
        * 3D (`numXbins` x `numYbins` x iteration(=`numYbins`))
            Same as 2D BUT take the whole correlation matrix, not the diagonal
            i.e. the full `[j,k]` correlation between all X locations
    
    A note on correlation in Numpy vs Matlab
    
    Matlab's `corr(a, b)` function returns the correlation of `a` with `b`.
    Numpy's `corrcoef` function returns the full normalised covariance matrix,
    which is:
            aa  ab
            ba  bb
    The normalised covariance matrix *should* be Hermitian, but due to
    floating point accuracy, this is not actually guaranteed.
    The MatLab behaviour can be reproduced by taking either the [0, 1] or
    [1, 0] element of the normalised covariance matrix.

    If `a`, `b` are 2D matrices, then they should have shape `(num_variables, num_observations)`
    In the case of this function, where the iterator is over the `Y` values
    of the rate map, that means: `(x_bins, num_cells)`

    Parameters
    ----------
    stack_0: 3D array -or- list of 2D arrays
    stack_1: 3D array -or- list of 2D arrays
        `stack_x[i]` should return the `i`'th ratemap. This corresponds to a 
        constructor like:
            `np.zeros(num_layers, y_bins, x_bins)`
            
        Alternatively, a list or tuple of 2D arrays may be supplied:
            `stack_x` = (`ratemap_0`, `ratemap_1`, `ratemap_2`, ...)
    row_major: bool
        Direction of iteration. If `True`, then each row is iterated over in turn
        and correlation is calculated per row. 
        If `False`, then each column is iterated over in turn, and correlation is 
        calculated per column. 
        Default True (same behavior as in BNT)

    Returns
    -------
    (p1, p2, p3)
    p1: np.ndarray (1D, iterator x 1)
        Array of Pearson correlation coefficients. i'th value is given by the 
        correlation of the i'th flattened slice of stack_0 to the i'th
        flattened slice of stack_1
    p2: np.ndarray (2D, iterator x non-iterator)
        i'th row is the diagonal of the correlation matrix, i.e. the correlation
        of the same location (location i) in each stack, i.e. where j==k
    p3: np.ndarray(3D, iterator x non-iterator x non-iterator)
        i'th array is the entire correlation matrix, rather than just the diagonal

    Notes
    --------
    BNT.+analyses.populationVectorCorrelation

    Copyright (C) 2019 by Simon Ball
    """
    debug = kwargs.get("debug", False)
    row_major = kwargs.get("row_major", True)
    
    # Perform input validation and ensure we have a pair of 3D arrays
    stack_0, stack_1 = _handle_both_inputs(stack_0, stack_1)
    
    # _handle_ has ensured that both arrays meet the shape/type requirements
    # Iterate over Y (rows) by default; row_major=False iterates over X instead
    num_cells, y_bins, x_bins = stack_0.shape
    if row_major:
        iterator = y_bins
        non_iterator = x_bins
    else:
        iterator = x_bins
        non_iterator = y_bins
    
    if debug:
        print(f"Number of ratemaps: {num_cells}")
        print(f"Ratemap dimensions: {y_bins} x {x_bins}")
        print(f"Iterating over axis length {iterator} (row_major is {row_major})")

    p1 = np.zeros(iterator)
    p2 = np.zeros((iterator, non_iterator))
    p3 = np.zeros((iterator, non_iterator, non_iterator))

    for i in range(iterator):
        if row_major:
            left = stack_0[:, i, :].transpose()
            right = stack_1[:, i, :].transpose()
        else:
            left = stack_0[:, :, i].transpose()
            right = stack_1[:, :, i].transpose()
        
        # 1D
        # Reshape 2D array to a 1D array
        correlation_value = np.corrcoef(left.flatten(), right.flatten())[0,1]
        p1[i] = correlation_value
        
        # 2D, 3D
        correlation_matrix = np.corrcoef(left, right)[0:non_iterator, non_iterator:]
        p2[i, :] = np.diagonal(correlation_matrix)
        p3[i, :, :] = correlation_matrix

    return (p1, p2, p3)
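
# A minimal usage sketch of the function above, with made-up data. It assumes
# _handle_both_inputs accepts a pair of plain 3D arrays, as the validation
# step implies.
import numpy as np

rng = np.random.default_rng(0)
stack_a = rng.random((5, 6, 8))  # (num_cells, y_bins, x_bins)
stack_b = rng.random((5, 6, 8))
p1, p2, p3 = population_vector_correlation(stack_a, stack_b)
print(p1.shape, p2.shape, p3.shape)  # (6,) (6, 8) (6, 8, 8)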
Example #44
ext = 0.2236

theta_hat = differential_evolution(Sidious, bnds1)

##The answer to the first part of the problem set
theta_hat

theta_hat = np.matrix(
    [1, theta_hat.x[0], theta_hat.x[1], theta_hat.x[2], theta_hat.x[3]])

deriv = np.diff(theta_hat)

#We can take these steps since inserting the identity matrix between
#these two matrices changes nothing...#

sd = np.diagonal(np.dot(deriv.T, deriv))
sd

##I now do the same for table III

##First we redefine theta
theta = alpha_k, rho, psi, sigma_z, phi0

##theta_hat_2 = differential_evolution(Vitiate,bnds2)

theta_hat_2 = differential_evolution(Qfunk2, bnds2)

theta_hat_2 = np.matrix([
    1, 1, theta_hat_2.x[0], theta_hat_2.x[1], theta_hat_2.x[2],
    theta_hat_2.x[3]
])
Example #45
    def update_gradients_KL(self, variational_posterior):
        # import pdb; pdb.set_trace() # breakpoint 1
        # print("Updating Gradients")
        # print (self.variational_wi)
        # if self.stop<1:
        #     return
        # self.stop-=1
        # dL:
        # variational_posterior.mean.gradient -= variational_posterior.mean
        # variational_posterior.variance.gradient -= (1. -  (1. / (variational_posterior.variance))) * 0.5
        self.px_mu.gradient = 0
        self.px_lmatrix.gradient = 0
        self.variational_wi.gradient = 0
        # print self.variational_wi
        #self.variational_wi -= self.variational_wi.max(axis = 0)[None,:]

        # self.variational_wi = self.variational_wi/(self.variational_wi).sum(axis=0)

        mu = variational_posterior.mean
        S = variational_posterior.variance

        cov_inv = np.zeros((self.px_lmatrix.shape))
        cov_k = np.zeros((self.px_lmatrix.shape))
        ######################################################
        for k in range(self.px_lmatrix.shape[0]):
            cov_inv[k, :, :] = np.linalg.inv(self.px_lmatrix[k, :, :]).T.dot(
                np.linalg.inv(self.px_lmatrix[k, :, :]))
            cov_k[k, :, :] = np.dot(self.px_lmatrix[k, :, :],
                                    self.px_lmatrix[k, :, :].T)
        #######################################################

        # variational_wets = self.variational_wi
        # wets = self.wi

        # wi_max = self.variational_wi - self.variational_wi.max(axis = 0)#
        # variational_wets = np.exp(wi_max)/ np.exp(wi_max).sum(axis = 0)
        variational_wets = np.exp(self.variational_wi) / np.exp(
            self.variational_wi).sum(axis=0)
        wets = np.exp(self.wi) / np.exp(self.wi).sum(axis=0)

        mu_minus = self.px_mu[:, np.newaxis, :] - mu[np.newaxis, :, :]
        sigma_mu = np.zeros((mu_minus.shape))
        sigma2_S = np.zeros((mu_minus.shape[0], mu_minus.shape[1],
                             mu_minus.shape[2], mu_minus.shape[2]))
        sigma_S = np.zeros((sigma2_S.shape))
        sigma_S_sigma = np.zeros((sigma2_S.shape))
        mu_sigma_mu = np.zeros((mu_minus.shape[0], mu_minus.shape[1]))
        sigma_diag = np.diagonal(cov_inv.T)
        # sigma_inv1 = np.linalg.inv(self.px_var) #equal to cov_inv

        # per-component terms: sigma_mu = Sigma_k^{-1}(mu_k - mu_i), plus the
        # matrix products needed for the mean/variance/weight gradients below
        for k in range(mu_minus.shape[0]):
            for i in range(mu_minus.shape[1]):
                sigma_mu[k, i, :] = np.dot(cov_inv[k, :, :], mu_minus[k, i, :])
                sigma_S[k, i, :, :] = np.dot(cov_inv[k, :, :],
                                             np.diag(S[i, :]))
                sigma2_S[k, i, :, :] = np.dot(np.diag(S[i, :]),
                                              np.matrix(cov_inv[k, :, :])**2)
                sigma_S_sigma[k, i, :, :] = np.dot(sigma_mu[k, i, :][:, None],
                                                   sigma_mu[k, i, :][None, :])
                mu_sigma_mu[k, i] = np.dot(mu_minus[k, i, :][None, :],
                                           sigma_mu[k, i, :][:, None])

        variational_posterior.mean.gradient += (
            variational_wets[:, :, np.newaxis] * sigma_mu).sum(axis=0)
        variational_posterior.variance.gradient += 0.5 * (
            1. / S - (variational_wets[:, :, np.newaxis] *
                      sigma_diag[:, np.newaxis, :]).sum(axis=0))
        self.px_mu.gradient -= (variational_wets[:, :, np.newaxis] *
                                sigma_mu).sum(axis=1)
        # self.px_var.gradient -= 0.5 * (variational_wets[:,:,np.newaxis, np.newaxis] * ((np.linalg.inv(self.px_var))[:,np.newaxis, :,:] - sigma2_S
        #                                         - sigma_S_sigma) ).sum(axis=1)
        dL_dcov = 0.5 * (variational_wets[:, :, np.newaxis, np.newaxis] *
                         (cov_inv[:, np.newaxis, :, :] - sigma2_S -
                          sigma_S_sigma)).sum(axis=1)
        dL_dlmatrix = np.zeros((dL_dcov.shape))
        for k in range(mu_minus.shape[0]):
            dL_dlmatrix[k, :, :] = 2 * np.dot(dL_dcov[k, :, :],
                                              self.px_lmatrix[k, :, :])
        self.px_lmatrix.gradient -= dL_dlmatrix
        # print self.px_lmatrix
        # print 'test'
        # print dL_dlmatrix

        dL_dw = np.zeros((self.variational_wi.shape))
        ew = np.exp(self.variational_wi)
        # ew = np.exp(wi_max)
        sumew = ew.sum(axis=0)
        dL_dq = ((0.5 * (np.log(np.linalg.det(cov_k))[:, np.newaxis] +
                         (sigma_S).sum(axis=2).sum(axis=2) + mu_sigma_mu) -
                  (np.log(wets / variational_wets) - 1)))
        dq_dwi = ((sumew - ew) * ew) / (sumew**2)
        for i in range(mu_minus.shape[1]):
            dq_dw = np.diag(dq_dwi[:, i])
            for j in range(mu_minus.shape[0]):
                for k in range(mu_minus.shape[0]):
                    if j != k:
                        dq_dw[j, k] = -ew[j, i] * ew[k, i] / (sumew[i]**2)
            dL_dw[:, i] = np.dot(dq_dw, dL_dq[:, i])
        self.variational_wi.gradient -= dL_dw
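
# The dq_dw block above is the Jacobian of a softmax: with s = softmax(w),
# ds_j/dw_j = s_j * (1 - s_j) and ds_j/dw_k = -s_j * s_k for j != k.
# A standalone numerical check of that identity (sketch):
import numpy as np

w = np.array([0.3, -1.2, 0.7])
ew = np.exp(w)
s = ew / ew.sum()
jacobian = np.diag(s) - np.outer(s, s)
# matches the loop's ((sumew - ew) * ew) / sumew**2 on the diagonal
print(np.allclose(np.diagonal(jacobian), ((ew.sum() - ew) * ew) / ew.sum()**2))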
Example #46
def plot_confusion_matrix(cm=None,
                          labels=None,
                          cmap="Blues",
                          ax=None,
                          fontsize=12,
                          cbar=False,
                          title=None,
                          y_true=None,
                          y_pred=None,
                          **kwargs):
  r"""
  cm : a square matrix of raw counts
  kwargs : arguments for `odin.visual.plot_heatmap`
  """
  # TODO: new style for confusion matrix (using small and big dot)
  if cm is None:
    assert y_true is not None and y_pred is not None, \
      "Provide either cm explicitly or y_true and y_pred together"
    from sklearn.metrics import confusion_matrix
    cm = confusion_matrix(y_true=y_true, y_pred=y_pred)
  assert cm.shape[0] == cm.shape[1], \
    "Plot confusion matrix only applied for squared matrix"
  if labels is None:
    labels = ['#%d' % i for i in range(max(cm.shape))]
  # calculate F1
  N_row = np.sum(cm, axis=-1)
  N_col = np.sum(cm, axis=0)
  TP = np.diagonal(cm)
  FP = N_col - TP
  FN = N_row - TP
  precision = TP / (TP + FP)
  recall = TP / (TP + FN)
  F1 = 2 / (1 / precision + 1 / recall)
  F1[np.isnan(F1)] = 0.
  F1_mean = np.mean(F1)
  # normalize each row (true class) so it sums to 1
  nb_classes = cm.shape[0]
  cm = cm.astype('float32') / np.sum(cm, axis=1, keepdims=True)
  # generate annotation
  annotation = np.empty(shape=(nb_classes, nb_classes), dtype=object)
  for i, j in itertools.product(range(nb_classes), range(nb_classes)):
    if i == j:  # diagonal
      text = '%.2f\nF1:%.2f' % (cm[i, j], F1[i])
    else:
      text = '%.2f' % cm[i, j]
    annotation[i, j] = text
  # plotting
  return plot_heatmap(\
      data=cm,
      xticklabels=labels,
      yticklabels=labels,
      xlabel="Prediction",
      ylabel="True",
      cmap=cmap,
      ax=ax,
      fontsize=fontsize,
      cbar=cbar,
      cbar_title="Accuracy",
      annotation=annotation,
      text_colors=dict(diag='magenta', other='black', minrow='red'),
      title='%s(F1: %.3f)' % ('' if title is None else str(title), F1_mean),
      **kwargs)
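
# For reference, the F1 arithmetic used above, checked by hand on a tiny
# confusion matrix (rows = true class, columns = prediction):
import numpy as np

cm = np.array([[5, 1],
               [2, 4]])
TP = np.diagonal(cm)        # [5, 4]
FP = cm.sum(axis=0) - TP    # [2, 1]
FN = cm.sum(axis=1) - TP    # [1, 2]
precision = TP / (TP + FP)
recall = TP / (TP + FN)
F1 = 2 / (1 / precision + 1 / recall)  # harmonic mean, per class
print(F1)  # [0.769... 0.727...]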
Example #47
def olf_bulb_10(Nmitral, H_in, W_in, P_odor_in, dam):
    #    Nmitral = 10 #number of mitral cells
    Ngranule = np.copy(Nmitral)  #number of granule cells     pg. 383 of Li/Hop
    Ndim = Nmitral + Ngranule  #total number of cells
    t_inh = 25
    # time when inhalation starts
    t_exh = 205
    #time when exhalation starts
    finalt = 395
    # end time of the cycle

    #y = zeros(ndim,1);

    Sx = 1.43  #Sx,Sx2,Sy,Sy2 are parameters for the activation functions
    Sx2 = 0.143
    Sy = 2.86  #These are given in Li/Hopfield pg 382, slightly diff in her thesis
    Sy2 = 0.286
    th = 1  #threshold for the activation function

    tau_exh = 33.3333
    #Exhale time constant, pg. 382 of Li/Hop
    exh_rate = 1 / tau_exh

    alpha = .15  #decay rate for the neurons
    #Li/Hop have it as 1/7 or .142 on pg 383

    P_odor0 = np.zeros(Nmitral)  #odor pattern, no odor

    H0 = H_in  #weight matrix: to mitral from granule
    W0 = W_in  #weights: to granule from mitral

    Ib = np.ones((Nmitral, 1)) * .243  #initial external input to mitral cells
    Ic = np.ones(
        (Ngranule, 1)) * .1  #initial input to granule cells, these values are
    #given on pg 382 of Li/Hop

    signalflag = 1  # 0 for linear output, 1 for activation function

    noise = np.zeros((Ndim, 1))  #noise in inputs
    noiselevel = .00143
    noisewidth = 7  #noise correlation time, given pg 383 Li/Hop as 9, but 7 in thesis

    lastnoise = np.zeros((Ndim, 1))  #initial time of last noise pulse

    #******************************************************************************

    #CALCULATE FIXED POINTS

    #Calculating equilibrium value with no input
    rest0 = np.zeros((Ndim, 1))

    restequi = fsolve(lambda x: equi(x,Ndim,Nmitral,Sx,Sx2,Sy,Sy2,th,alpha,\
                                     t_inh,H0,W0,P_odor0,Ib,Ic,dam),rest0) #about 20 ms to run this

    np.random.seed(seed=23)
    #init0 = restequi+np.random.rand(Ndim)*.00143 #initial conditions plus some noise
    #for no odor input
    init0 = restequi + np.random.rand(
        Ndim) * .00143  #initial conditions plus some noise
    #for no odor input
    np.random.seed()
    #Now calculate equilibrium value with odor input

    lastnoise = lastnoise + t_inh - noisewidth  #initialize lastnoise value
    #But what is it for? to have some
    #kind of correlation in the noise

    #find eigenvalues of A to see if input produces oscillating signal

    xequi = fsolve(lambda x: equi(x,Ndim,Nmitral,Sx,Sx2,Sy,Sy2,th,alpha,\
                                     t_inh,H0,W0,P_odor_in,Ib,Ic,dam),rest0)
    #equilibrium values with some input, about 20 ms to run

    #******************************************************************************

    #CALCULATE A AND DETERMINE EXISTENCE OF OSCILLATIONS

    diffgy = celldiff(xequi[Nmitral:], Sy, Sy2, th)
    diffgx = celldiff(xequi[0:Nmitral], Sx, Sx2, th)

    H1 = np.dot(H0, diffgy)
    W1 = np.dot(W0, diffgx)  #intermediate step in constructing A

    A = np.dot(H1, W1)  #Construct A

    dA, vA = lin.eig(A)  #about 20 ms to run this
    #Find eigenvalues of A

    diff = (1j) * (dA)**.5 - alpha  #criteria for a growing oscillation

    negsum = -(1j) * (dA)**.5 - alpha  #Same

    diff_re = np.real(diff)
    #Take the real part
    negsum_re = np.real(negsum)

    #do an argmax to return the eigenvalue that will cause the fastest growing oscillations
    #Then do a spectrograph to track the growth of the associated freq through time

    indices = np.where(
        diff_re > 0)  #Find the indices where the criterion is met
    indices2 = np.where(negsum_re > 0)

    #eigenvalues that could lead to growing oscillations
    #    candidates = np.append(np.real((dA[indices])**.5),np.real((dA[indices2])**.5))
    largest = np.argmax(diff_re)

    check = np.size(indices)
    check2 = np.size(indices2)

    if check == 0 and check2 == 0:
        #    print("No Odor Recognized")
        dominant_freq = 0
    else:
        dominant_freq = np.real((dA[largest])**.5) / (
            2 * np.pi)  #find frequency of the dominant mode
        #Divide by 2pi to get to cycles/ms
    #    print("Odor detected. Eigenvalues:",dA[indices],dA[indices2],\
    #          "\nEigenvectors:",vA[indices],vA[indices2],\
    #          "\nDominant Frequency:",dominant_freq)

    #*************************************************************************

    #SOLVE DIFFERENTIAL EQUATIONS TO GET INPUT AND OUTPUTS AS FN'S OF t

    #differential equation to solve
    teval = np.r_[0:finalt]

    #solve the differential equation
    sol = solve_ivp(lambda t,y: diffeq(t,y,Nmitral,Ngranule,Ndim,lastnoise,\
                    noise,noisewidth,noiselevel, t_inh,t_exh,exh_rate,alpha,Sy,\
                    Sy2,Sx,Sx2,th,H0,W0,P_odor_in,Ic,Ib,dam),\
                    [0,395],init0,t_eval = teval,method = 'RK45')
    t = sol.t
    y = sol.y
    y = np.transpose(y)
    yout = np.copy(y)

    #convert signal into output signal given by the activation fn
    if signalflag == 1:
        for i in np.arange(np.size(t)):
            yout[i, :Nmitral] = cellout(y[i, :Nmitral], Sx, Sx2, th)
            yout[i, Nmitral:] = cellout(y[i, Nmitral:], Sy, Sy2, th)

    #solve diffeq for P_odor = 0
    #first, reinitialize lastnoise & noise
    noise = np.zeros((Ndim, 1))
    lastnoise = np.zeros((Ndim, 1))
    lastnoise = lastnoise + t_inh - noisewidth

    sol0 = solve_ivp(lambda t,y: diffeq(t,y,Nmitral,Ngranule,Ndim,lastnoise,\
                    noise,noisewidth,noiselevel, t_inh,t_exh,exh_rate,alpha,Sy,\
                    Sy2,Sx,Sx2,th,H0,W0,P_odor0,Ic,Ib,dam),\
                    [0,395],init0,t_eval = teval,method = 'RK45')
    y0 = sol0.y
    y0 = np.transpose(y0)
    y0out = np.copy(y0)

    #convert signal into output signal given by the activation fn
    if signalflag == 1:
        for i in np.arange(np.size(t)):
            y0out[i, :Nmitral] = cellout(y0[i, :Nmitral], Sx, Sx2, th)
            y0out[i, Nmitral:] = cellout(y0[i, Nmitral:], Sy, Sy2, th)

    #*****************************************************************************

    #SIGNAL PROCESSING

    #Filtering the signal - O_mean: Lowpass filtered signal, under 20 Hz
    #S_h: Highpass filtered signal, over 20 Hz

    fs = 1 / (.001 * (t[1] - t[0]))  #sampling freq, converting from ms to sec

    f_c = 15 / fs  # Cutoff freq (15 Hz here), written as a ratio of fc to sample freq

    flter = np.sinc(2 * f_c *
                    (t -
                     (finalt - 1) / 2)) * np.blackman(finalt)  #creating the
    #windowed sinc filter
    #centered at the middle
    #of the time data
    flter = flter / np.sum(flter)  #normalize

    hpflter = -np.copy(flter)
    hpflter[int(
        (finalt - 1) / 2)] += 1  #convert the LP filter into a HP filter

    Sh = np.zeros(np.shape(yout))
    Sl = np.copy(Sh)
    Sl0 = np.copy(Sh)
    Sbp = np.copy(Sh)

    for i in np.arange(Ndim):
        Sh[:, i] = np.convolve(yout[:, i], hpflter, mode='same')
        Sl[:, i] = np.convolve(yout[:, i], flter, mode='same')
        Sl0[:, i] = np.convolve(y0out[:, i], flter, mode='same')

    #find the oscillation period Tosc (Tosc must be greater than 5 ms to exclude noise)
    Tosc0 = np.zeros(np.size(np.arange(5, 50)))
    for i in np.arange(5, 50):
        Sh_shifted = np.roll(Sh, i, axis=0)
        Tosc0[i - 5] = np.sum(
            np.diagonal(
                np.dot(np.transpose(Sh[:, :Nmitral]),
                       Sh_shifted[:, :Nmitral])))
        #That is, do the correlation matrix (time correlation), take the diagonal to
        #get the autocorrelations, and find the max
    Tosc = np.argmax(Tosc0)
    Tosc = Tosc + 5

    f_c2 = 1000 * (
        1.3 /
        Tosc) / fs  #Filter out components with frequencies higher than this
    #to get rid of noise effects in cross-correlation
    #times 1000 to get units right

    flter2 = np.sinc(2 * f_c2 * (t - (finalt - 1) / 2)) * np.blackman(finalt)
    flter2 = flter2 / np.sum(flter2)

    for i in np.arange(Ndim):
        Sbp[:, i] = np.convolve(Sh[:, i], flter2, mode='same')

    #CALCULATE THE DISTANCE MEASURES

    #calculate phase via cross-correlation with each cell
    phase = np.zeros(Nmitral)

    for i in np.arange(1, Nmitral):
        crosscor = signal.correlate(Sbp[:, 0], Sbp[:, i])
        tdiff = np.argmax(crosscor) - (finalt - 1)
        phase[i] = tdiff / Tosc * 2 * np.pi

    #Problem with the method below is that it will only give values from 0 to pi
    #for i in np.arange(1,Nmitral):
    #    phase[i]=np.arccos(np.dot(Sbp[:,0],Sbp[:,i])/(lin.norm(Sbp[:,0])*lin.norm(Sbp[:,i])))

    OsciAmp = np.zeros(Nmitral)
    Oosci = np.copy(OsciAmp) * 0j
    Omean = np.zeros(Nmitral)

    for i in np.arange(Nmitral):
        OsciAmp[i] = np.sqrt(
            np.sum(Sh[125:250, i]**2) / np.size(Sh[125:250, i]))
        Oosci[i] = OsciAmp[i] * np.exp(1j * phase[i])
        Omean[i] = np.average(Sl[:, i] - Sl0[:, i])

    Omean = np.maximum(Omean, 0)

    Ooscibar = np.sqrt(np.dot(
        Oosci,
        np.conjugate(Oosci))) / Nmitral  #can't just square b/c it's complex
    Omeanbar = np.sqrt(np.dot(Omean, Omean)) / Nmitral

    maxlam = np.max(np.abs(np.imag(np.sqrt(dA))))

    return yout, y0out, Sh, t, OsciAmp, Omean, Oosci, Omeanbar, Ooscibar, dominant_freq, maxlam
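
# Standalone sketch of the filter construction used above: a windowed-sinc
# lowpass is converted to a highpass by spectral inversion (delta minus
# lowpass). N and the cutoff are illustrative values, not the ones above.
import numpy as np

N = 395                       # filter length (odd, so it has a centre tap)
fc = 0.04                     # cutoff as a fraction of the sampling rate
n = np.arange(N)
lp = np.sinc(2 * fc * (n - (N - 1) / 2)) * np.blackman(N)
lp /= lp.sum()                # unity gain at DC
hp = -lp
hp[(N - 1) // 2] += 1         # spectral inversion
print(abs(hp.sum()) < 1e-12)  # ~zero gain at DC, as a highpass should have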
Example #48
# extract y:
y = gpa1['colGPA']

# extract X & add a column of ones:
X = pd.DataFrame({'const': 1, 'hsGPA': gpa1['hsGPA'], 'ACT': gpa1['ACT']})

# alternative with patsy:
y2, X2 = pt.dmatrices('colGPA ~ hsGPA + ACT',
                      data=gpa1,
                      return_type='dataframe')

# display first rows of X:
print(f'X.head(): \n{X.head()}\n')

# parameter estimates:
X = np.array(X)
n = X.shape[0]  # number of observations
k = X.shape[1] - 1  # number of regressors, excluding the constant
y = np.array(y).reshape(n, 1)  # creates a column vector
b = np.linalg.inv(X.T @ X) @ X.T @ y
print(f'b: \n{b}\n')

# residuals, estimated variance of u and SER:
u_hat = y - X @ b
sigsq_hat = (u_hat.T @ u_hat) / (n - k - 1)
SER = np.sqrt(sigsq_hat)
print(f'SER: {SER}\n')

# estimated variance of the parameter estimators and SE:
Vbeta_hat = sigsq_hat * np.linalg.inv(X.T @ X)
se = np.sqrt(np.diagonal(Vbeta_hat))
print(f'se: {se}\n')
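
# A hedged continuation: t statistics and two-sided p-values from b and se
# above, using the usual OLS sampling theory (n and k as defined earlier).
import numpy as np
import scipy.stats as stats

t_stats = b.flatten() / se
p_vals = 2 * stats.t.sf(np.abs(t_stats), df=n - k - 1)
print(f't: {t_stats}\np: {p_vals}')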
Example #49
# The uncertainty bounds are +/- 3 standard deviations based on our uncertainty (covariance).
################################################################################################
error_fig, ax = plt.subplots(2, 3)
error_fig.suptitle('Error Plots')
num_gt = gt.p.shape[0]
p_est_euler = []
p_cov_euler_std = []

# Convert estimated quaternions to euler angles
for i in range(len(q_est)):
    qc = Quaternion(*q_est[i, :])
    p_est_euler.append(qc.to_euler())

    # First-order approximation of RPY covariance
    J = rpy_jacobian_axis_angle(qc.to_axis_angle())
    p_cov_euler_std.append(np.sqrt(np.diagonal(J @ p_cov[i, 6:, 6:] @ J.T)))

p_est_euler = np.array(p_est_euler)
p_cov_euler_std = np.array(p_cov_euler_std)

# Get uncertainty estimates from P matrix
p_cov_std = np.sqrt(np.diagonal(p_cov[:, :6, :6], axis1=1, axis2=2))

titles = ['Easting', 'Northing', 'Up', 'Roll', 'Pitch', 'Yaw']
for i in range(3):
    ax[0, i].plot(range(num_gt), gt.p[:, i] - p_est[:num_gt, i])
    ax[0, i].plot(range(num_gt), 3 * p_cov_std[:num_gt, i], 'r--')
    ax[0, i].plot(range(num_gt), -3 * p_cov_std[:num_gt, i], 'r--')
    ax[0, i].set_title(titles[i])
ax[0, 0].set_ylabel('Meters')
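
# Standalone sketch of the covariance-stack indexing used above: np.diagonal
# with axis1/axis2 pulls the per-timestep diagonal out of a (T, 6, 6) stack.
# The numbers are invented for illustration.
import numpy as np

T = 4
p_cov_demo = np.tile(np.diag([0.04, 0.04, 0.09, 1e-4, 1e-4, 4e-4]), (T, 1, 1))
std_demo = np.sqrt(np.diagonal(p_cov_demo, axis1=1, axis2=2))  # shape (T, 6)
print(std_demo[0])  # [0.2  0.2  0.3  0.01 0.01 0.02]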
Example #50
def state_vector_plots(x_act=None,
                       act=None,
                       x_calc=None,
                       calc=None,
                       x_data=None,
                       data=None,
                       covar=None):
    """Plot the state vectors and compare them to the data.

    This would mainly be used to see the results of the fit.

    Each set of parameters is optional, but if one y parameter is included, its corresponding x parameter must also
    be present. For instance, if `calc` is provided, then `x_calc` must also be provided.

    Parameters
    ----------
    x_act : array-like, optional
        x values of the actual state vector
    act : array-like, optional
        actual state vector
    x_calc : array-like, optional
        x values of calculated state vector
    calc : array-like, optional
        the calculated state vector
    x_data : array-like, optional
        x values of the data
    data : array-like, optional
        measured data points
    covar : array-like, optional
        the covariance matrix of the fit

    Returns
    -------
    figure
        The state vector plots
    """
    fig, ax = plt.subplots(3, 2)
    fig.set_figheight(15)
    fig.set_figwidth(15)

    blue = sns.xkcd_rgb['denim blue']
    red = sns.xkcd_rgb['crimson']
    purple = sns.xkcd_rgb['amethyst']

    if x_act is not None and act is not None:
        ax[0, 0].plot(x_act, act[:, 0], color=blue, label='Actual', zorder=2)
        ax[1, 0].plot(x_act, act[:, 1], color=blue, label='Actual', zorder=2)
        ax[2, 0].plot(x_act, act[:, 2], color=blue, label='Actual', zorder=2)
        ax[0, 1].plot(x_act, act[:, 3], color=blue, label='Actual', zorder=2)
        ax[1, 1].plot(x_act, act[:, 4], color=blue, label='Actual', zorder=2)
        ax[2, 1].plot(x_act, act[:, 5], color=blue, label='Actual', zorder=2)

    if calc is not None and x_calc is not None:
        ax[0, 0].plot(x_calc,
                      calc[:, 0],
                      color=purple,
                      label='Calculated',
                      zorder=3)
        ax[1, 0].plot(x_calc,
                      calc[:, 1],
                      color=purple,
                      label='Calculated',
                      zorder=3)
        ax[2, 0].plot(x_calc,
                      calc[:, 2],
                      color=purple,
                      label='Calculated',
                      zorder=3)
        ax[0, 1].plot(x_calc,
                      calc[:, 3],
                      color=purple,
                      label='Calculated',
                      zorder=3)
        ax[1, 1].plot(x_calc,
                      calc[:, 4],
                      color=purple,
                      label='Calculated',
                      zorder=3)
        ax[2, 1].plot(x_calc,
                      calc[:, 5],
                      color=purple,
                      label='Calculated',
                      zorder=3)

    if data is not None and x_data is not None:
        ax[0, 0].plot(x_data,
                      data[:, 0],
                      '.',
                      markersize=4,
                      color=red,
                      label='Data',
                      zorder=1,
                      alpha=0.5)
        ax[1, 0].plot(x_data,
                      data[:, 1],
                      '.',
                      markersize=4,
                      color=red,
                      label='Data',
                      zorder=1,
                      alpha=0.5)
        ax[2, 0].plot(x_data,
                      data[:, 2],
                      '.',
                      markersize=4,
                      color=red,
                      label='Data',
                      zorder=1,
                      alpha=0.5)

    if covar is not None:
        ubd = calc + numpy.sqrt(numpy.diagonal(covar, axis1=1, axis2=2))
        lbd = calc - numpy.sqrt(numpy.diagonal(covar, axis1=1, axis2=2))
        ax[0, 0].fill_between(x_calc,
                              ubd[:, 0],
                              lbd[:, 0],
                              color=purple,
                              alpha=0.3,
                              zorder=0)
        ax[1, 0].fill_between(x_calc,
                              ubd[:, 1],
                              lbd[:, 1],
                              color=purple,
                              alpha=0.3,
                              zorder=0)
        ax[2, 0].fill_between(x_calc,
                              ubd[:, 2],
                              lbd[:, 2],
                              color=purple,
                              alpha=0.3,
                              zorder=0)
        ax[0, 1].fill_between(x_calc,
                              ubd[:, 3],
                              lbd[:, 3],
                              color=purple,
                              alpha=0.3,
                              zorder=0)
        ax[1, 1].fill_between(x_calc,
                              ubd[:, 4],
                              lbd[:, 4],
                              color=purple,
                              alpha=0.3,
                              zorder=0)
        ax[2, 1].fill_between(x_calc,
                              ubd[:, 5],
                              lbd[:, 5],
                              color=purple,
                              alpha=0.3,
                              zorder=0)

    ax[0, 0].set_ylabel('x [m]')
    ax[0, 0].legend(loc='best')

    ax[1, 0].set_ylabel('y [m]')
    ax[1, 0].legend(loc='best')

    ax[2, 0].set_ylabel('z [m]')
    ax[2, 0].legend(loc='best')

    ax[0, 1].set_ylabel('px [MeV/c]')
    ax[0, 1].legend(loc='best')

    ax[1, 1].set_ylabel('py [MeV/c]')
    ax[1, 1].legend(loc='best')

    ax[2, 1].set_ylabel('pz [MeV/c]')
    ax[2, 1].legend(loc='best')

    return fig
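
# Hypothetical usage of the function above: overlay a calculated trajectory
# and the +/- 1 sigma band taken from a (N, 6, 6) covariance stack. All
# arrays here are invented for illustration.
import numpy as np

x = np.linspace(0, 1, 50)
calc = np.cumsum(np.random.randn(50, 6), axis=0)
covar = np.tile(np.eye(6) * 0.04, (50, 1, 1))
fig = state_vector_plots(x_calc=x, calc=calc, covar=covar)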
Example #51
    def _calc_ast_cov(self, indxs, filters, return_all=False):
        """
        The NxN-dimensional covariance matrix and N-dimensional bias vector are
        calculated from M independent ASTs computed for N bands

        Parameters
        ----------
        indxs : index array giving the ASTs associated with a single
                model SED
        filters : base filter names in the AST file

        Keywords
        --------
        return_all : True/False
        
        Returns
        -------
        if return_all = False
           (cov_mat, bias, compl)
        else
           (cov_mat, bias, stddevs, corr_mat, diffs, ifluxes, compls)

        cov_mat : NxN dim numpy array
                  covariance matrix in flux units
        bias : N dim numpy vector
               vector of the biases in each filter
        stddevs : N dim numpy vector
                  vector of standard deviations in each filter
        corr_mat : NxN dim numpy array
                   correlation matrix
        diffs : KxN dim numpy vector
                raw flux differences for N filters and K AST instances
        ifluxes : N dim numpy vector
                  input fluxes of the AST in each filter
        compl : float
                AST completeness for this model
        """

        # set the asts for this star using the input index array
        asts = self.data[indxs]

        # now check that the source was recovered in at least 1 band
        #   this replicates how the observed catalog is created
        n_asts = len(asts)
        gtindxs = np.full((n_asts), 1)
        for k in range(n_asts):
            cgood = 0
            for cfilter in filters:
                if asts[cfilter + '_VEGA'][k] < 90:
                    cgood = cgood + 1
            gtindxs[k] = cgood

        indxs, = np.where(gtindxs > 0)
        n_indxs = len(indxs)
        if n_indxs <= 5:
            return False

        # completeness
        compl = float(n_indxs) / float(n_asts)

        # setup the variables for output
        n_filters = len(filters)
        ifluxes = np.empty((n_filters), dtype=np.float32)
        diffs = np.empty((n_filters, n_indxs), dtype=np.float32)
        biases = np.empty((n_filters), dtype=np.float32)
        cov_matrix = np.full((n_filters, n_filters), 0.0, dtype=np.float32)

        for ck, cfilter in enumerate(filters):
            ifluxes[ck] = np.power(10.0,-0.4*asts[cfilter+'_IN'][indxs[0]])* \
                          self.vega_flux[ck]
            # compute the difference vector between the input and output fluxes
            #    note that the input fluxes are in magnitudes and the
            #    output fluxes in normalized vega fluxes
            diffs[ck,:] = asts[cfilter+'_RATE'][indxs]*self.vega_flux[ck] - \
                          ifluxes[ck]
            # compute the bias and standard deviations around said bias
            biases[ck] = np.mean(diffs[ck, :])

        # compute the covariance matrix
        for ck in range(n_filters):
            for dk in range(ck, n_filters):
                for ci in range(n_indxs):
                    cov_matrix[ck,dk] += (diffs[ck,ci] - biases[ck])* \
                                         (diffs[dk,ci] - biases[dk])
                # fill in the symmetric terms
                cov_matrix[dk, ck] = cov_matrix[ck, dk]

        cov_matrix /= (n_indxs - 1)
        stddevs = np.sqrt(np.diagonal(cov_matrix))

        # compute the correlation matrix
        corr_matrix = np.array(cov_matrix)
        for ck in range(n_filters):
            for dk in range(ck, n_filters):
                if stddevs[ck] * stddevs[dk] > 0:
                    corr_matrix[ck, dk] /= stddevs[ck] * stddevs[dk]
                else:
                    corr_matrix[ck, dk] = 0.0
                # fill in the symmetric terms
                corr_matrix[dk, ck] = corr_matrix[ck, dk]

        if return_all:
            return (cov_matrix, biases, stddevs, corr_matrix, diffs, ifluxes,
                    compl)
        else:
            return (cov_matrix, biases, compl)
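
# The explicit double loop above computes the sample covariance of the
# difference vectors; np.cov gives the same result (it also subtracts the
# per-row means and divides by N - 1). A quick equivalence check (sketch):
import numpy as np

rng = np.random.default_rng(1)
diffs = rng.normal(size=(4, 100))            # (n_filters, n_indxs)
centered = diffs - diffs.mean(axis=1, keepdims=True)
manual = centered @ centered.T / (diffs.shape[1] - 1)
print(np.allclose(manual, np.cov(diffs)))    # True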
Example #52
def fitgaussian(y,
                x=None,
                bgpars=None,
                fitbg=0,
                guess=None,
                mask=None,
                weights=None,
                maskg=False,
                yxguess=None):
    """
    Fits an N-dimensional Gaussian to (value, coordinate) data.

    Parameters
    ----------
    y : ndarray
        Array giving the values of the function.
    x : ndarray
        (optional) Array (any shape) giving the abscissas of y (if
        missing, uses np.indices(y)).  The highest dimension must be
        equal to the number of other dimensions (i.e., if x has 6
        dimensions, the highest dimension must have length 5).  The
        rest of the dimensions must have the same shape as y.  Must be
        sorted ascending (which is not checked), if guess is not
        given.
    bgpars : ndarray or tuple, 3-elements
        Background parameters, the elements determine an X- and Y-linearly
        dependent level, of the form:
        f = Y*bgparam[0] + X*bgparam[1] + bgparam[2]
        (Not tested for 1D yet).
    fitbg : Integer
        This flag indicates the level of background fitting:
        fitbg=0: No fitting, estimate the bg as median(data).
        fitbg=1: Fit a constant to the bg (bg = c).
        fitbg=2: Fit a plane as bg (bg = a*x + b*y + c).
    guess : tuple, (width, center, height)
        Tuple giving an initial guess of the Gaussian parameters for
        the optimizer.  If supplied, x and y can be any shape and need
        not be sorted.  See gaussian() for meaning and format of this
        tuple.
    mask : ndarray
        Same shape as y. Values where its corresponding mask value is
        0 are disregarded for the minimization. Only values where the
        mask value is 1 are considered.
    weights : ndarray
        Same shape as y. This array defines weights for the
        minimization, for scientific data the weights should be
        1/sqrt(variance).

    Returns
    -------
    params : ndarray
    
        This array contains the best fitting values parameters: width,
        center, height, and if requested, bgpars. with:
          width :  The fitted Gaussian widths in each dimension.
          center : The fitted Gaussian center coordinate in each dimension.
          height : The fitted height.

    err : ndarray
        An array containing the concatenated uncertainties
        corresponding to the values of params.  For example, 2D input
        gives np.array([widthyerr, widthxerr, centeryerr, centerxerr,
        heighterr]).

    Notes
    -----
    If the input does not look anything like a Gaussian, the result
    might not even be the best fit to that.

    Method: First guess the parameters (if no guess is provided), then
    call a Levenberg-Marquardt optimizer to finish the job.

    Examples
    --------

    >>> import matplotlib.pyplot as plt
    >>> import gaussian as g

    >>> # parameters for X
    >>> lx = -3.  # low end of range
    >>> hx = 5.   # high end of range
    >>> dx = 0.05 # step

    >>> # parameters of the noise
    >>> nc = 0.0  # noise center
    >>> ns = 1.0  # noise width
    >>> na = 0.2  # noise amplitude

    >>> # 1D Example

    >>> # parameters of the underlying Gaussian
    >>> wd = 1.1  # width
    >>> ct = 1.2  # center
    >>> ht = 2.2  # height

    >>> # x and y data to fit
    >>> x  = np.arange(lx, hx + dx / 2., dx)
    >>> x +=                             na * np.random.normal(nc, ns, x.size)
    >>> y  = g.gaussian(x, wd, ct, ht) + na * np.random.normal(nc, ns, x.size)
    >>> s  = x.argsort()    # sort, in case noise violated order
    >>> xs = x[s]
    >>> ys = y[s]

    >>> # calculate guess and fit
    >>> (width, center, height)     = g.gaussianguess(ys, xs)
    >>> (fw,    fc,     fh,    err) = g.fitgaussian(ys, xs)

    >>> # plot results
    >>> plt.clf()
    >>> plt.plot(xs, ys)
    >>> plt.plot(xs,      g.gaussian(xs, wd,    ct,     ht))
    >>> plt.plot(xs,      g.gaussian(xs, width, center, height))
    >>> plt.plot(xs,      g.gaussian(xs, fw,    fc,     fh))
    >>> plt.title('Gaussian Data, Guess, and Fit')
    >>> plt.xlabel('Abscissa')
    >>> plt.ylabel('Ordinate')
    >>> # plot residuals
    >>> plt.clf()
    >>> plt.plot(xs, ys - g.gaussian(xs, fw,    fc,     fh))
    >>> plt.title('Gaussian Fit Residuals')
    >>> plt.xlabel('Abscissa')
    >>> plt.ylabel('Ordinate')

    >>> # 2D Example

    >>> # parameters of the underlying Gaussian
    >>> wd = (1.1, 3.2)  # width
    >>> ct = (1.2, 3.1)  # center
    >>> ht = 2.2         # height

    >>> # x and y data to fit
    >>> nx = (hx - lx) / dx + 1
    >>> x  = np.indices((nx, nx)) * dx + lx
    >>> y  = g.gaussian(x, wd, ct, ht) + na * np.random.normal(nc, ns, x.shape[1:])

    >>> # calculate guess and fit
    >>> #(width, center, height)     = g.gaussianguess(y, x) # not in 2D yet...
    >>> (fw,    fc,     fh,    err) = g.fitgaussian(y, x, (wd, ct, ht))

    >>> # plot results
    >>> plt.clf()
    >>> plt.title('2D Gaussian Given')
    >>> plt.xlabel('X')
    >>> plt.ylabel('Y')
    >>> plt.imshow(    g.gaussian(x, wd,    ct,     ht))
    >>> plt.clf()
    >>> plt.title('2D Gaussian With Noise')
    >>> plt.xlabel('X')
    >>> plt.ylabel('Y')
    >>> plt.imshow(y)
    >>> #plt.imshow(    g.gaussian(x, width, center, height)) # not in 2D yet...
    >>> plt.clf()
    >>> plt.title('2D Gaussian Fit')
    >>> plt.xlabel('X')
    >>> plt.ylabel('Y')
    >>> plt.imshow(    g.gaussian(x, fw,    fc,     fh))
    >>> plt.clf()
    >>> plt.title('2D Gaussian Fit Residuals')
    >>> plt.xlabel('X')
    >>> plt.ylabel('Y')
    >>> plt.imshow(y - g.gaussian(x, fw,    fc,     fh))

    >>> # All cases benefit from...

    >>> # show difference between fit and underlying Gaussian
    >>> # Random data, your answers WILL VARY.
    >>> np.array(fw) - np.array(wd)
    array([ 0.00210398, -0.00937687])
    >>> np.array(fc) - np.array(ct)
    array([-0.00260803,  0.00555011])
    >>> np.array(fh) - np.array(ht)
    0.0030143371034774269

    >>> # Last example:
    >>> x = np.indices((30,30))
    >>> g1 = g.gaussian(x, width=(1.2, 1.15), center=(13.2,15.75), height=1e4,
    >>>                 bgpars=[0.0, 0.0, 100.0])
    >>> error = np.sqrt(g1) * np.random.randn(30,30)
    >>> y = g1 + error
    >>> var = g1
    >>> 
    >>> plt.figure(1)
    >>> plt.clf()
    >>> plt.imshow(y, origin='lower_left', interpolation='nearest')
    >>> plt.colorbar()
    >>> plt.title('2D Gaussian')
    >>> plt.xlabel('X')
    >>> plt.ylabel('Y')
    >>> 
    >>> guess = ((1.2,1.2),(13,16.),1e4)
    >>> reload(g)
    >>> fit = g.fitgaussian(y, x, bgpars=[0.0, 0.0, 110.], fitbg=1, guess=guess,
    >>>                     mask=None, weights=1/np.sqrt(var))
    >>> print(fit[0])


    Revisions
    ---------
    2007-09-17  Joe      Initial version, portions adapted from
                         http://www.scipy.org/Cookbook/FittingData.
                         [email protected]
    2007-11-13  Joe      Made N-dimensional.
    2008-12-02  Nate     Included error calculation, and return Fixed a bug
                         in which if the initial guess was None, and incorrect
                         shape array was generated. This caused gaussian guess
                         to fail.
                         [email protected]
    2009-10-25           Converted to standard doc header, fixed examples to
                         return 4 parameters.
    2011-05-03  patricio Added mask, weights, and background-fitting options.
                         [email protected]
  """

    if x is None:
        x = np.indices(np.shape(y))
    else:
        if (((x.ndim == 1) and (x.shape != y.shape))
                or ((x.ndim > 1) and (x.shape[1:] != y.shape))):
            raise ValueError("x must give coordinates of points in y.")

    # Default mask: all good
    if mask is None:
        mask = np.ones(np.shape(y))

    # Default weights: no weighting
    if weights is None:
        weights = np.ones(np.shape(y))

    # Mask the gaussian if requested:
    medmask = np.copy(mask)
    if maskg and (yxguess is not None or guess is not None):
        if yxguess is not None:
            center = yxguess
        elif guess is not None:
            center = guess[1]
        medmask *= (1 - d.disk(3, center, np.shape(y)))

    # Estimate the median of the image:
    medbg = np.median(y[np.where(medmask)])

    if bgpars is None:
        bgpars = [0.0, 0.0, medbg]

    # get a guess if not provided
    if guess is None:
        if yxguess is None:
            guess = gaussianguess(y - medbg, mask=mask)
        else:
            guess = gaussianguess(y - medbg, mask=mask, yxguess=yxguess)

    # "ravel" the guess
    gparams = np.append(guess[0], guess[1])
    gparams = np.append(gparams, guess[2])

    # Background params to fit:
    if fitbg == 0:
        bgparams = []
    elif fitbg == 1:
        bgparams = bgpars[2]
    elif fitbg == 2:
        bgparams = bgpars

    # Concatenate sets of parameters we want to fit:
    params = np.append(gparams, bgparams)
    # Rest of parameters needed by residuals:
    args = (x, y, mask, weights, bgpars, fitbg)

    # The fit:
    p, cov, info, mesg, success = so.leastsq(residuals,
                                             params,
                                             args,
                                             full_output=True)
    if cov is not None:
        err = np.sqrt(np.diagonal(cov))
    else:
        err = None  # leastsq failed to estimate a covariance matrix

    return p, err
Example #53
File: pdfs.py Project: Samnor/maf
    def __init__(self, m=None, P=None, U=None, S=None, Pm=None):
        """
        Initialize a gaussian pdf given a valid combination of its parameters. Valid combinations are:
        m-P, m-U, m-S, Pm-P, Pm-U, Pm-S
        :param m: mean
        :param P: precision
        :param U: upper triangular precision factor such that U'U = P
        :param S: covariance
        :param Pm: precision times mean such that P*m = Pm
        """

        if m is not None:
            m = np.asarray(m)
            self.m = m
            self.ndim = m.size

            if P is not None:
                P = np.asarray(P)
                L = np.linalg.cholesky(P)
                self.P = P
                self.C = np.linalg.inv(L)
                self.S = np.dot(self.C.T, self.C)
                self.Pm = np.dot(P, m)
                self.logdetP = 2.0 * np.sum(np.log(np.diagonal(L)))

            elif U is not None:
                U = np.asarray(U)
                self.P = np.dot(U.T, U)
                self.C = np.linalg.inv(U.T)
                self.S = np.dot(self.C.T, self.C)
                self.Pm = np.dot(self.P, m)
                self.logdetP = 2.0 * np.sum(np.log(np.diagonal(U)))

            elif S is not None:
                S = np.asarray(S)
                self.P = np.linalg.inv(S)
                self.C = np.linalg.cholesky(S).T
                self.S = S
                self.Pm = np.dot(self.P, m)
                self.logdetP = -2.0 * np.sum(np.log(np.diagonal(self.C)))

            else:
                raise ValueError('Precision information missing.')

        elif Pm is not None:
            Pm = np.asarray(Pm)
            self.Pm = Pm
            self.ndim = Pm.size

            if P is not None:
                P = np.asarray(P)
                L = np.linalg.cholesky(P)
                self.P = P
                self.C = np.linalg.inv(L)
                self.S = np.dot(self.C.T, self.C)
                self.m = np.linalg.solve(P, Pm)
                self.logdetP = 2.0 * np.sum(np.log(np.diagonal(L)))

            elif U is not None:
                U = np.asarray(U)
                self.P = np.dot(U.T, U)
                self.C = np.linalg.inv(U.T)
                self.S = np.dot(self.C.T, self.C)
                self.m = np.linalg.solve(self.P, Pm)
                self.logdetP = 2.0 * np.sum(np.log(np.diagonal(U)))

            elif S is not None:
                S = np.asarray(S)
                self.P = np.linalg.inv(S)
                self.C = np.linalg.cholesky(S).T
                self.S = S
                self.m = np.dot(S, Pm)
                self.logdetP = -2.0 * np.sum(np.log(np.diagonal(self.C)))

            else:
                raise ValueError('Precision information missing.')

        else:
            raise ValueError('Mean information missing.')
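
# Quick numerical check of the log-determinant identity used above: if
# P = L L' with L lower-triangular (Cholesky), then log|P| = 2*sum(log diag L).
import numpy as np

P = np.array([[4.0, 1.0],
              [1.0, 3.0]])
L = np.linalg.cholesky(P)
sign, logdet = np.linalg.slogdet(P)
print(np.isclose(2.0 * np.log(np.diagonal(L)).sum(), logdet))  # True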
Example #54
    def __call__(self, sedgrid, generic_absflux_a_matrix=None, progress=True):
        """
        Interpolate the results of the ASTs on the model grid

        Parameters
        ----------
        sedgrid: beast.core.grid type
            model grid to interpolate AST results on
        generic_absflux_a_matrix: ndarray, optional
            model independent absolute flux covariance "A" matrix
        progress: bool, optional
            if set, display a progress bar

        Returns
        -------
        (biases, sigmas, compls, q_norm, icov_diag, icov_offdiag,
        cov_diag, cov_offdiag)
        """
        flux = sedgrid.seds
        if generic_absflux_a_matrix is not None:
            model_absflux_cov = False
            print('using model independent absflux cov matrix')
        elif ((sedgrid.cov_diag is not None) and
              (sedgrid.cov_offdiag is not None)):
            model_absflux_cov = True
            absflux_cov_diag = sedgrid.cov_diag
            absflux_cov_offdiag = sedgrid.cov_offdiag
            print('using model dependent absflux cov matrix')
        else:
            model_absflux_cov = False
            print('not using any absflux cov matrix')

        n_models, n_filters = flux.shape
        n_offdiag = ((n_filters**2) - n_filters) // 2

        if n_filters != len(self.filters):
            raise AttributeError('the grid of models does not seem to '
                                 'be defined with the same number of filters')

        biases = np.empty((n_models, n_filters), dtype=np.float64)
        sigmas = np.empty((n_models, n_filters), dtype=np.float64)
        cov_diag = np.empty((n_models, n_filters), dtype=np.float64)
        cov_offdiag = np.empty((n_models, n_offdiag), dtype=np.float64)
        icov_diag = np.empty((n_models, n_filters), dtype=np.float64)
        icov_offdiag = np.empty((n_models, n_offdiag), dtype=np.float64)
        q_norm = np.empty((n_models), dtype=np.float64)
        compls = np.empty((n_models), dtype=float)

        if progress is True:
            it = Pbar(desc='Evaluating model').iterover(range(n_models))
        else:
            it = range(n_models)

        for i in it:
            # AST results are in vega fluxes
            cur_flux = flux[i, :]

            # find the 10 nearest neighbors to the model SED
            result = self._kdtree.query(np.log10(cur_flux), 10)

            dist = result[0]
            indxs = result[1]

            # check if the distance is very small, set to a reasonable value
            tindxs, = np.where(dist < 0.01)
            if len(tindxs) > 0:
                dist[tindxs] = 0.01

            # compute the interpolated covariance matrix
            #    use the distances to generate weights for the sum
            dist_weights = 1.0 / dist
            dist_weights /= np.sum(dist_weights)

            cur_cov_matrix = np.average(self._cov_matrices[indxs, :, :],
                                        axis=0,
                                        weights=dist_weights)

            # add in the absflux covariance matrix
            #   unpack off diagonal terms the same way they were packed
            if model_absflux_cov:
                m = 0
                cur_cov_matrix[n_filters-1,n_filters-1] += \
                                                absflux_cov_diag[i,n_filters-1]
                for k in range(n_filters - 1):
                    cur_cov_matrix[k, k] += absflux_cov_diag[i, k]
                    for l in range(k + 1, n_filters):
                        cur_cov_matrix[k, l] += absflux_cov_offdiag[i, m]
                        cur_cov_matrix[l, k] += absflux_cov_offdiag[i, m]
                        m += 1
            elif generic_absflux_a_matrix is not None:
                for k in range(n_filters):
                    for l in range(n_filters):
                        cur_cov_matrix[k,
                                       l] += (generic_absflux_a_matrix[k, l] *
                                              cur_flux[k] * cur_flux[l])

            # compute the interpolated biases
            biases[i, :] = np.average(self._biases[indxs, :],
                                      axis=0,
                                      weights=dist_weights)

            # compute the interpolated completeness
            compls[i] = np.average(self._completenesses[indxs],
                                   weights=dist_weights)

            # save the straight uncertainties
            sigmas[i, :] = np.sqrt(np.diagonal(cur_cov_matrix))

            # invert covariance matrix
            inv_cur_cov_matrix = np.linalg.inv(cur_cov_matrix)

            # save the diagonal and packed version of non-diagonal terms
            m = 0
            icov_diag[i, n_filters - 1] = inv_cur_cov_matrix[n_filters - 1,
                                                             n_filters - 1]
            cov_diag[i, n_filters - 1] = cur_cov_matrix[n_filters - 1,
                                                        n_filters - 1]
            for k in range(n_filters - 1):
                icov_diag[i, k] = inv_cur_cov_matrix[k, k]
                cov_diag[i, k] = cur_cov_matrix[k, k]
                for l in range(k + 1, n_filters):
                    icov_offdiag[i, m] = inv_cur_cov_matrix[k, l]
                    cov_offdiag[i, m] = cur_cov_matrix[k, l]
                    m += 1

            # save the log of the determinant for normalization
            #   the ln(det) is calculated and saved as this is what will
            #   be used in the actual calculation
            #       norm = 1.0/sqrt(Q)
            det = np.linalg.slogdet(cur_cov_matrix)
            if det[0] <= 0:
                print('something bad happened')
                print('determinant of covariance matrix is zero or negative')
                print(det)
            q_norm[i] = -0.5 * det[1]

        return (biases, sigmas, compls, q_norm, icov_diag, icov_offdiag,
                cov_diag, cov_offdiag)
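
# Why slogdet above rather than det: for near-singular covariances the
# determinant underflows, while the log-determinant stays finite (sketch
# with invented tiny variances):
import numpy as np

C = np.diag([1e-120, 1e-120, 1e-120])
print(np.linalg.det(C))            # 0.0 -- underflows in float64
sign, logdet = np.linalg.slogdet(C)
print(sign, logdet)                # 1.0, ~-828.9
q_norm = -0.5 * logdet             # ln(1/sqrt|C|), computed safely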
Example #55
print(get_short_diagnostics(full_mcmc_tensor))

out = sampler1.get_diagnostics(permuted=False)
print("num divergent")
processed_diag = process_diagnostics(out, name_list=["divergent"])

print(processed_diag.sum(axis=1))
print("num hit max tree depth")
processed_diag = process_diagnostics(out, name_list=["hit_max_tree_depth"])

print(processed_diag.sum(axis=1))

print("average acceptance rate after warmup")
processed_diag = process_diagnostics(out, name_list=["accept_rate"])

average_accept_rate = numpy.mean(processed_diag, axis=1)

print(average_accept_rate)

print("energy diagnostics")
print(energy_diagnostics(diagnostics_obj=out))

mixed_mcmc_tensor = sampler1.get_samples(permuted=True)
print(mixed_mcmc_tensor)

true_cov = numpy.cov(mixed_mcmc_tensor, rowvar=False)
var_vec = numpy.diagonal(true_cov)  # marginal variances (diagonal of the covariance)

print("problem difficulty")

print(max(var_vec) / min(var_vec))  # val = 11.5
Example #56
def profile_align(prof1, prof2):
    """
    Profile-to-profile Needleman-Wunsch global alignment algorithm
    :param prof1: amino-acid profile of a sequence (output of seqprofile function);
    must be a numpy array of 20 rows
    :param prof2: amino-acid profile of a sequence (output of seqprofile function);
    must be a numpy array of 20 rows
    :return: tuple (prof, h1, h2) - the merged 21-row alignment profile and the
    alignment positions occupied by prof1 and prof2, respectively
    """
    assert len(prof1) == 20 and len(prof2) == 20, 'The profiles prof1 and prof2 ' \
                                                  'must be numpy arrays with 20 rows'
    # The following are the assumptions made
    # extendgap = False
    # adjust_oldgap = True
    # adjust_endgap = False
    # v2_flag = False
    gap_penalty = -8
    gapgap_function = lambda a, b: 0.1 * a
    gapres_function = lambda a, b: 0.1 * b

    blosum50 = np.loadtxt('./sca/blosum50.txt')
    len1, len2 = len(prof1[0]), len(prof2[0])
    gap1, gap2 = np.ones(len1 + 1) * gap_penalty, np.ones(len2 +
                                                          1) * gap_penalty
    prof1 = np.concatenate((prof1, np.zeros((1, len(prof1[0])))))
    prof2 = np.concatenate((prof2, np.zeros((1, len(prof2[0])))))

    num_sym = 21
    scoring_matrix = np.zeros((num_sym, num_sym))
    scoring_matrix[0:num_sym - 1, 0:num_sym - 1] = blosum50[0:num_sym - 1,
                                                            0:num_sym - 1]
    sm = np.mean(np.diagonal(scoring_matrix))
    sx = sum(sum(scoring_matrix - np.diag(np.diagonal(scoring_matrix))))
    gapgap_const = gapgap_function(sm, sx)
    gapres_const = gapres_function(sm, sx)
    scoring_matrix[num_sym - 1, :] = gapres_const
    scoring_matrix[:, num_sym - 1] = gapres_const
    scoring_matrix[num_sym - 1, num_sym - 1] = gapgap_const

    gap_weight1, gap_weight2 = sum(prof1[:-1, :]), sum(prof2[:-1, :])
    temp1 = np.vstack((np.append(gap_weight1, max(gap_weight1)),
                       np.insert(gap_weight1, 0, max(gap_weight1))))
    gap1 = gap1 * np.min(temp1, axis=0)
    temp2 = np.vstack((np.append(gap_weight2, max(gap_weight2)),
                       np.insert(gap_weight2, 0, max(gap_weight2))))
    gap2 = gap2 * np.min(temp2, axis=0)

    f, pointer = needle_wunsch_align(prof1, prof2, scoring_matrix, gap1, gap2,
                                     gap_weight1, gap_weight2)

    i, j = len2 + 1, len1 + 1
    path = np.zeros((len1 + len2, 2))
    step = 1
    score = f[-1, -1]

    while i > 1 or j > 1:
        if pointer[i - 1, j - 1] == 1:
            i, j = i - 1, j - 1
            path[step - 1, :] = np.array([j, i])
        elif pointer[i - 1, j - 1] == 2:
            i -= 1
            path[step - 1, 1] = i
        elif pointer[i - 1, j - 1] == 4:
            j -= 1
            path[step - 1, 0] = j
        else:
            raise Exception('invalid pointer value during traceback')
        step += 1
    path = path[:step - 1, :]
    path = np.flipud(path)
    prof = np.zeros((num_sym, step - 1))
    mask1 = path[:, 0] > 0
    mask2 = path[:, 1] > 0
    prof[:, mask1] = prof1
    prof[:, mask2] = prof[:, mask2] + prof2
    prof[num_sym - 1, ~mask1] = prof[num_sym - 1, ~mask1] + np.mean(sum(prof1))
    prof[num_sym - 1, ~mask2] = prof[num_sym - 1, ~mask2] + np.mean(sum(prof2))
    h1 = np.where(mask1 != 0)[0]
    h2 = np.where(mask2 != 0)[0]
    return prof, h1, h2
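
# Standalone sketch of the gap-augmented scoring matrix built above, using an
# illustrative 3-symbol substitution matrix instead of BLOSUM50:
import numpy as np

S = np.array([[5.0, -1.0, -2.0],
              [-1.0, 6.0, 0.0],
              [-2.0, 0.0, 4.0]])
sm = np.mean(np.diagonal(S))              # mean match score
sx = np.sum(S - np.diag(np.diagonal(S)))  # sum of mismatch scores
gapres, gapgap = 0.1 * sx, 0.1 * sm       # per the lambdas above
aug = np.zeros((4, 4))
aug[:3, :3] = S
aug[3, :] = gapres                        # gap vs residue
aug[:, 3] = gapres
aug[3, 3] = gapgap                        # gap vs gap
print(aug)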
Example #57
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

from rf import *

##Diverse Portfolios Decrease Variance I
path = 'stock_data.csv'
path = 'stock_data_nvidia.csv'  # overrides the line above; comment one out to switch datasets

stock_data = pd.read_csv(path)
selected = list(stock_data.columns[1:])

returns_quarterly = stock_data[selected].pct_change()
expected_returns = returns_quarterly.mean()
cov_quarterly = returns_quarterly.cov()

single_asset_std = np.sqrt(np.diagonal(cov_quarterly))
df = return_portfolios(expected_returns, cov_quarterly)
weights, returns, risks = optimal_portfolio(returns_quarterly[1:])

df.plot.scatter(x='Volatility', y='Returns', fontsize=12)
plt.plot(risks, returns, 'y-o')
plt.scatter(single_asset_std, expected_returns, marker='X', color='red', s=200)
for xc in single_asset_std:
    plt.axvline(x=xc, color='red')

if 'nvidia' in path:
    plt.axvline(single_asset_std[-1], color='green')
    plt.scatter(single_asset_std[-1],
                expected_returns[-1],
                marker='X',
                color='green',
                s=200)
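
# Portfolio volatility for a weight vector w is sqrt(w' C w); for long-only
# weights it never exceeds the largest single-asset std. A sketch using the
# quarterly covariance computed above, with equal weights for illustration:
w = np.ones(len(cov_quarterly)) / len(cov_quarterly)
portfolio_vol = np.sqrt(w @ cov_quarterly.values @ w)
print(portfolio_vol <= single_asset_std.max())  # True: diversification helps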
Example #58
                                           input_data=train_set,
                                           prior_dict=prior_dict,
                                           model_dict=model_dict)
precision_type = "torch.DoubleTensor"
te2, predicted2 = test_error(test_set,
                             v_obj=v_generator(precision_type=precision_type),
                             mcmc_samples=mcmc_samples_mixed,
                             type="classification",
                             memory_efficient=False)

print(te2)
mixed_mcmc_tensor = sampler1.get_samples(permuted=True)
print(mixed_mcmc_tensor)

mcmc_cov = numpy.cov(mixed_mcmc_tensor, rowvar=False)
mcmc_sd_vec = numpy.sqrt(numpy.diagonal(mcmc_cov))

print("mcmc problem difficulty")

print(max(mcmc_sd_vec) / min(mcmc_sd_vec))  # val = 2.25

out = sampler1.get_diagnostics(permuted=False)

print("num divergences after warmup")
processed_diag = process_diagnostics(out, name_list=["divergent"])

print(processed_diag.sum(axis=1))

print("num hit max tree depth after warmup")
processed_diag = process_diagnostics(out, name_list=["hit_max_tree_depth"])
Example #59
def get_cbepath(params, rpath, wpath):
    '''
    --------------------------------------------------------------------
    Generates matrices for the time path of the distribution of
    individual savings, individual consumption, and the Euler errors
    associated with the savings decisions.
    --------------------------------------------------------------------
    INPUTS:
    params  = length 9 tuple,
              (S, T, beta, sigma, nvec, bvec1, b_ss, TPI_tol, EulDiff)
    S       = integer in [3,80], number of periods an individual lives
    T       = integer > S, number of time periods until steady state
    beta    = scalar in (0,1), discount factor for each model period
    sigma   = scalar > 0, coefficient of relative risk aversion
    nvec    = (S,) vector, exogenous labor supply n_s
    bvec1   = (S-1,) vector, initial period savings distribution
    b_ss    = (S-1,) vector, steady-state savings distribution
    TPI_tol = scalar > 0, tolerance level for fsolve calls in TPI
    EulDiff = Boolean, =True if want difference version of Euler errors
              beta*(1+r)*u'(c2) - u'(c1), =False if want ratio version
              [beta*(1+r)*u'(c2)]/[u'(c1)] - 1
    rpath   = (T+S-2,) vector, equilibrium time path of interest rate
    wpath   = (T+S-2,) vector, equilibrium time path of the real wage

    OTHER FUNCTIONS AND FILES CALLED BY THIS FUNCTION:
        paths_life()

    OBJECTS CREATED WITHIN FUNCTION:
    cpath       = (S, T+S-2) matrix, time path of the distribution of
                  consumption
    bpath       = (S-1, T+S-2) matrix, time path of the distribution of
                  savings
    EulErrPath  = (S-1, T+S-2) matrix, time path of Euler errors
    pl_params   = length 5 tuple, (S, beta, sigma, TPI_tol, EulDiff)
    p           = integer >= 2, index representing number of periods
                  remaining in a lifetime, used to solve incomplete
                  lifetimes
    b_guess     = (p-1,) vector, initial guess for remaining lifetime
                  savings, taken from previous cohort's choices
    bveclf      = (p-1,) vector, optimal remaining lifetime savings
                  decisions
    cveclf      = (p,) vector, optimal remaining lifetime consumption
                  decisions
    b_err_veclf = (p-1,) vector, Euler errors associated with
                  optimal remaining lifetime savings decisions
    DiagMaskb   = (p-1, p-1) Boolean identity matrix
    DiagMaskc   = (p, p) Boolean identity matrix

    FILES CREATED BY THIS FUNCTION: None

    RETURNS: cpath, bpath, EulErrPath
    --------------------------------------------------------------------
    '''
    S, T, beta, sigma, nvec, bvec1, b_ss, TPI_tol, EulDiff = params
    cpath = np.zeros((S, T + S - 2))
    bpath = np.append(bvec1.reshape((S - 1, 1)),
                      np.zeros((S - 1, T + S - 3)), axis=1)
    EulErrPath = np.zeros((S - 1, T + S - 2))
    # Solve the incomplete remaining lifetime decisions of agents alive
    # in period t=1 but not born in period t=1
    cpath[S - 1, 0] = ((1 + rpath[0]) * bvec1[S - 2] +
                       wpath[0] * nvec[S - 1])
    pl_params = (S, beta, sigma, TPI_tol, EulDiff)
    for p in range(2, S):
        b_guess = np.diagonal(bpath[S - p:, :p - 1])
        bveclf, cveclf, b_err_veclf = paths_life(
            pl_params, S - p + 1, bvec1[S - p - 1], nvec[-p:],
            rpath[:p], wpath[:p], b_guess)
        # Insert the vector lifetime solutions diagonally (twist donut)
        # into the cpath, bpath, and EulErrPath matrices
        DiagMaskb = np.eye(p - 1, dtype=bool)
        DiagMaskc = np.eye(p, dtype=bool)
        bpath[S - p:, 1:p] = DiagMaskb * bveclf + bpath[S - p:, 1:p]
        cpath[S - p:, :p] = DiagMaskc * cveclf + cpath[S - p:, :p]
        EulErrPath[S - p:, 1:p] = (DiagMaskb * b_err_veclf +
                                   EulErrPath[S - p:, 1:p])
    # Solve for complete lifetime decisions of agents born in periods
    # 1 to T and insert the vector lifetime solutions diagonally (twist
    # donut) into the cpath, bpath, and EulErrPath matrices
    DiagMaskb = np.eye(S - 1, dtype=bool)
    DiagMaskc = np.eye(S, dtype=bool)
    for t in range(1, T):  # Go from periods 1 to T-1
        b_guess = np.diagonal(bpath[:, t - 1:t + S - 2])
        bveclf, cveclf, b_err_veclf = paths_life(
            pl_params, 1, 0, nvec, rpath[t - 1:t + S - 1],
            wpath[t - 1:t + S - 1], b_guess)
        # Insert the vector lifetime solutions diagonally (twist donut)
        # into the cpath, bpath, and EulErrPath matrices
        bpath[:, t:t + S - 1] = (DiagMaskb * bveclf +
                                 bpath[:, t:t + S - 1])
        cpath[:, t - 1:t + S - 1] = (DiagMaskc * cveclf +
                                     cpath[:, t - 1:t + S - 1])
        EulErrPath[:, t:t + S - 1] = (DiagMaskb * b_err_veclf +
                                      EulErrPath[:, t:t + S - 1])

    return cpath, bpath, EulErrPath
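The "twist donut" insertion is the part worth isolating: each cohort's lifetime decisions lie along a diagonal of the (age, time) matrix and are written in with a Boolean identity mask. A stripped-down sketch of the same pattern on toy numbers:

import numpy as np

S, T = 4, 6                           # lifetime length, transition periods
bpath = np.zeros((S, T + S))          # (age, time) matrix

DiagMask = np.eye(S, dtype=bool)      # Boolean identity: one cell per age
for t in range(T):
    bvec_life = np.full(S, float(t + 1))  # stand-in for cohort t's lifetime solution
    # Broadcasting DiagMask * bvec_life puts bvec_life[i] at cell (i, i),
    # so adding it into the slice starting at column t fills one diagonal.
    bpath[:, t:t + S] = DiagMask * bvec_life + bpath[:, t:t + S]

print(bpath)                          # each cohort's values march down one diagonal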
Example #60
 def sigmas(self):
     # Marginal standard deviations: square roots of the covariance diagonal.
     return np.sqrt(np.diagonal(self.cov()))
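Outside of a class, the same one-liner recovers per-variable standard deviations from any covariance matrix, and on sample data it agrees with np.std:

import numpy as np

X = np.random.default_rng(1).normal(size=(500, 3))
cov = np.cov(X, rowvar=False)
sigmas = np.sqrt(np.diagonal(cov))
print(np.allclose(sigmas, X.std(axis=0, ddof=1)))  # True: same quantity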