Example #1
def request_agreement(self):
        """7. Alice starts key agreement

        Generates a fingerprint, performs the fuzzy commitment, and
        sends the resulting hash and delta to Bob.
        """
        log.info('7. Alice starts key agreement')

        #===============================================================================
        # Fingerprinting and Fuzzy Cryptography
        #===============================================================================
        # generate fingerprint
        self.fingerprint = fingerprint_energy_diff.get_fingerprint(self.recording_data, self.recording_samplerate)
        
        # save fingerprint for debugging
        scipy.savetxt("client_fingerprint.txt", self.fingerprint)

        log.debug('Alice fingerprint:\n'+str(self.fingerprint))
        
        # commit; the Reed-Solomon code can correct up to (n-m)/2 symbol errors
        self.hash, self.delta, self.private_key = crypto_fuzzy_jw.JW_commit(self.fingerprint, m=self.rs_code_m, n=self.rs_code_n, symsize=self.rs_code_symsize)
        
        log.debug('Alice Blob:\nHash:\n'+str(self.hash)+'\nDelta:\n'+str(self.delta))
        
        # save delta for debugging
        scipy.savetxt("client_delta.txt", self.delta)
        
        # remote call for key agreement
        # debug mode also sends the fingerprint in clear text,
        # which provides no security!
        if self.debug:
            accept_agreement = self.pairing_server.callRemote("agreement_debug", self.fingerprint.tolist(), self.hash, self.delta.tolist())
            accept_agreement.addCallbacks(self.answer_agreement)
        else:
            accept_agreement = self.pairing_server.callRemote("agreement", self.hash, self.delta.tolist())
            accept_agreement.addCallbacks(self.answer_agreement)
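For context, JW_commit follows the Juels-Wattenberg fuzzy-commitment construction: draw a random codeword c from a Reed-Solomon code, publish hash(c) together with delta = c XOR fingerprint, and any second fingerprint within the code's correction radius recovers c. A minimal byte-level sketch of the idea (function and decoder names are hypothetical, not this module's actual API):

import hashlib

def jw_commit_sketch(fingerprint: bytes, codeword: bytes):
    # commitment = (hash of the codeword, XOR offset between codeword and fingerprint)
    delta = bytes(c ^ f for c, f in zip(codeword, fingerprint))
    return hashlib.sha512(codeword).digest(), delta

def jw_decommit_sketch(fingerprint2: bytes, hash_, delta, rs_correct):
    # shift the noisy fingerprint by delta, error-correct, and compare hashes
    candidate = bytes(f ^ d for f, d in zip(fingerprint2, delta))
    codeword = rs_correct(candidate)  # hypothetical RS decoder; fixes <= (n-m)//2 symbol errors
    return hashlib.sha512(codeword).digest() == hash_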
Example #2
 def run(self):
 
     # integrate the system over [0, Tfinal) with step dt, then save the
     # (time, state) trajectory to a text file
     self.times = sp.arange(0,self.Tfinal,self.dt)
     self.sim = odeint(self.eqns,self.init,self.times,(self.inj,self.injdt))
     sp.savetxt('simulation.txt',sp.column_stack((self.times,self.sim)))
Example #3
def simPheno(options):

    print('importing covariance matrix')
    if options.cfile is None: options.cfile=options.bfile
    XX = readCovarianceMatrixFile(options.cfile,readEig=False)['K']

    print('simulating phenotypes')
    SP.random.seed(options.seed)
    simulator = sim.CSimulator(bfile=options.bfile,XX=XX,P=options.nTraits)
    Xr,region = simulator.getRegion(chrom_i=options.chrom,size=options.windowSize,min_nSNPs=options.nCausalR,pos_min=options.pos_min,pos_max=options.pos_max)
 
    Y,info    = genPhenoCube(simulator,Xr,vTotR=options.vTotR,nCausalR=options.nCausalR,pCommonR=options.pCommonR,vTotBg=options.vTotBg,pHidd=options.pHidden,pCommon=options.pCommon)

    print('exporting pheno file')
    if options.pfile is not None:
        outdir = os.path.split(options.pfile)[0]
        if not os.path.exists(outdir):
            os.makedirs(outdir)
    else:
        identifier = '_seed%d_nTraits%d_wndSize%d_vTotR%.2f_nCausalR%d_pCommonR%.2f_vTotBg%.2f_pHidden%.2f_pCommon%.2f'%(options.seed,options.nTraits,options.windowSize,options.vTotR,options.nCausalR,options.pCommonR,options.vTotBg,options.pHidden,options.pCommon)
        options.pfile = os.path.split(options.bfile)[-1] + '%s'%identifier

    pfile  = options.pfile + '.phe'
    rfile  = options.pfile + '.phe.region'

    SP.savetxt(pfile,Y)
    SP.savetxt(rfile,region)
Example #4
def main():
    # Do not modify
    start = time.time()

    parser = argparse.ArgumentParser(description='Build context vectors.')
    parser.add_argument('textfile', type=str, help='name of text file')
    parser.add_argument('window', type=int, help='context window')
    parser.add_argument('threshold', type=int, help='vocabulary minimum frequency')
    parser.add_argument('--ndims', type=int, default=100, help='number of SVD dimensions')
    parser.add_argument('--debug', action='store_true')  # type=bool would treat any non-empty string as True
    args = parser.parse_args()

    vocab, points, vocabFreq = count_context_vectors(args.textfile+'.txt', args.window, args.threshold)
    print('Estimated count context vectors')
    if not args.debug:  # compute PPMI and SVD before writing. if debug is True, just write the count vectors
        points = ppmi(points)
        print('Converted to positive pointwise mutual information')
        points = dimensionality_reduce(points, args.ndims)
        print('Reduced dimensionality')
        outfile = args.textfile+'.window'+str(args.window)+'.thresh'+str(args.threshold)
    else:
        outfile = args.textfile+'.window'+str(args.window)+'.thresh'+str(args.threshold)+'.todebug'

    with open(outfile+'.labels', 'w') as o:
        o.write('\n'.join(vocab)+'\n')
    scipy.savetxt(outfile+'.vecs', points, fmt='%.4e')
    with open(outfile+'.csv', 'w') as do:
        do.write('\n'.join(vocabFreq)+'\n')
    print('Saved to file')

    print(time.time() - start, 'seconds')
Example #5
def apm_generate_aperture_map():
    r"""
    Driver function to generate an aperture map from a TIF image.
    """
    # parsing commandline args
    namespace = parser.parse_args()
    if namespace.verbose:
        set_main_logger_level('debug')

    # checking path to prevent accidental overwriting
    if not namespace.aperture_map_name:
        map_name = os.path.basename(namespace.image_file)
        map_name = map_name.replace(os.path.splitext(map_name)[1],
                                    '-aperture-map.txt')
        namespace.aperture_map_name = map_name

    #
    map_path = os.path.join(namespace.output_dir, namespace.aperture_map_name)
    if os.path.exists(map_path) and not namespace.force:
        msg = '{} already exists, use "-f" option to overwrite'
        raise FileExistsError(msg.format(map_path))

    # loading image data
    data_array = load_image_data(namespace.image_file, namespace.invert)
    data_array = data_array.astype(sp.int8)

    # summing data array down into a 2-D map
    logger.info('creating 2-D aperture map...')
    aperture_map = sp.sum(data_array, axis=1, dtype=int)

    # saving map
    logger.info('saving aperture map as {}'.format(map_path))
    sp.savetxt(map_path, aperture_map.T, fmt='%d', delimiter='\t')
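As a toy illustration of the summing step, here is the same axis-1 projection on a hypothetical 2x3x2 binary stack (plain numpy, self-contained):

import numpy as np

# axis 1 is the direction collapsed into aperture counts
img = np.array([[[1, 0], [1, 1], [0, 1]],
                [[0, 0], [1, 0], [1, 1]]], dtype=np.int8)
aperture_map = img.sum(axis=1)  # open voxels per (row, column)
print(aperture_map)  # [[2 2]
                     #  [2 1]]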
Example #6
def Corr(GDP,I,C):
	m = sp.shape(GDP)[1]
	GDPIcorr = []
	GDPCcorr = []
	for i in range(0, m):
		gdp = GDP[:,i]
		inv = I[:,i]
		con = C[:,i]
		#Correlation between output and investment for each series
		gdpi = sp.corrcoef(gdp,inv)
		GDPIcorr.append(gdpi[0,1])
		#Correlation between output and consumption for each series
		gdpc = sp.corrcoef(gdp,con)
		GDPCcorr.append(gdpc[0,1])
	#Mean and standard deviation of correlation between GDP and
	#Investment and Consumption over total number of simulations
	GDPICORR = sp.array(GDPIcorr)
	gdpimean = sp.mean(GDPICORR)
	gdpistdev = sp.std(GDPICORR)
	GDPCCORR = sp.array(GDPCcorr)
	gdpcmean = sp.mean(GDPCCORR)
	gdpcstdev = sp.std(GDPCCORR)
	sp.savetxt('GDPICORR.csv',GDPICORR)
	sp.savetxt('GDPCCORR.csv',GDPCCORR)
	print "The mean and standard deviation between GDP and"
	print "Investment and GDP and Consumption followed by"
	print "The lists of each correlation coefficient for"
	print "each series are saved in csv files"
	return gdpimean, gdpistdev, gdpcmean, gdpcstdev
Example #7
def processdataset(globpath='iimage-*', outputpath='./', margin=17):

    # store the margin in the module-global name `sp`
    global sp
    sp = margin

    files = glob.glob(globpath)
    files.sort()

    datasets = []

    imagedir = outputpath+'images/'
    if not os.path.exists(imagedir):
        os.makedirs(imagedir)

    datadir = outputpath+'datasets/'
    if not os.path.exists(datadir):
        os.makedirs(datadir)


    for filename in files:
        print(filename)
        datasets = processimage(filename, imagedir, datasets)

    for i,dataset in enumerate(datasets):
        scipy.savetxt(datadir+'results-'+str(dataset[2])+'.dat',dataset[-1])
Example #8
def make_S(A0, Ak, G0, Gk, phi):

    # build two separate matrices; `R = P = ...` would alias a single object
    R = scipy.matrix(scipy.zeros((6, 6)))
    P = scipy.matrix(scipy.zeros((6, 6)))

    for i in range(0, 3, 2):
        for j in range(3):
            R[i, j] = Ak[i, j]
    R[1, 1] = R[3, 3] = R[4, 4] = 1.0
    R[5, 5] = Ak[5, 5]

    P[0, 0] = (A0[0, 0] * cos(phi)**2.0 + A0[1, 0] * sin(phi)**2.0)
    P[0, 1] = (A0[0, 1] * cos(phi)**2.0 + A0[1, 1] * sin(phi)**2.0)
    P[0, 2] = (A0[0, 2] * cos(phi)**2.0 + A0[1, 2] * sin(phi)**2.0)
    P[0, 3] = (A0[3, 3] * sin(2.0 * phi))
    P[1, 0] = sin(phi)**2.0
    P[1, 1] = cos(phi)**2.0
    P[1, 3] = -sin(2.0*phi)
    P[2, 0] = A0[2, 0]
    P[2, 1] = A0[2, 1]
    P[2, 2] = A0[2, 2]
    P[3, 0] = -0.5*sin(2.0*phi)
    P[3, 1] = 0.5*sin(2.0*phi)
    P[3, 3] = cos(2.0*phi)
    P[4, 4] = cos(phi)
    P[4, 5] = -sin(phi)
    P[5, 4] = A0[4, 4] * sin(phi)
    P[5, 5] = A0[5, 5] * cos(phi)

    scipy.savetxt("R", R)
    scipy.savetxt("P", P)
    return scipy.matrix(R.I) * scipy.matrix(P)
Example #9
def main():
    # Do not modify
    start = time.time()

    parser = argparse.ArgumentParser(description='Build document-term vectors.')
    parser.add_argument('textfile', type=str, help='name of text file with documents on each line')
    parser.add_argument('threshold', type=int, help='term minimum frequency')
    parser.add_argument('--ndims', type=int, default=100, help='number of SVD dimensions')
    parser.add_argument('--debug', action='store_true', help='debug mode?')  # type=bool would treat any non-empty string as True
    args = parser.parse_args()

    terms, points = tfidf_docterm(args.textfile+'.txt', args.threshold)
    print('Estimated document-term TF-IDF vectors')
    if not args.debug:  # compute PPMI and SVD before writing. if debug is True, just write the count vectors
        points = dimensionality_reduce(points, args.ndims)
        print('Reduced dimensionality')
        outfile = args.textfile+'.tfidf'+'.thresh'+str(args.threshold)
    else:
        outfile = args.textfile+'.tfidf'+'.thresh'+str(args.threshold)+'.todebug'

    with open(outfile+'.dims', 'w') as o:
        o.write('\n'.join(terms)+'\n')
    scipy.savetxt(outfile+'.vecs', points, fmt='%.4e')
    print('Saved to file')

    print(time.time() - start, 'seconds')
Example #10
    def toASCII(self, ASCIIfile, ncol=1, hdr="", onError='w',
                writechannel=False):
        if isinstance(ASCIIfile, str):
            ASCIIfile = open(ASCIIfile, 'w')
        ASCIIfile.write(hdr)
        if ncol > 1:
            nrow = self.data.size // ncol
            rm = self.data.size % ncol
            if rm > 0:
                nrow += 1
                if onError == 'w':
                    print('Warning: padded with %i zeros' % (ncol - rm),
                          file=sys.stderr)
                elif onError == 'n':
                    pass
                else:
                    raise ValueError(
                        'Data size does not fit into %i columns' % ncol)
            dat = self.data.copy()
            dat.resize(nrow, ncol)
        else:
            dat = self.data
        if writechannel:
            if ncol == 1:
                channels = arange(1, dat.size + 1)
                channels.resize(dat.size, 1)
                dat.resize(dat.size, 1)
                print("Channel Counts", file=ASCIIfile)
                savetxt(ASCIIfile, concatenate((channels, dat), axis=1),
                        fmt='%i')
            else:
                raise NotImplementedError(  # was misspelled NonImplementedError
                    "channel numbers not yet supported for multicolumns")
        else:
            savetxt(ASCIIfile, dat, fmt='%9i')
        ASCIIfile.close()
Example #11
def gen_IC(sigma, rn, outfile="workfile", icdir="ICs", M=5, N=50,
           lapfile="Laplacian.txt", tries=10, iclist=[]):
    lap = loadtxt(lapfile)
    spa = sparse.csr_matrix(lap)
    success = 0
    attempts = 0
    while success == 0 and attempts < tries:
        attempts += 1  # was incrementing `tries`, so the loop could never time out
        try:
            tag = 's%.2fr%.3d' % (sigma, rn)
            tag = tag.replace(".", "")

            parameters = [35.0, 16.0, 9.0, 0.4, 0.12, sigma]
            x0 = 10 * (random.random(2 * N) - 0.5)

            tic = time.time()
            trajectory = integrate.odeint(mimura, x0, range(0, 1000),
                                          args=(parameters, spa))
            print("integration took", time.time() - tic, "seconds")

            x1 = trajectory[-1]

            sol = fsolve(mimura3, x1, args=(parameters, spa), full_output=True)
            x2 = sol[0]
            # `x2 not in iclist` is ambiguous for arrays; compare element-wise
            if not any((x2 == ic).all() for ic in iclist):
                savetxt(icdir + '/init_cond_' + tag + '.txt', x2)
                write_mimu(lap, par=parameters, ic=x2, outfile=outfile)
                iclist.append(x2)
                success = 1
        except Exception:
            pass
    return iclist
Example #12
def TSP(stops, Alg, steps, param, seed = None,
                      coordfile = 'xycoords.txt'):
    '''A wrapper function that attempts to optimize the traveling 
    salesperson problem using a specified algorithm. If coordfile
    exists, a preexisting set of coordinates will be used. Otherwise,
    a new set of "stops" coordinates will be generated for the person to 
    traverse, and will be written to the specified file.'''
    
    ## Create the distance matrix, which will be used to calculate
    ## the fitness of a given path
    if os.path.isfile(coordfile):
        coords = scipy.genfromtxt(coordfile)
        distMat = DistanceMatrix(coords)
    else:
        distMat = GenerateMap(stops, fname = coordfile, seed = seed)

    if Alg == 'HC':
        ## param is the number of solutions to try per step
        bestSol, fitHistory = HillClimber(steps, param, distMat, seed)
    elif Alg == 'SA':
        ## param is a placeholder
        bestSol, fitHistory = SimulatedAnnealing(steps, param, distMat, seed)
    elif Alg == 'MC3':
        ## param is the number of chains
        bestSol, fitHistory = MCMCMC(steps, param, distMat, seed)
    elif Alg == 'GA':
        ## param is the population size
        bestSol, fitHistory = GeneticAlgorithm(steps, param, distMat, seed)
    else:
        raise ValueError('Algorithm must be "HC", "SA", "MC3", or "GA".')

    outfname = coordfile + '-' + Alg + '-' + str(steps) + '-' + str(param) + '.txt'
    scipy.savetxt(outfname, scipy.array(bestSol), fmt = '%i')
    return bestSol, fitHistory
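A brief usage sketch, assuming the algorithm helpers above (HillClimber, SimulatedAnnealing, MCMCMC, GeneticAlgorithm) are defined and that fitHistory is a sequence of fitness values:

# optimize a 50-stop tour with 10000 steps of simulated annealing;
# reuses xycoords.txt if present, otherwise generates it
bestSol, fitHistory = TSP(50, 'SA', 10000, None, seed=42)
print('best tour:', bestSol)
print('final fitness:', fitHistory[-1])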
Example #13
def create_histogram(parameter_name, nbins=100, writeFile=True, skipfirst=0, truncate=False, smooth=False):
	"""
	Returns a histogram and some statistics about this parameter.
		
	@param writeFile: if true, write the histogram to paramname.histogram
	"""
	f = "%s-chain-0.prob.dump" % parameter_name
	values = numpy.recfromtxt(f)[skipfirst::nevery]

	statistics = {
		'min':   float(values.min()),
		'max':   float(values.max()),
		'stdev': float(values.std()),
		'mean':  float(values.mean()),
		'median':float(numpy.median(values)),
		'q1':    float(scipy.stats.scoreatpercentile(values, 25)),
		'q3':    float(scipy.stats.scoreatpercentile(values, 75)),
		'p5':    float(scipy.stats.scoreatpercentile(values, 5)),
		'p95':    float(scipy.stats.scoreatpercentile(values, 95)),
	}
	
	hist = scipy.histogram(values, bins = nbins if not smooth else nbins*10, normed=True)
	histwithborders = numpy.dstack([hist[1][0:nbins], hist[1][1:nbins+1], hist[0]])
	if writeFile:
		scipy.savetxt('%s.histogram' % parameter_name, histwithborders[0], delimiter="\t")
	return histwithborders[0], statistics
Example #14
    def test_networkx_matrix(self):
        print('\n---------- Matrix Test Start -----------\n')

        g = nx.barabasi_albert_graph(30, 2)
        nodes = g.nodes()
        edges = g.edges()
        print(edges)

        mx1 = nx.adjacency_matrix(g)
        fp = tempfile.NamedTemporaryFile()
        file_name = fp.name
        sp.savetxt(file_name, mx1.toarray(), fmt='%d')

        # Load it back to matrix
        mx2 = sp.loadtxt(file_name)
        fp.close()

        g2 = nx.from_numpy_matrix(mx2)
        cyjs_g = util.from_networkx(g2)

        #print(json.dumps(cyjs_g, indent=4))

        self.assertIsNotNone(cyjs_g)
        self.assertIsNotNone(cyjs_g['data'])
        self.assertEqual(len(nodes), len(cyjs_g['elements']['nodes']))
        self.assertEqual(len(edges), len(cyjs_g['elements']['edges']))

        # Make sure all edges are reproduced
        print(set(edges))
        diff = compare_edge_sets(set(edges), cyjs_g['elements']['edges'])
        self.assertEqual(0, len(diff))
Example #15
def make_B(A0, Ak, G0, Gk, phi):
    x = 0
    y = 1
    z = 2

    B = scipy.zeros((6,6))
    B[0, 0] = cos(phi)**2.0*1
    B[0, 1] = sin(phi)**2.0
    B[0, 3] = sin(2.0 * phi)*1
    B[1, 0] = (A0[0, 0] * sin(phi)**2.0 + (A0[1, 0] - Ak[1, 0]) * cos(phi)**2.0) / Ak[1, 1]
    print(B[1, 0], A0[0, 0] / Ak[1, 1])
    B[1, 1] = (A0[1, 1] * cos(phi)**2.0 + (A0[1, 0] - Ak[1, 0]) * sin(phi)**2.0) / Ak[1, 1]
    B[1, 2] = (A0[0, 2] * sin(phi)**2.0 + A0[1, 2] * cos(phi)**2.0 - Ak[1, 2]) / Ak[1, 1]*1
    B[1, 3] = -sin(2.0 * phi) * (2.0 * G0[0, 1] + Ak[1, 0]) / Ak[1, 1]*1
    B[2, 2] = 1.0*1
    B[3, 0] = sin(2.0 * phi) * (A0[1, 0] - A0[0, 0]) / (4.0 * Gk[x, y])*1
    B[3, 1] = sin(2.0 * phi) * (A0[1, 1] - A0[0, 1]) / (4.0 * Gk[x, y])*1
    B[3, 2] = sin(2.0 * phi) * (A0[1, 2] - A0[0, 2]) / (4.0 * Gk[x, y])*1
    B[3, 3] = cos(2.0 * phi) * G0[0, 1] / Gk[x, y]*1
    B[4, 4] = cos(phi) * G0[1, 2] / Gk[y, z]*1
    B[4, 5] = -sin(phi) * G0[0, 2] / Gk[y, z]*1
    B[5, 4] = sin(phi)*1
    B[5, 5] = cos(phi)*1

    scipy.savetxt("B1", B)
    return scipy.matrix(B)
Example #16
def CalculateProjectRg(ProjectInfo, Output, returnRgs=False):
    """
    Calculate the radius of gyration for the project, i.e. all trajectories.
    ProjectInfo: ProjectInfo.h5 file.
    Output: output file (XXX.dat).
    The Output default is set in the scripts to './Rgs.dat'.
    """
    Output = checkoutput(Output)

    if not isinstance(ProjectInfo, str):
        print("Please input the path to ProjectInfo.h5")
        raise IOError
    print('Calculating the Rg for each trajectory......')
    ProjectInfoPath = '/'.join(os.path.realpath(ProjectInfo).split('/')[:-1])
    os.chdir(ProjectInfoPath)
    Trajfiles = []
    ProjectInfo = Serializer.LoadFromHDF(ProjectInfo)
    for i in range(ProjectInfo['NumTrajs']):
        Trajfiles.append(ProjectInfo['TrajFilePath']
                         + ProjectInfo['TrajFileBaseName'] + '%d' % i
                         + ProjectInfo['TrajFileType'])
    Rgs = computeRg(Trajfiles)

    print("Save data to %s" % Output)
    savetxt(Output, Rgs)
    print("Done.")
    if returnRgs:
        return Rgs
Example #17
File: QTR.py Project: xkronosua/QTR
	def Save(self):
		'''Save the active array to a text file'''
		Dict = {'cSave' : 0, 'sSave' : 1, 'rSave' : 2}
		senderName = self.sender().objectName()
		active = Dict[senderName]
		data = self.getData(active)
		filename = QtGui.QFileDialog.getSaveFileName(self,'Save File', self.Root)
		if filename:
			sp.savetxt(str(filename), data)
Example #18
def RecordOverlap(overlap, outdir, T, binary):
    """ 
    Output overlap (in time) to data file. 
    """
    path = os.path.dirname(os.path.realpath(__file__)) + "/" + outdir + "/overlap" + str(T) + ".dat"
    if binary:
        sp.save(path, overlap)
    else:
        sp.savetxt(path, overlap)
Example #19
    def save_MBAR(self):
        """save results (BICePs score and population) from MBAR analysis"""
        print('Writing %s...' % self.BSdir)
        savetxt(self.BSdir, self.f_df)
        print('...Done.')

        print('Writing %s...' % self.popdir)
        savetxt(self.popdir, self.P_dP)
        print('...Done.')
Example #20
def RecordFidelity(fidelity, outdir, binary):
    """
    Output fidelity to data file. 
    """
    path = os.path.dirname(os.path.realpath(__file__)) + "/" + outdir + "/fidelity.dat"
    if binary:
        sp.save(path, fidelity)
    else:
        sp.savetxt(path, fidelity)
Example #21
def RecordEigSpec(eigspec, outdir, binary):
    """
    Output eigenspectrum to data file. 
    """
    eigpath = os.path.dirname(os.path.realpath(__file__)) + "/" + outdir + "/eigenspectrum.dat"
    if binary:
        sp.save(eigpath, eigspec)
    else:
        sp.savetxt(eigpath, eigspec)
Example #22
def RecordProbs(bitstring, density, fname, rpath, outinfo):
    """
    Record the final-state probabilities.
    """
    path = rpath + outinfo["outdir"] + "/" + fname
    if outinfo["binary"]:
        sp.save(path, density)
    else:
        sp.savetxt(path, density)
Example #23
    def save_coeffs(self, filename, k_coeff):
        """
        This will save the numpy array to a csv file.  Will work with any numpy array.

        :param filename: <string> The name and path of the file to be saved
        :param k_coeff: <numpy> The array to be saved
        return:
        """
        lg.info('Writing k_coeffs to file :: ' + filename)
        scipy.savetxt(filename, k_coeff, delimiter=",")
Example #24
def displayArrayInExcel(a):
	import os, tempfile
	from win32com.client import Dispatch
	# write to a csv file
	(fileno, filename) = tempfile.mkstemp(suffix=".csv")
	os.close(fileno)
	scipy.savetxt(filename, a, fmt='%f', delimiter=',')
	# start excel
	xl = Dispatch('Excel.Application')
	wb = xl.Workbooks.Open(filename)
	xl.Visible = 1
Example #25
    def remote_agreement_debug(self, fingerprint_debug, hash, delta):
        """THIS IS A DEBUG FUNCTION
        using the fingerprint from the client
        Using this means NO security!
        
        8. Key Agreement on Server
        generates fingerprint and decommits
        using received ``hash`` and ``delta``
        
        :param hash: SHA-512 Hash of codeword c
        :type hash: str
        :param delta: difference
        :type delta: list
        """
        log.info('8. Key Agreement on Server')
        
        #===============================================================================
        # Fingerprinting and Fuzzy Cryptography
        #===============================================================================       
        # generate the fingerprint (not used here; see possible_fingerprints below)
        self.fingerprint = fingerprint_energy_diff.get_fingerprint(self.recording_data, self.recording_samplerate)
        
        # save fingerprint for debugging
        scipy.savetxt("server_fingerprint.txt", self.fingerprint)

        log.debug('Bob fingerprint:\n'+str(self.fingerprint))
        
        # get possible fingerprints
        possible_fingerprints = get_possible_fingerprints(self.recording_data, self.recording_samplerate)
        
        # DEBUG
        length = len(fingerprint_debug)
        
        distances = []
        
        for fingerprint in possible_fingerprints:
            # calculate hamming distance between fingerprints
            distance = hamming_distance(fingerprint, fingerprint_debug)
            print('Distance: '+str(distance)+' of '+str(length))
            print('Correlation percentage: '+str(1-float(distance)/float(length)))
            distances += [distance]
        
        min_distance = min(distances)
        min_correlation = 1-float(min(distances))/float(length)
        print('Minimal distance: '+str(min_distance)+' of '+str(length))
        print('Minimal correlation percentage: '+str(min_correlation))
        
        try:
            minimals = scipy.genfromtxt(self.debug_file)
        except Exception as err:
            log.error('%s' % str(err))
            print('first time, so creating minimals')
            minimals = scipy.array([])
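hamming_distance is referenced above but not shown; for equal-length 0/1 fingerprint sequences it is simply the number of mismatched positions. A stand-in consistent with the usage above (a sketch, not necessarily the project's actual helper):

def hamming_distance(a, b):
    """Number of positions at which two equal-length sequences differ."""
    assert len(a) == len(b)
    return sum(1 for x, y in zip(a, b) if x != y)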
Example #26
def simpleCicada(sound,learningRate=0.5,waitDur=(0.,0.5),
    initialPitch=880.,minPitchIn=0.,maxPitchIn=2000.,
    minPitchOut=880.,maxPitchOut=1760.):
    """
    If a pitch is heard, change myPitch according to
        myPitch *= (heardPitch/myPitch)**learningRate.

    Between sounds, wait a random time in the range given
    by waitDur (in seconds).

    Returns list of heard pitches and list of played pitches
    (in Hz).
    """
    myPitch = initialPitch
    heardPitchList,myPitchList = [],[]

    # measure pitch of audio sample
    samplePitch = detectPitch(sound)
    print "samplePitch = %.1f"%samplePitch

    try:
        while True:
            # listen
            heardPitch = detectPitch(audioInput())

            # change
            if (heardPitch > minPitchIn) and (heardPitch < maxPitchIn):
                mappedPitch = mapToInterval(heardPitch,
                                minPitchOut,maxPitchOut)
                myPitch *= (mappedPitch/myPitch)**learningRate
            else:
                mappedPitch = 0.
            print "heardPitch = %.1f, mappedPitch = %.1f, myPitch = %.1f"%(heardPitch,mappedPitch,myPitch)

            # sing
            #playTone(myPitch,dur)
            playSound(sound,samplePitch,myPitch)

            # log
            curTime = time.time()
            heardPitchList.append([curTime,heardPitch])
            myPitchList.append([curTime,myPitch])

            # wait
            dur = waitDur[0] \
                + (waitDur[1]-waitDur[0])*scipy.random.rand()
            time.sleep(dur)

    except KeyboardInterrupt:
        # save data to file
        prefix = str(os.getpid())
        scipy.savetxt(prefix+'pitchList.txt',myPitchList)
        return heardPitchList,myPitchList
Example #27
    def write_gdf(filename, gdf):
        """Writes GDF data file.

        :type filename: str
        :param filename: valid path on the filesystem
        :type gdf: dict or ndarray
        :param gdf: either a dict mapping unit ids to spike trains or a
            ndarray with the keys in the first column and the samples in the
            second column"""

        if isinstance(gdf, dict):
            gdf = GdfFile.convert_dict_to_matrix(gdf)
        sp.savetxt(filename, gdf, fmt='%05d %d')
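A short usage sketch (hypothetical spike trains; assumes write_gdf is a static method and that GdfFile.convert_dict_to_matrix flattens the dict into (unit id, sample) rows, as the docstring implies):

spikes = {1: [10, 42, 99], 2: [7, 55]}  # unit id -> sample indices
GdfFile.write_gdf('units.gdf', spikes)
# with fmt='%05d %d', rows come out like "00001 10"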
Example #28
def StateOverlapOutput(t, outinfo, psi):
    """
    Output the overlap with psi and a specified state at some timestep.
    """
    # Fix up probabilities
    idx = sp.array(outinfo["stateoverlap"], dtype=int)[:, 0]
    probs = sp.power(abs(psi[idx]), 2).ravel()
    # Write to file
    fname = outinfo["outdir"] + "/state_overlap_T" + str(t) + ".txt"
    if outinfo["binary"]:
        sp.save(fname, probs)
    else:
        sp.savetxt(fname, probs)
Example #29
    def Report():
        imgname = 'polymethodsgrowth.png'
        slopename = 'slopes.txt'
        slopes = sp.zeros([len(Experiment.runs), len(Experiment.conc)])
        i = 0
        j = 0
        pyplot.figure(1)
        for sim, fname in Experiment.runs:
            for conc in Experiment.conc[0:6]:
                print('Concentration: ', str(conc))
                fname_conc = fname + '_c=' + str(conc)
                pth = os.path.join(Experiment.datafolder, fname_conc)
                record = SimulationRecord(pth, sim, 'r')
                recon = TimeReconstruction(record)
                data = recon.length_vs_time()
                if fname == 'processive.txt':
                    color = 'r'
                elif fname == 'non_processive.txt':
                    color = 'g'
                else:
                    color = 'b'
                pyplot.plot(data[0], data[1], color)
                (ar,br)=polyfit(data[0],data[1],1)
                slopes[i, j] = ar
                j += 1
            j = 0
            i += 1

        
        # length versus time
        pyplot.legend(('Processive', 'Non-processive', 'Detach'),
                      'upper left', shadow=True, fancybox=False)
        pyplot.xlabel('Time (s)')
        pyplot.ylabel('Filament length')
        pyplot.savefig(os.path.join(Experiment.datafolder, imgname))
        slope_fname = os.path.join(Experiment.datafolder, slopename)
        sp.savetxt(slope_fname, slopes)  # was saving to bare `slopename`, ignoring datafolder

        # slope vs concentration
        pyplot.figure(2)
        pyplot.plot(Experiment.conc, slopes[0])
        pyplot.plot(Experiment.conc, slopes[1])
        pyplot.plot(Experiment.conc, slopes[2])
        pyplot.legend(('Processive', 'Non-processive', 'Detach'),
                      'upper left', shadow=True, fancybox=False)
        # pyplot.xscale('log')
        pyplot.xlabel('Concentration')
        pyplot.ylabel('Average Growth Rate')
        
        pyplot.show()
        return slopes
Example #30
def run(steps=40):
    pk = uuid4()
    for i in range(steps):
        Q.time_step()
        if i%10 == 0:
            print(i * 1. / steps)
        Q.plot_links()
        py.xlim((-25,25))
        py.ylim((-25,config['YSIZE']))
        py.draw()

    xyfile = os.path.join(clusterdir,str(steps),str(pk) + ".gz")
    #Q.save('clusters/40/{:08d}.dat'.format(pk))
    sp.savetxt(xyfile,Q.get_pos_arr(force=True))
Example #31
    def saveConfusion(self):
        """
        Open window and save confusion shown in qtableview
        """
        fileName = QFileDialog.getSaveFileName(self.dockwidget,
                                               "Select output file",
                                               self.lastSaveDir, "CSV (*.csv)")
        self.rememberLastSaveDir(fileName)
        fileName, fileExtension = os.path.splitext(fileName)
        if fileExtension != '.csv':
            fileName = fileName + '.csv'

        # save to CSV
        try:
            sp.savetxt(fileName,
                       self.lastConfusionMatrix,
                       delimiter=',',
                       fmt='%1.4d')
        except:
            QtGui.QMessageBox.warning(
                self, 'Missing confusion matrix ? ',
                'Cannot save confusion matrix. Are you sure to have generated it before ?',
                QtGui.QMessageBox.Ok)
Example #32
    def to_file(self, file_name, out_vars=None, separator=', '):
        """
        Output the given variables to a file.

        file_name   Name of the file to use for output (will be overwritten!)
        out_vars    List of variable ids to output. If None, default is
                    'time', dynamic variables
        separator   The separator to use between columns.
        """
        if out_vars is None:
            out_vars = ['time'] + self.dynamicVarKeys

        first_line = separator.join(out_vars) + os.linesep
        f = open(file_name, 'w')
        f.write(first_line)

        out_array = []
        for var in out_vars:
            out_array.append(self.get_var_traj(var))

        out_array = scipy.transpose(out_array)
        scipy.savetxt(f, out_array, delimiter=separator)
        f.close()
Example #33
def analize_graphs(N,
                   xfile,
                   y1file,
                   y2file,
                   y3file,
                   maxfile="max_filename.txt",
                   medfile="medium_filename.txt",
                   minfile="min_filename.txt",
                   srcfile="screenshot.png"):
    x = scipy.genfromtxt(xfile, delimiter=', ')
    x = x[:N]
    y1 = scipy.genfromtxt(y1file, delimiter=', ')
    y1 = y1[:N]
    y2 = scipy.genfromtxt(y2file, delimiter=', ')
    y2 = y2[:N]
    y3 = scipy.genfromtxt(y3file, delimiter=', ')
    y3 = y3[:N]

    plt.rcParams["figure.figsize"] = (20, 10)
    l = plt.subplot(121)  # numrows, numcols, fignum

    l.plot(x, y1, c="red")
    l.plot(x, y2, c="green")
    l.plot(x, y3, c="blue")
    y1_patch = mpatches.Patch(color='red', label="y1")
    y2_patch = mpatches.Patch(color='green', label="y2")
    y3_patch = mpatches.Patch(color='blue', label="y3")
    plt.legend(handles=[y1_patch, y2_patch, y3_patch])

    plt.xlabel("Abscissa")
    plt.ylabel("Ordinate")
    plt.title("Graph")

    mx = scipy.maximum(y1, y2)
    mx = scipy.maximum(mx, y3)
    mn = scipy.minimum(y1, y2)
    mn = scipy.minimum(mn, y3)
    av = scipy.average([y1, y2, y3], axis=0)

    r = plt.subplot(122)
    r.plot(x, mn, c="blue")
    r.plot(x, mx, c="red")
    r.plot(x, av, c="green")
    y1_patch = mpatches.Patch(color='red', label="Maximum")
    y2_patch = mpatches.Patch(color='green', label="Average")
    y3_patch = mpatches.Patch(color='blue', label="Minimum")
    plt.legend(handles=[y1_patch, y2_patch, y3_patch])
    plt.xlabel("Abscissa")
    plt.ylabel("Ordinate")
    plt.title("Graph")

    scipy.savetxt(maxfile, mx, delimiter=", ")
    scipy.savetxt(minfile, mn, delimiter=", ")
    scipy.savetxt(medfile, av, delimiter=", ")
    plt.savefig(srcfile)
    plt.show()
Example #34
def main():
    # Do not modify
    start = time.time()

    parser = argparse.ArgumentParser(
        description='Build document-term vectors.')
    parser.add_argument('textfile',
                        type=str,
                        help='name of text file with documents on each line')
    parser.add_argument('threshold', type=int, help='term minimum frequency')
    parser.add_argument('--ndims',
                        type=int,
                        default=100,
                        help='number of SVD dimensions')
    parser.add_argument('--debug',
                        action='store_true',
                        help='debug mode?')  # type=bool would treat any non-empty string as True
    args = parser.parse_args()

    terms, points = tfidf_docterm(args.textfile + '.txt', args.threshold)
    print('Estimated document-term TF-IDF vectors')
    if not args.debug:  # compute PPMI and SVD before writing. if debug is True, just write the count vectors
        points = dimensionality_reduce(points, args.ndims)
        print('Reduced dimensionality')
        outfile = args.textfile + '.tfidf' + '.thresh' + str(args.threshold)
    else:
        outfile = args.textfile + '.tfidf' + '.thresh' + str(
            args.threshold) + '.todebug'

    with open(outfile + '.dims', 'w') as o:
        o.write('\n'.join(terms) + '\n')
    scipy.savetxt(outfile + '.vecs', points, fmt='%.4e')
    print('Saved to file')

    print(time.time() - start, 'seconds')
Example #35
def saveTo(array, filename, directory=None, extention=".out", verbose=True):
    """
    Save an array to the current working directory or directory of the
    invoked script.

    Parameters
    ----------
    array: numpy array
        An array to be saved
    filename: str
        Filename of the saved array
    directory: str
        Path of the directory the array is saved in. Defaults to the
        directory of the invoked script.
    verbose: bool
        If True, print the full path of the saved file.

    Returns
    -------
    None

    """
    # Set directory of the invoked script as default
    if directory is None:
        directory = os.getcwd()
        if os.path.basename(directory) == "Notepad++":
            directory = os.path.dirname(sys.argv[0])

    # Create the directory if it does not exist
    if not os.path.exists(directory):
        os.makedirs(directory)

    full_path = os.path.abspath(
        os.path.join(directory, "{}{}".format(filename, extention)))  # honor the extention argument (was hard-coded ".out")

    scipy.savetxt(full_path, array, delimiter=" ", fmt="%i")
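Usage sketch (hypothetical array and directory; relies on the legacy scipy aliases this page assumes):

import scipy

saveTo(scipy.eye(3, dtype=int), "identity", directory="results")
# -> writes results/identity.out as whitespace-separated integers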
Example #36
def create_histogram(parameter_name,
                     nbins=100,
                     writeFile=True,
                     skipfirst=0,
                     truncate=False,
                     smooth=False):
    """
	Returns a histogram and some statistics about this parameter.
		
	@param writeFile: if true, write the histogram to paramname.histogram
	"""
    f = "%s-chain-0.prob.dump" % parameter_name
    values = numpy.recfromtxt(f)[skipfirst::nevery]

    statistics = {
        'min': float(values.min()),
        'max': float(values.max()),
        'stdev': float(values.std()),
        'mean': float(values.mean()),
        'median': float(numpy.median(values)),
        'q1': float(scipy.stats.scoreatpercentile(values, 25)),
        'q3': float(scipy.stats.scoreatpercentile(values, 75)),
        'p5': float(scipy.stats.scoreatpercentile(values, 5)),
        'p95': float(scipy.stats.scoreatpercentile(values, 95)),
    }

    hist = scipy.histogram(values,
                           bins=nbins if not smooth else nbins * 10,
                           normed=True)
    histwithborders = numpy.dstack(
        [hist[1][0:nbins], hist[1][1:nbins + 1], hist[0]])
    if writeFile:
        scipy.savetxt('%s.histogram' % parameter_name,
                      histwithborders[0],
                      delimiter="\t")
    return histwithborders[0], statistics
Example #37
def PlotMultipleRuns(Alg, nruns=20, fname=None):
    '''Plot "nruns" runs of a given algorithm to show performance
    and variability across runs.'''
    if fname:
        runs = scipy.genfromtxt(fname)
    else:
        runs = []
        for i in range(nruns):
            bestSol, fitHistory = tsp.TSP(200,
                                          Alg,
                                          3000,
                                          30,
                                          seed=None,
                                          coordfile='tmp.txt')
            runs.append(fitHistory)
        fname = 'MultRuns-' + str(Alg) + '.txt'
        runs = scipy.array(runs)
        scipy.savetxt(fname, runs)

    # plotting
    Xs = scipy.linspace(0, runs.shape[1] * 1000, runs.shape[1])
    for i in range(runs.shape[0]):
        pl.plot(Xs, runs[i, :])
    pl.show()
Example #38
    def plot_xy_data(self, in_dir, genotype):
        """
		"""

        plots_dir = os.path.join(in_dir, '_postures')

        for iP, posture in enumerate(self.posture_names):
            filename = os.path.join(plots_dir, '_xys',
                                    '%s_laser_x_%s.txt' % (genotype, posture))
            sp.savetxt(filename, self.laser_xs[iP])
            filename = os.path.join(plots_dir, '_xys',
                                    '%s_laser_y_%s.txt' % (genotype, posture))
            sp.savetxt(filename, self.laser_ys[iP])
            filename = os.path.join(plots_dir, '_xys',
                                    '%s_wall_x_%s.txt' % (genotype, posture))
            sp.savetxt(filename, self.wall_xs[iP])
            filename = os.path.join(plots_dir, '_xys',
                                    '%s_wall_y_%s.txt' % (genotype, posture))
            sp.savetxt(filename, self.wall_ys[iP])

        # Plot wall y-distribution; aggregate all postures (L and R leg)
        wall_data = []
        laser_data = []
        for iP in range(len(self.posture_names)):
            wall_data.extend(self.wall_ys[iP])
            laser_data.extend(self.laser_ys[iP])

        filename = os.path.join(plots_dir, '_xys', '%s_y_hist.png' % genotype)
        hist, bins = sp.histogram(wall_data,
                                  bins=sp.linspace(0, 2.1, 20),
                                  density=True)
        fig = plt.figure()
        fig.set_size_inches(3, 3)
        plt.plot(bins[:-1], hist, color='b')
        hist, bins = sp.histogram(laser_data,
                                  bins=sp.linspace(0, 2.1, 20),
                                  density=True)
        plt.plot(bins[:-1], hist, color='r')
        plt.tight_layout()
        plt.savefig(filename)
Example #39
def save_data_maps(map_coords, data_coords, aper_map, data_dict, density):
    r"""
    Converts the raw paraview point data into a 2-D data distribution and
    saves the file by appending to the base_name.
    """
    #
    # generating p field
    logger.info('generating and saving pressure field...')
    field = data_dict['p'] * density  # openFoam outputs kinematic pressure
    field = griddata(data_coords, field, map_coords, method='nearest')
    field = sp.reshape(field, aper_map.data_map.shape[::-1])
    sp.savetxt(base_name + '-p-map.txt', field.T, delimiter='\t')
    #
    # generating Ux -> Qx field
    logger.info('generating and saving Qx field...')
    field = data_dict['u:0']
    field = griddata(data_coords, field, map_coords, method='nearest')
    field = sp.reshape(field, aper_map.data_map.shape[::-1])
    field = field * aper_map.data_map.T * voxel_size**2
    sp.savetxt(base_name + '-qx-map.txt', field.T, delimiter='\t')
    #
    # generating Uz -> Qz field
    logger.info('generating and saving Qz field...')
    field = data_dict['u:2']
    field = griddata(data_coords, field, map_coords, method='nearest')
    field = sp.reshape(field, aper_map.data_map.shape[::-1])
    field = field * aper_map.data_map.T * voxel_size**2
    sp.savetxt(base_name + '-qz-map.txt', field.T, delimiter='\t')
    #
    # generating Um -> Qm field
    logger.info('generating and saving Q magnitude field...')
    field = sp.sqrt(data_dict['u:0']**2 + data_dict['u:2']**2)
    field = griddata(data_coords, field, map_coords, method='nearest')
    field = sp.reshape(field, aper_map.data_map.shape[::-1])
    field = field * aper_map.data_map.T * voxel_size**2
    sp.savetxt(base_name + '-qm-map.txt', field.T, delimiter='\t')
Example #40
def check_annotation(CFG, genes):
    
    if CFG['verbose']:
        print('\n... checking annotation')

    ### check whether genes have no exons annotated
    rm_ids = []
    for gene in genes:
        if len(gene.exons) == 0:
            rm_ids.append(gene.name)
    if len(rm_ids) > 0:
        print >> sys.stderr, 'WARNING: removing %i genes from given annotation that had no exons annotated:' % len(rm_ids)
        print >> sys.stderr, 'list of excluded genes written to: %s' % (CFG['anno_fname'] + '.genes_excluded_no_exons')
        sp.savetxt(CFG['anno_fname'] + '.genes_excluded_no_exons', rm_ids, fmt='%s', delimiter='\t')
        gene_names = sp.array([x.name for x in genes], dtype='str')
        k_idx = sp.where(~sp.in1d(gene_names, rm_ids))[0]
        genes = genes[k_idx]

    ### check whether we run unstranded analysis and have to exclude overlapping gene annotations
    ### TODO: make this also work for stranded analysis and only exclude genes overlapping on the same strand
    rm_ids = []
    chrms = sp.array([x.chr for x in genes])
    starts = sp.array([x.start for x in genes], dtype='int')
    stops = sp.array([x.stop for x in genes], dtype='int')
    for c in sp.unique(chrms):
        c_idx = sp.where(chrms == c)[0]
        for i in c_idx:
            if sp.sum((starts[i] <= stops[c_idx]) & (stops[i] >= starts[c_idx])) > 1:
                rm_ids.append(genes[i].name)
    if len(rm_ids) > 0:
        rm_ids = sp.unique(rm_ids)
        print >> sys.stderr, 'WARNING: removing %i genes from given annotation that overlap to each other:' % rm_ids.shape[0]
        print >> sys.stderr, 'list of excluded genes written to: %s' % (CFG['anno_fname'] + '.genes_excluded_gene_overlap')
        sp.savetxt(CFG['anno_fname'] + '.genes_excluded_gene_overlap', rm_ids, fmt='%s', delimiter='\t')
        gene_names = sp.array([x.name for x in genes], dtype='str')
        k_idx = sp.where(~sp.in1d(gene_names, rm_ids))[0]
        genes = genes[k_idx]

    ### check whether exons are part of multiple genes
    exon_map = dict()
    for i, g in enumerate(genes):
        for t in range(len(g.exons)):
            for e in range(g.exons[t].shape[0]):
                k = frozenset(g.exons[t][e, :])
                if k in exon_map:
                    exon_map[k].append(g.name)
                else:
                    exon_map[k] = [g.name]
    rm_ids = []
    for exon in exon_map:
        if sp.unique(exon_map[exon]).shape[0] > 1:
            rm_ids.extend(exon_map[exon])
    if len(rm_ids) > 0:
        rm_ids = sp.unique(rm_ids)
        print >> sys.stderr, 'WARNING: removing %i genes from given annotation that share exact exon coordines:' % rm_ids.shape[0]
        print >> sys.stderr, 'list of excluded exons written to: %s' % (CFG['anno_fname'] + '.genes_excluded_exon_shared')
        sp.savetxt(CFG['anno_fname'] + '.genes_excluded_exon_shared', rm_ids, fmt='%s', delimiter='\t')
        gene_names = sp.array([x.name for x in genes], dtype='str')
        k_idx = sp.where(~sp.in1d(gene_names, rm_ids))[0]
        genes = genes[k_idx]

    ### check whether exons within the same transcript overlap
    rm_ids = []
    for i, g in enumerate(genes):
        for t in range(len(g.exons)):
            for e in range(g.exons[t].shape[0] - 1):
                if sp.any(g.exons[t][e+1:, 0] < g.exons[t][e, 1]):
                    rm_ids.append(g.name)
    if len(rm_ids) > 0:
        rm_ids = sp.unique(rm_ids)
        print >> sys.stderr, 'WARNING: removing %i genes from given annotation that have at least one transcript with overlapping exons.' % rm_ids.shape[0]
        print >> sys.stderr, 'list of excluded genes written to: %s' % (CFG['anno_fname'] + '.genes_excluded_exon_overlap')
        sp.savetxt(CFG['anno_fname'] + '.genes_excluded_exon_overlap', rm_ids, fmt='%s', delimiter='\t')
        gene_names = sp.array([x.name for x in genes], dtype='str')
        k_idx = sp.where(~sp.in1d(gene_names, rm_ids))[0]
        genes = genes[k_idx]

    ### do we have any genes left?
    if genes.shape[0] == 0:
        print >> sys.stderr, '\nERROR: there are no valid genes left in the input. Please verify correctnes of input annotation.\n'
        sys.exit(1)

    return genes
Example #41
sp.random.seed(3)  # to reproduce the data later on

x = sp.arange(1, 31 * 24)
print(x)
y = sp.array(200 * (sp.sin(2 * sp.pi * x / (7 * 24))), dtype=int)
y += gamma.rvs(15, loc=0, scale=100, size=len(x))
y += 2 * sp.exp(x / 100.0)
y = sp.ma.array(y, mask=[y < 0])
print(sum(y), sum(y < 0))

plt.scatter(x, y)
plt.title("Web traffic over the last month")
plt.xlabel("Time")
plt.ylabel("Hits/hour")
plt.xticks([w * 7 * 24 for w in [0, 1, 2, 3, 4]],
           ['week %i' % (w + 1) for w in [0, 1, 2, 3, 4]])

plt.autoscale(tight=True)
plt.grid()
plt.savefig(os.path.join("..", "1400_01_01.png"))

data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..",
                        "data")

# sp.savetxt(os.path.join("..", "web_traffic.tsv"),
# zip(x[~y.mask],y[~y.mask]), delimiter="\t", fmt="%i")
sp.savetxt(os.path.join(data_dir, "web_traffic.tsv"),
           list(zip(x, y)),
           delimiter="\t",
           fmt="%s")
Example #42
#!/usr/bin/env python

import argparse, sys, re, scipy

parser = argparse.ArgumentParser(description="Compute a delay embedding")
parser.add_argument('input', metavar='input', help='data file')
parser.add_argument('dimension',
                    type=int,
                    help='dimension increase multiplier')
parser.add_argument('delay',
                    type=int,
                    help='delay size, given in sample count')
parser.add_argument('output', nargs='?', default=None, help='output file')  # a positional needs nargs='?' for its default to apply
args = parser.parse_args()

dim = args.dimension
delay = args.delay

mat = scipy.loadtxt(args.input)
if mat.ndim < 2:
    mat.resize((len(mat), 1))

ret = scipy.concatenate(
    [mat[i * delay:-(dim - i) * delay, :] for i in range(dim)], 1)

if args.output:
    scipy.savetxt(args.output, ret)
else:
    print(ret)
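Each of the dim slices above has length len(mat) - dim*delay, so an input with n rows and c columns embeds to shape (n - dim*delay, dim*c). A quick check with hypothetical sizes (using the same legacy scipy aliases):

mat2 = scipy.arange(1000.0).reshape(-1, 1)               # n = 1000, c = 1
ret2 = scipy.concatenate(
    [mat2[i * 5:-(3 - i) * 5, :] for i in range(3)], 1)  # dim = 3, delay = 5
print(ret2.shape)  # (985, 3): n - dim*delay rows, dim*c columns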
Example #43
import os,sys
from scipy import savetxt,loadtxt

energies = []
states = []
#conffile = '../gaussian_runs/angles_index.dat'
#conf = loadtxt(conffile)
#energy_confs = []
for i in range(100):
    logfile = 'outputs/%d.log'%i
    if os.path.exists(logfile):
        #print conf[i+1]
        fn = open(logfile,'r')
        lines = fn.readlines()
        while 'E(RHF)' not in lines[-1]:
            lines.pop(-1)
        energies.append(float(lines[-1].split()[4]))
    else:
        print "Warning: %s is missing."%logfile
energyfile='./cineromycinB_QM_energies.dat'
#energy_conffile='./Nae-ace-ndm_energyconfs.dat'
print "Wrote: %s"%energyfile
savetxt(energyfile,energies)

Example #44
def getxyzs(path, ampcor_label, rwin, awin, wsamp, input_res, input_width,
            input_length, ul_long, ul_lat, pair_label):

    cmd = "\nsed -i '/\*/d' " + path + "/ampcor_" + ampcor_label + ".off\n"
    subprocess.call(cmd, shell=True)

    indat = scipy.loadtxt(path + "/ampcor_" + ampcor_label + ".off")

    da_e = int(input_res)
    #az pixel size at earth surface, cm
    dr_g = int(input_res)
    #ground pixel size in range direction, cm

    x1ind = scipy.matrix([indat[:, 0]], scipy.int32).conj().transpose()
    dx = scipy.matrix([indat[:, 1]]).conj().transpose()
    y1ind = scipy.matrix([indat[:, 2]], scipy.int32).conj().transpose()
    dy = scipy.matrix([indat[:, 3]]).conj().transpose()
    snr = scipy.matrix([indat[:, 4]]).conj().transpose()
    c11 = scipy.matrix([scipy.sqrt(indat[:, 5])
                        ]).conj().transpose()  #1 sigma drng
    c22 = scipy.matrix([scipy.sqrt(indat[:, 6])
                        ]).conj().transpose()  #1 sigma dazo

    width0 = int(input_width)
    length0 = int(input_length)

    x1 = x1ind * dr_g
    dx = dx * dr_g
    y1 = y1ind * da_e
    dy = dy * da_e
    c11 = c11 * dr_g
    #1 sigma drng
    c22 = c22 * da_e
    #1 sigma dazo
    x2 = x1 + dx
    y2 = y1 + dy

    rlooks = int(rwin) // int(wsamp)  # integer look counts (Py2 `/` was integer division)
    alooks = int(awin) // int(wsamp)

    width1 = scipy.floor(width0 / rlooks)
    length1 = scipy.floor(length0 / alooks)
    [xg, yg] = scipy.meshgrid(scipy.arange(1, width1 + 1, 1),
                              scipy.arange(1, length1 + 1, 1))
    xg = xg * dr_g * rlooks / 1e5
    #convert from pix to km
    yg = yg * da_e * alooks / 1e5
    #convert from pix to km

    sigy_thresh = scipy.inf
    #cm
    sigx_thresh = scipy.inf
    #cm
    snr_thresh = 0
    #(not log10)
    mag_threshx = scipy.inf
    #cm
    mag_threshy = scipy.inf
    #cm

    #initial mask
    c22good = scipy.matrix(pylab.find(c22 < sigy_thresh)).conj().transpose()
    c11good = scipy.matrix(pylab.find(c11 < sigx_thresh)).conj().transpose()
    snrgood = scipy.matrix(pylab.find(snr > snr_thresh)).conj().transpose()

    good = scipy.matrix(
        scipy.unique(scipy.array(scipy.vstack(
            (snrgood, c11good, c22good))))).conj().transpose()

    x1good = x1[good].reshape(-1, 1)
    x1goodind = x1ind[good].reshape(-1, 1)
    y1good = y1[good].reshape(-1, 1)
    y1goodind = y1ind[good].reshape(-1, 1)
    x2good = x2[good].reshape(-1, 1)
    y2good = y2[good].reshape(-1, 1)

    #get and remove affine fit
    good2 = scipy.matrix(pylab.find(good < 300000)).conj().transpose()

    x1good = x1[good2].reshape(-1, 1)
    y1good = y1[good2].reshape(-1, 1)
    x2good = x2[good2].reshape(-1, 1)
    y2good = y2[good2].reshape(-1, 1)

    c0 = scipy.matrix(scipy.zeros((scipy.size(good2)))).reshape(-1, 1)
    c1 = scipy.matrix(scipy.ones((scipy.size(good2)))).reshape(-1, 1)
    n = c1.shape[0]

    A = scipy.vstack((scipy.hstack((x1good, y1good, c0, c0, c1, c0)),
                      scipy.hstack((c0, c0, x1good, y1good, c0, c1))))

    b = scipy.vstack((x2good, y2good))

    M = scipy.linalg.lstsq(A, b)[0]

    pred = A * M
    res = pred - b

    # std() in python defaults to 0 degrees of freedom
    resdev = res.std(axis=0, ddof=1)
    q = pylab.find(abs(res) < 1.5 * resdev)
    A1 = A[q, ]
    b1 = b[q]
    M = scipy.linalg.lstsq(A1, b1)[0]
    pred = A * M

    x1good = x1[good].reshape(-1, 1)
    x1goodind = x1ind[good].reshape(-1, 1)
    y1good = y1[good].reshape(-1, 1)
    y1goodind = y1ind[good].reshape(-1, 1)
    x2good = x2[good].reshape(-1, 1)
    y2good = y2[good].reshape(-1, 1)

    c0 = scipy.matrix(scipy.zeros((scipy.size(good)))).reshape(-1, 1)
    c1 = scipy.matrix(scipy.ones((scipy.size(good)))).reshape(-1, 1)
    n = c1.shape[0]

    A = scipy.vstack((scipy.hstack((x1good, y1good, c0, c0, c1, c0)),
                      scipy.hstack((c0, c0, x1good, y1good, c0, c1))))

    b = scipy.vstack((x2good, y2good))

    pred = A * M

    n = c1.shape[0]

    res = pred - b
    resdx = res[0:n]
    resdy = res[(n):(2 * n)]

    #remap into
    newx = scipy.matrix(scipy.ceil(x1goodind / rlooks), scipy.int32)
    newy = scipy.matrix(scipy.floor(y1goodind / alooks), scipy.int32)

    vind = scipy.asarray((newy - 1) * width1 + newx, scipy.int32).reshape(-1)

    temp = scipy.NaN * scipy.matrix(
        0 * (scipy.arange(1, length1 * width1 + 1, 1))).conj().transpose()
    temp[vind] = resdy
    dyg = temp.reshape(length1, width1)

    temp = scipy.NaN * scipy.matrix(
        0 * (scipy.arange(1, length1 * width1 + 1, 1))).conj().transpose()
    temp[vind] = resdx
    dxg = temp.reshape(length1, width1)

    #setup mask indicies
    newx = scipy.matrix(scipy.ceil(x1ind / rlooks), scipy.int32)
    newy = scipy.matrix(scipy.floor(y1ind / alooks), scipy.int32)
    vind = scipy.asarray((newy - 1) * width1 + newx, scipy.int32).reshape(-1)
    temp = scipy.NaN * scipy.matrix(scipy.arange(0, length1 * width1,
                                                 1)).conj().transpose()

    #sigma_y mask
    temp[vind] = c22
    sigyg = temp.reshape(length1, width1)
    mask_sigy = scipy.zeros(dyg.shape)
    mask_sigy[(sigyg > sigy_thresh)] = scipy.NaN

    #sigma_x mask
    temp = scipy.NaN * scipy.matrix(scipy.arange(0, length1 * width1,
                                                 1)).conj().transpose()
    temp[vind] = c11
    sigxg = temp.reshape(length1, width1)
    mask_sigx = scipy.zeros(dxg.shape)
    mask_sigx[(sigxg > sigx_thresh)] = scipy.NaN

    #SNR mask
    temp = scipy.NaN * scipy.matrix(scipy.arange(0, length1 * width1,
                                                 1)).conj().transpose()
    temp[vind] = snr
    snrg = temp.reshape(length1, width1)
    mask_snr = scipy.zeros(dyg.shape)
    mask_snr[(snrg < snr_thresh)] = scipy.NaN

    #mag mask y
    mask_magy = scipy.zeros(dyg.shape)
    mask_magy[abs(dyg) > mag_threshy] = scipy.NaN

    #mag mask x
    mask_magx = scipy.zeros(dxg.shape)
    mask_magx[abs(dxg) > mag_threshx] = scipy.NaN

    #final mask
    mask_total = mask_snr + mask_sigy + mask_magy
    bad = scipy.isnan(mask_total)
    dyg[bad] = scipy.NaN

    mask_total = mask_snr + mask_sigx + mask_magx
    bad = scipy.isnan(mask_total)
    dxg[bad] = scipy.NaN

    x_step = int(rwin) * int(input_res)
    y_step = int(awin) * int(input_res) * -1

    columns = scipy.arange(0, width1, 1)
    rows = scipy.arange(0, length1, 1)

    vect_utm_n = float(ul_lat) + rows * y_step
    vect_utm_e = float(ul_long) + columns * x_step

    [map_utm_e, map_utm_n] = scipy.meshgrid(vect_utm_e, vect_utm_n)
    # map_utm_n		= scipy.flipud(map_utm_n);  whyj

    cdir = os.getcwd()
    cdir = cdir[cdir.rfind("/") + 1:].strip()

    column_utm_n = map_utm_n.reshape(
        scipy.size(map_utm_n, 0) * scipy.size(map_utm_n, 1), 1)
    column_utm_e = map_utm_e.reshape(
        scipy.size(map_utm_e, 0) * scipy.size(map_utm_e, 1), 1)
    # column_dyg	= scipy.flipud(dyg).reshape(scipy.size(dyg,0)*scipy.size(dyg,1),1);
    # column_dxg	= scipy.flipud(dxg).reshape(scipy.size(dxg,0)*scipy.size(dxg,1),1);
    # column_snr	= scipy.flipud(snrg).reshape(scipy.size(snrg,0)*scipy.size(snrg,1),1);
    # column_c11      = scipy.flipud(sigxg).reshape(scipy.size(sigxg,0)*scipy.size(sigxg,1),1);
    # column_c22      = scipy.flipud(sigyg).reshape(scipy.size(sigyg,0)*scipy.size(sigyg,1),1);
    column_dyg = dyg.reshape(scipy.size(dyg, 0) * scipy.size(dyg, 1), 1)
    column_dxg = dxg.reshape(scipy.size(dxg, 0) * scipy.size(dxg, 1), 1)
    column_snr = snrg.reshape(scipy.size(snrg, 0) * scipy.size(snrg, 1), 1)
    column_c11 = sigxg.reshape(scipy.size(sigxg, 0) * scipy.size(sigxg, 1), 1)
    column_c22 = sigyg.reshape(scipy.size(sigyg, 0) * scipy.size(sigyg, 1), 1)
    azimuth_off = scipy.concatenate((column_utm_e, column_utm_n, column_dyg,
                                     column_snr, column_c11, column_c22),
                                    axis=1)
    range_off = scipy.concatenate((column_utm_e, column_utm_n, column_dxg,
                                   column_snr, column_c11, column_c22),
                                  axis=1)

    #	azimuth_off     = scipy.delete(azimuth_off, scipy.arange(0, len(azimuth_off))[sum(scipy.transpose(scipy.isnan(azimuth_off))) > 0], 0);
    #	range_off       = scipy.delete(range_off, scipy.arange(0, len(range_off))[sum(scipy.transpose(scipy.isnan(range_off))) > 0], 0);

    scipy.savetxt(path + "/" + pair_label + "_" + ampcor_label +
                  "_northxyz.txt",
                  azimuth_off,
                  delimiter=" ",
                  fmt="%.1f")
    scipy.savetxt(path + "/" + pair_label + "_" + ampcor_label +
                  "_eastxyz.txt",
                  range_off,
                  delimiter=" ",
                  fmt="%.1f")

    cmd = "\nsed -i '/a/d' " + path + "/" + pair_label + "_" + ampcor_label + "_eastxyz.txt\n"
    cmd += "\nsed -i '/a/d' " + path + "/" + pair_label + "_" + ampcor_label + "_northxyz.txt\n"
    subprocess.call(cmd, shell=True)

    #matplotlib.pyplot.imshow(scipy.array(dxg),interpolation='nearest',origin='lower');
    #matplotlib.pyplot.show();

    return
Example #45

    #print nnd

    sim.reset()

    #Create CSV, plots and interactive html/JS charts based on collected data
    np.savetxt(get_path(folder, "Msg_stats-%s.csv" % filename),
               message_stats,
               delimiter=",", fmt="%8.2f", comments='',
               header="Node,transmit,transmit_failed_power,"
                      "received,received_failed_power,"
                      "received_failed_loss,SNR", footer=net.name + "\n" + comments)

    sp.savetxt(get_path(folder, "Localization-%s.csv" % filename),
               position_stats,
               delimiter=",", fmt="%8.2f", comments='',
               header="Node,Xactual,Yactual,Xestimated,Yestimated,RMSerror")

    sp.savetxt(get_path(folder, "Localization2-%s.csv" % filename),
               position_stats2,
               delimiter=",", fmt="%8.2f", comments='',
               header="Node,Xactual,Yactual,Xestimated,Yestimated,RMSerror")


    # Create html/JS file for network Topology
    plotter.gethtmlScatter(xpositions, [anchpositions, positions, newpos],
                    fname=filename, folder=folder,
                    xlabel="X-coordinates", ylabel="Y-ccordinates", labels=['Anchor','Regular','Localized'],
                    title="Topology-"+filename, open=1, axis_range={'xmin':0, 'ymin':0, 'xmax': w, 'ymax': h},
                    comment=comments, show_range=int(node.commRange),report=comments+"<br><br>" + str(stats),
                    plot_options=["color: 'red', visible: false,", "color: 'blue',",
Example #46
0

if args.Popmethod == 'retain_first_mle':
    Populations1 = get_populations(count_mtx=sparse1, method='mle')[0]
    #for testing
    #Populations1 = get_populations(count_mtx = sparse1, method = 'counts')
else:
    Populations1 = None

for i in range(numoftcounts):
    print "Calculate %d of %d tCounts" % (i, numoftcounts)
    sparse2 = mmread(bcounts[i])
    print sparse1.shape, sparse2.shape
    if args.Fix == True:
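        # After normalising, every non-zero entry lies in (0, 1], so np.ceil
        # turns "fix" into a 0/1 indicator of observed transitions that is
        # then added to the bootstrap counts as pseudocounts.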
        fix = mmread(args.OCount)
        fix = fix / fix.sum()
        fix = fix.asformat('csr')
        for j in range(fix.shape[0]):
            for k in range(fix.shape[0]):
                if fix[j, k] != 0:
                    fix[j, k] = np.ceil(fix[j, k])
        fix = fix.asformat('coo')
        sparse2 = sparse2 + fix
    obj = SurprisalCalculator(sparse1,
                              sparse2,
                              pop_method=args.Popmethod,
                              init_guess1=Populations1)
    JSDs = obj.calculate_all_jsd()
    JSDs_bootstraps[i, :] = JSDs[:]
print len(JSDs_bootstraps)
savetxt(args.Output, JSDs_bootstraps)
print "Wrote:%s" % args.Output
Example #47
0
    N = 500
    P = 4
    S = 1000
    R = 10

    hr = 0.05
    hg = 0.40

    Xr = 1. * (SP.rand(N, R) < 0.2)
    Xg = 1. * (SP.rand(N, S) < 0.2)
    Xg -= Xg.mean(0)
    Xg /= Xg.std(0)
    XX = SP.dot(Xg, Xg.T)
    XX /= XX.diagonal().mean()
    XX += 1e-4 * SP.eye(XX.shape[0])

    Yr = SP.dot(Xr, SP.randn(R, P))
    Yr *= SP.sqrt(hr / Yr.var(0))
    Yg = SP.dot(Xg, SP.randn(S, P))
    Yg *= SP.sqrt(hg / Yg.var(0))
    Yn = SP.randn(N, P)
    Yn *= SP.sqrt((1 - hr - hg) / Yn.var(0))

    Y = Yr + Yg + Yn
    Y -= Y.mean(0)
    Y /= Y.std(0)

    SP.savetxt('Xr.txt', Xr, fmt='%d')
    SP.savetxt('XX.txt', XX, fmt='%.6f')
    SP.savetxt('Y.txt', Y, fmt='%.6f')
Example #48
0
n22 = ((r - sigma**2 / 2) * (T - time))
numerd1 = logSoverK[scipy.newaxis, :] + n12[:, scipy.newaxis]
numerd2 = logSoverK[scipy.newaxis, :] + n22[:, scipy.newaxis]
d1 = numerd1 / (sigma * scipy.sqrt(T - time)[:, scipy.newaxis])
d2 = numerd2 / (sigma * scipy.sqrt(T - time)[:, scipy.newaxis])

from scipy.stats import norm
part1 = norm.cdf(-d2) * K * scipy.exp(-r * (T - time))[:, scipy.newaxis]
part2 = S[scipy.newaxis] * norm.cdf(-d1)
VC = part1 - part2

# optional file output to use with external plotting programming
# such as Asymptote

scipy.savetxt('putcallparity_asy.dat',
              scipy.column_stack((scipy.transpose(S), scipy.transpose(VC))),
              fmt=('%4.3f'))

## NAME:  putcallparity_asy.py
## USAGE: From shell prompt: python3 solution.py
##        or within interactive python3 environment, import filename
## REQUIRED ARGUMENTS: none
## OPTIONS: none
## DESCRIPTION:  For given parameter values, the Black-Scholes-Merton
## solution formula is sampled at a specified m X 1 array of times and
## at a specified  1 X n array of security prices using vectorization
## and broadcasting.  The result can be plotted as functions of the
## security price as done in the text.  This approach is taken to
## illustrate the use of vectorization and broadcasting for efficient
## evaluation of an array of solution values from a complicated formula.
## DIAGNOSTICS: none
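
## (Added sketch, not part of the original file.)  The broadcasting pattern
## the DESCRIPTION refers to, in miniature: a length-m array of times is
## lifted to shape (m, 1) with scipy.newaxis and combined with a length-n
## array of prices viewed as shape (1, n), giving an (m, n) grid of values
## with no explicit loops.
import scipy

demo_times = scipy.linspace(0.25, 1.0, 4)     # m = 4 sample times
demo_prices = scipy.linspace(80.0, 120.0, 5)  # n = 5 security prices
demo_grid = scipy.log(demo_prices)[scipy.newaxis, :] * demo_times[:, scipy.newaxis]
print(demo_grid.shape)                        # -> (4, 5)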
Example #49
0

d1 = numerd1 / (sigma * scipy.sqrt(T - time)[:, scipy.newaxis])
d2 = numerd2 / (sigma * scipy.sqrt(T - time)[:, scipy.newaxis])

from scipy.stats import norm

part1 = S[scipy.newaxis] * norm.cdf(d1)
part2 = norm.cdf(d2) * K * scipy.exp(-r * (T - time))[:, scipy.newaxis]
VC = part1 - part2

# optional file output to use with external plotting programming
# such as gnuplot, R, octave, etc.
# Start gnuplot, then from gnuplot prompt
#    plot for [n=2:7] 'solution.dat' using 1:(column(n)) with lines

scipy.savetxt('solution_asy.dat',
              scipy.column_stack((scipy.transpose(S), scipy.transpose(VC))),
              fmt=('%4.3f'))

## NAME:  solution.py
## USAGE: From shell prompt: python3 solution.py
##        or within interactive python3 environment, import filename
## REQUIRED ARGUMENTS: none
## OPTIONS: none
## DESCRIPTION:  For given parameter values, the Black-Scholes-Merton
## solution formula is sampled at a specified m X 1 array of times and
## at a specified  1 X n array of security prices using vectorization
## and broadcasting.  The result can be plotted as functions of the
## security price as done in the text.  This approach is taken to
## illustrate the use of vectorization and broadcasting for efficient
## evaluation of an array of solution values from a complicated formula.
## DIAGNOSTICS: none
Example #50
0
    if chi2_delta < chi2_gauss:
        f.p = {
            'shift': p_delta['shift'],
            'scale': p_delta['scale'],
            'width': 0.001
        }
    else:
        f.p = p_gauss

    sout, dummy, covar = f.output(s)

    hout = fits.getheader(spec)
    savefits('scale_' + spec, sout, hout)

    if get_covar:
        sp.savetxt('covar_matrices/covar_' + spec, covar)
    if get_chains:
        chain_gauss.save('chains/' + spec + '.chain.gauss')

    f = RescaleModel(lref, kernel="Hermite")
    #    Here is an example of how to put in a prior----we are using the
    #    posterior distribution of the kernel width from the pure Gaussian
    #    as a prior on the width for the Gauss-Hermite kernel.
    #    'burn=0.75' means we throw out the first 3/4 of the chain
    #    (assumed to be burn in).

    #    f.make_dist_prior(chain_gauss,'width', burn=0.75)

    #    Or, you can specify an analytic function, if say, you have a
    #    guess of what the width should be----here, the prior is a
    #    Gaussian of mean 1.8 angstroms and std 1.0 angstroms.
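    #
    #    A hedged sketch (added, not from the original) of that analytic
    #    prior as a callable; the method for attaching it to the model is
    #    not shown in this excerpt, so none is invented here:
    #
    #    def width_prior(w):
    #        # Gaussian prior on the kernel width: mean 1.8 A, std 1.0 A
    #        return sp.exp(-0.5 * (w - 1.8) ** 2) / sp.sqrt(2 * sp.pi)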
Example #51
0
def main():

    ### get command line options
    options = parse_options(sys.argv)

    ### parse parameters from options object
    CFG = settings.parse_args(options, identity='test')
    CFG['use_exon_counts'] = False

    ### generate output directory
    outdir = os.path.join(options.outdir, 'testing')
    if options.timestamp == 'y':
        outdir = '%s_%s' % (outdir, str(datetime.datetime.now()).replace(
            ' ', '_'))
    if CFG['diagnose_plots']:
        CFG['plot_dir'] = os.path.join(options.outdir, 'plots')
        if not os.path.exists(CFG['plot_dir']):
            os.makedirs(CFG['plot_dir'])

    if options.labelA != 'condA' and options.labelB != 'condB':
        outdir = '%s_%s_vs_%s' % (outdir, options.labelA, options.labelB)
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    if CFG['debug']:

        print "Generating simulated dataset"

        npr.seed(23)
        CFG['is_matlab'] = False
        #cov = npr.permutation(20000-20).astype('float').reshape(999, 20)
        #cov = sp.r_[cov, sp.c_[sp.ones((1, 10)) *10, sp.ones((1, 10)) * 500000] + npr.normal(10, 1, 20)]
        #sf = sp.ones((cov.shape[1], ), dtype='float')

        setsize = 50
        ### diff event counts
        cov = sp.zeros((500, 2 * setsize), dtype='int')
        for i in range(10):
            cov[i, :setsize] = nbinom.rvs(30, 0.8, size=setsize)
            cov[i, setsize:] = nbinom.rvs(10, 0.8, size=setsize)
        for i in range(10, cov.shape[0]):
            cov[i, :] = nbinom.rvs(30, 0.8, size=2 * setsize)

        ### diff gene expression
        cov2 = sp.zeros((500, 2 * setsize), dtype='int')
        for i in range(20):
            cov2[i, :setsize] = nbinom.rvs(2000, 0.2, size=setsize)
            cov2[i, setsize:] = nbinom.rvs(2000, 0.3, size=setsize)
        for i in range(20, cov2.shape[0]):
            cov2[i, :] = nbinom.rvs(2000, 0.3, size=2 * setsize)

        cov = sp.c_[cov, cov2] * 10000

        tidx = sp.arange(setsize)

        sf = npr.uniform(0, 5, 2 * setsize)
        sf = sp.r_[sf, sf]

        #dmatrix0 = sp.ones((cov.shape[1], 3), dtype='bool')
        dmatrix1 = sp.zeros((cov.shape[1], 4), dtype='float')
        dmatrix1[:, 0] = 1
        dmatrix1[tidx, 1] = 1
        #dmatrix1[tidx, 2] = 1
        dmatrix1[tidx + (2 * setsize), 2] = 1
        dmatrix1[(2 * setsize):, 3] = 1
        #dmatrix1[:, 4] = sp.log(sf)
        dmatrix0 = dmatrix1[:, [0, 2, 3]]

        cov = cov * sf
        #sf = sp.ones((cov.shape[1], ), dtype='float')

        pvals = run_testing(cov, dmatrix0, dmatrix1, sf, CFG)
        pvals_adj = adj_pval(pvals, CFG)
        pdb.set_trace()
    else:
        val_tag = ''
        if CFG['validate_splicegraphs']:
            val_tag = '.validated'

        if CFG['is_matlab']:
            CFG['fname_genes'] = os.path.join(
                CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.mat' %
                (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
            CFG['fname_count_in'] = os.path.join(
                CFG['out_dirname'], 'spladder',
                'genes_graph_conf%i.%s%s.count.mat' %
                (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
        else:
            CFG['fname_genes'] = os.path.join(
                CFG['out_dirname'], 'spladder',
                'genes_graph_conf%i.%s%s.pickle' %
                (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
            CFG['fname_count_in'] = os.path.join(
                CFG['out_dirname'], 'spladder',
                'genes_graph_conf%i.%s%s.count.hdf5' %
                (CFG['confidence_level'], CFG['merge_strategy'], val_tag))

        condition_strains = None
        CFG['fname_exp_hdf5'] = os.path.join(
            CFG['out_dirname'], 'spladder',
            'genes_graph_conf%i.%s%s.gene_exp.hdf5' %
            (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
        if os.path.exists(CFG['fname_exp_hdf5']):
            if CFG['verbose']:
                print 'Loading expression counts from %s' % CFG[
                    'fname_exp_hdf5']
            IN = h5py.File(CFG['fname_exp_hdf5'], 'r')
            gene_counts = IN['raw_count'][:]
            gene_strains = IN['strains'][:]
            gene_ids = IN['genes'][:]
            IN.close()
        else:
            if options.subset_samples == 'y':
                condition_strains = sp.unique(
                    sp.r_[sp.array(CFG['conditionA']),
                          sp.array(CFG['conditionB'])])
                CFG['fname_exp_hdf5'] = os.path.join(
                    CFG['out_dirname'], 'spladder',
                    'genes_graph_conf%i.%s%s.gene_exp.%i.hdf5' %
                    (CFG['confidence_level'], CFG['merge_strategy'], val_tag,
                     hash(tuple(sp.unique(condition_strains))) * -1))
            if os.path.exists(CFG['fname_exp_hdf5']):
                if CFG['verbose']:
                    print 'Loading expression counts from %s' % CFG[
                        'fname_exp_hdf5']
                IN = h5py.File(CFG['fname_exp_hdf5'], 'r')
                gene_counts = IN['raw_count'][:]
                gene_strains = IN['strains'][:]
                gene_ids = IN['genes'][:]
                IN.close()
            else:
                gene_counts, gene_strains, gene_ids = get_gene_expression(
                    CFG,
                    fn_out=CFG['fname_exp_hdf5'],
                    strain_subset=condition_strains)

        gene_strains = sp.array(
            [x.split(':')[1] if ':' in x else x for x in gene_strains])

        ### estimate size factors for library size normalization
        sf_ge = get_size_factors(gene_counts, CFG)

        ### get index of samples for difftest
        idx1 = sp.where(sp.in1d(gene_strains, CFG['conditionA']))[0]
        idx2 = sp.where(sp.in1d(gene_strains, CFG['conditionB']))[0]

        ### for TESTING
        #setsize = 100
        #idx1 = sp.arange(0, setsize / 2)
        #idx2 = sp.arange(setsize / 2, setsize)

        ### subset expression counts to tested samples
        gene_counts = gene_counts[:, sp.r_[idx1, idx2]]
        sf_ge = sf_ge[sp.r_[idx1, idx2]]
        #sf = sp.r_[sf, sf]

        ### test each event type individually
        for event_type in CFG['event_types']:

            if CFG['verbose']:
                print 'Testing %s events' % event_type

            CFG['fname_events'] = os.path.join(
                CFG['out_dirname'], 'merge_graphs_%s_C%i.counts.hdf5' %
                (event_type, CFG['confidence_level']))

            ### quantify events
            (cov, gene_idx, event_idx, event_ids,
             event_strains) = quantify.quantify_from_counted_events(
                 CFG['fname_events'], sp.r_[idx1, idx2], event_type, CFG)

            ### estimate size factors
            sf_ev = get_size_factors(sp.vstack(cov), CFG)

            sf = sp.r_[sf_ev, sf_ge]

            assert (sp.all(gene_strains == event_strains))

            ### map gene expression to event order
            curr_gene_counts = gene_counts[gene_idx, :]

            ### filter for min expression
            if event_type == 'intron_retention':
                k_idx = sp.where((sp.mean(cov[0] == 0, axis=1) < CFG['max_0_frac']) | \
                                 (sp.mean(cov[1] == 0, axis=1) < CFG['max_0_frac']))[0]
            else:
                k_idx = sp.where(((sp.mean(cov[0] == 0, axis=1) < CFG['max_0_frac']) | \
                                  (sp.mean(cov[1] == 0, axis=1) < CFG['max_0_frac'])) & \
                                 (sp.mean(sp.c_[cov[0][:, :idx1.shape[0]], cov[1][:, :idx1.shape[0]]] == 0, axis=1) < CFG['max_0_frac']) & \
                                 (sp.mean(sp.c_[cov[0][:, idx2.shape[0]:], cov[1][:, idx2.shape[0]:]] == 0, axis=1) < CFG['max_0_frac']))[0]
            if CFG['verbose']:
                print 'Exclude %i of %i %s events (%.2f percent) from testing due to low coverage' % (
                    cov[0].shape[0] - k_idx.shape[0], cov[0].shape[0],
                    event_type,
                    (1 - float(k_idx.shape[0]) / cov[0].shape[0]) * 100)
            if k_idx.shape[0] == 0:
                print 'All events of type %s were filtered out due to low coverage. Please try re-running with less stringent filter criteria' % event_type
                continue
        # k_idx = sp.where((sp.mean(sp.c_[cov[0], cov[1]], axis=1) > 2))[0]
        # k_idx = sp.where((sp.mean(cov[0], axis=1) > 2) & (sp.mean(cov[1], axis=1) > 2))[0]
            cov[0] = cov[0][k_idx, :]
            cov[1] = cov[1][k_idx, :]
            curr_gene_counts = curr_gene_counts[k_idx, :]
            event_idx = event_idx[k_idx]
            gene_idx = gene_idx[k_idx]
            event_ids = [x[k_idx] for x in event_ids]

            cov[0] = sp.around(sp.hstack([cov[0], curr_gene_counts]))
            cov[1] = sp.around(sp.hstack([cov[1], curr_gene_counts]))
            cov = sp.vstack(cov)
            event_ids = sp.hstack(event_ids)

            tidx = sp.arange(idx1.shape[0])

            #if CFG['debug']:
            #    for i in range(cov.shape[0]):
            #        fig = plt.figure(figsize=(8, 6), dpi=100)
            #        ax = fig.add_subplot(111)
            #        ax.hist(cov[i, :] * sf, 50, histtype='bar', rwidth=0.8)
            #        #ax.plot(sp.arange(cov.shape[1]), sorted(cov[i, :]), 'bo')
            #        ax.set_title('Count Distribution - Sample %i' % i )
            #        plt.savefig('count_dist.%i.pdf' % i, format='pdf', bbox_inches='tight')
            #        plt.close(fig)

            ### build design matrix for testing
            dmatrix1 = sp.zeros((cov.shape[1], 4), dtype='bool')
            dmatrix1[:, 0] = 1  # intercept
            dmatrix1[tidx, 1] = 1  # delta a
            dmatrix1[tidx, 2] = 1  # delta g
            dmatrix1[tidx + (idx1.shape[0] + idx2.shape[0]), 2] = 1  # delta g
            dmatrix1[(idx1.shape[0] + idx2.shape[0]):, 3] = 1  # is g
            dmatrix0 = dmatrix1[:, [0, 2, 3]]

            ### make event splice forms unique to prevent unnecessary tests
            event_ids, u_idx, r_idx = sp.unique(event_ids,
                                                return_index=True,
                                                return_inverse=True)
            if CFG['verbose']:
                print 'Consider %i unique event splice forms for testing' % u_idx.shape[
                    0]

            ### run testing
            #pvals = run_testing(cov[u_idx, :], dmatrix0, dmatrix1, sf, CFG, r_idx)
            pvals = run_testing(cov, dmatrix0, dmatrix1, sf, CFG)
            pvals_adj = adj_pval(pvals, CFG)

            ### write output
            out_fname = os.path.join(
                outdir,
                'test_results_C%i_%s.tsv' % (options.confidence, event_type))
            if CFG['verbose']:
                print 'Writing test results to %s' % out_fname
            s_idx = sp.argsort(pvals_adj)
            header = sp.array(['event_id', 'gene', 'p_val', 'p_val_adj'])
            event_ids = sp.array(
                ['%s_%i' % (event_type, i + 1) for i in event_idx],
                dtype='str')
            if CFG['is_matlab']:
                data_out = sp.c_[event_ids[s_idx], gene_ids[gene_idx[s_idx],
                                                            0],
                                 pvals[s_idx].astype('str'),
                                 pvals_adj[s_idx].astype('str')]
            else:
                data_out = sp.c_[event_ids[s_idx], gene_ids[gene_idx[s_idx]],
                                 pvals[s_idx].astype('str'),
                                 pvals_adj[s_idx].astype('str')]
            data_out = sp.r_[header[sp.newaxis, :], data_out]
            sp.savetxt(out_fname, data_out, delimiter='\t', fmt='%s')
Example #52
0
        
        PL.figure(figsize=[12,6])
        PL.subplot(211)
        PL.title('Peak score')
        PL.plot(p,score,'b.-')
        PL.ylim([0,5000])
        PL.subplot(212)
        PL.title('Quality score (sum=0.5)')
        PL.plot(p,L0_bin,'b.-')
        PL.ylim([-25000,0])
        fn = os.path.join('./../results/figures/',os.path.basename(options.data_file)+'_maxcov%d_chrom%s' % (preprocess_params['max_coverage'],options.chrom))
        fn_figure = fn+'.png'
        fn_csv = fn+'.csv'
        PL.savefig(fn_figure)
        M = SP.concatenate((p[:,SP.newaxis],score[:,SP.newaxis],L0_bin[:,SP.newaxis]),axis=1)
        SP.savetxt(fn_csv,M)


    if 0:
        Nhom_res,p = bin_data(1.0*(data['counts_res'][:,1]==0),pos,w=100E3)
        Nhom_sus,p = bin_data(1.0*(data['counts_sus'][:,1]==0),pos,w=100E3)
        Nhet_res,p = bin_data(1.0*(data['counts_res'][:,1]!=0),pos,w=100E3)
        Nhet_sus,p = bin_data(1.0*(data['counts_sus'][:,1]!=0),pos,w=100E3)

        R_res = Nhet_res/(Nhom_res+Nhet_res)
        R_sus = Nhet_sus/(Nhom_sus+Nhet_sus)
        
        p0=PL.plot(p,R_res,'r.-')
        p1=PL.plot(p,R_sus,'b.-')
        p2=PL.plot(p,R_res-R_sus,'k.-')
        PL.legend([p0,p1,p2],['res','sus','delta'])
Example #53
0
def main(input_options, libmode=False):
    def analysePatterns(strain):

        # these are the IRE conventions, except that integers are 0->5 rather than 1->6
        strainDict = {0: "xx", 1: "yy", 2: "zz", 3: "yz", 4: "zx", 5: "xy"}

        strainsUsed = S.zeros((6, 1))

        for a in range(0, S.size(strain)):
            if strain[a] != 0.0:
                print strainDict[a], "component is non-zero"
                strainsUsed[a] = 1
            else:
                strainsUsed[a] = 0

        return strainsUsed

    def cMatrix(symmetryType, TetrHigh):
        if symmetryType == "Cubic":
            return S.matrix([[1, 7, 7, 0, 0, 0], [7, 1, 7, 0, 0, 0],
                             [7, 7, 1, 0, 0, 0], [0, 0, 0, 4, 0, 0],
                             [0, 0, 0, 0, 4, 0], [0, 0, 0, 0, 0, 4]])

        elif symmetryType == "Trigonal-high/Hexagonal":
            return S.matrix([[1, 7, 8, 9, 0, 0], [7, 1, 8, -9, 0, 0],
                             [8, 8, 3, 0, 0, 0], [9, -9, 0, 4, 0, 0],
                             [0, 0, 0, 0, 4, 9], [0, 0, 0, 0, 9, 6]])

        elif symmetryType == "Trigonal-low":
            return S.matrix([[1, 7, 8, 9, 10, 0], [7, 1, 8, -9, -10, 0],
                             [8, 8, 3, 0, 0, 0], [9, -9, 0, 4, 0, -10],
                             [10, -10, 0, 0, 4, 9], [0, 0, 0, -10, 9, 6]])

        elif symmetryType == "Tetragonal":
            if TetrHigh == "-1":
                print "Higher-symmetry tetragonal (422,4mm,4-2m,4/mmm)"
                return S.matrix([[1, 7, 8, 0, 0, 0], [7, 1, 8, 0, 0, 0],
                                 [8, 8, 3, 0, 0, 0], [0, 0, 0, 4, 0, 0],
                                 [0, 0, 0, 0, 4, 0], [0, 0, 0, 0, 0, 6]])
            else:
                print "Lower-symmetry tetragonal (4,-4,4/m)"
                return S.matrix([[1, 7, 8, 0, 0, 11], [7, 1, 8, 0, 0, -11],
                                 [8, 8, 3, 0, 0, 0], [0, 0, 0, 4, 0, 0],
                                 [0, 0, 0, 0, 4, 0], [11, -11, 0, 0, 0, 6]])

        elif symmetryType == "Orthorhombic":
            return S.matrix([[1, 7, 8, 0, 0, 0], [7, 2, 12, 0, 0, 0],
                             [8, 12, 3, 0, 0, 0], [0, 0, 0, 4, 0, 0],
                             [0, 0, 0, 0, 5, 0], [0, 0, 0, 0, 0, 6]])

        elif symmetryType == "Monoclinic":
            return S.matrix([[1, 7, 8, 0, 10, 0], [7, 2, 12, 0, 14, 0],
                             [8, 12, 3, 0, 17, 0], [0, 0, 0, 4, 0, 20],
                             [10, 14, 17, 0, 5, 0], [0, 0, 0, 20, 0, 6]])

        elif symmetryType == "Triclinic":
            return S.matrix([[1, 7, 8, 9, 10, 11], [7, 2, 12, 13, 14, 15],
                             [8, 12, 3, 16, 17, 18], [9, 13, 16, 4, 19, 20],
                             [10, 14, 17, 19, 5, 21], [11, 15, 18, 20, 21, 6]])

    def get_options():
        # deal with options
        if not libmode:
            p = optparse.OptionParser()
            p.add_option('--force-cml-output',
                         '-f',
                         action='store_true',
                         help="Force CML output",
                         dest="force")
            p.add_option('--graphics',
                         '-g',
                         action='store_true',
                         help="Show graphics (requires matplotlib)")
            p.add_option('--debug',
                         '-d',
                         action='store_true',
                         help="Debug mode (output to stdout rather than file)")
            p.add_option('--latex',
                         action='store_true',
                         help="dump LaTeX formatted table to file",
                         dest="latex")
            p.add_option('--latex-nt',
                         action='store_true',
                         help="supress LaaTeX line titles",
                         dest='latex_nt')
            p.add_option('--txt',
                         action='store',
                         help="Append line to text file",
                         dest="txt",
                         type="string")

            options, arguments = p.parse_args(args=input_options)
        else:

            class PotM_Options:
                xml = True
                graphics = True
                castep = False
                debug = False

            options = PotM_Options()

            taskRe = re.compile(r"(.+)-\w+\.cml")
            arguments = taskRe.findall(input_options[1][0])
            global outfile
            outfile = input_options[0]

        if options.graphics:
            try:
                global P
                import pylab as P
            except ImportError:
                print >> sys.stderr, "You need to have matplotlib installed for the --graphics option"
                sys.exit(1)

        return options, arguments

    options, arguments = get_options()

    # Not sure why the lattice types are enumerated like this, but this is how .cijdat does it...
    latticeTypes = {0:"Unknown", 1:"Triclinic", 2:"Monoclinic", 3:"Orthorhombic", \
     4:"Tetragonal", 5:"Cubic", 6:"Trigonal-low", 7:"Trigonal-high/Hexagonal"}

    # Get strain tensors
    seedname = arguments[0]

    cijdat = open(seedname + ".cijdat", "r")
    print "\nReading strain data from ", seedname + ".cijdat\n"

    numStrainPatterns = (len(cijdat.readlines()) -
                         2) / 4  #total for all strain patterns
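    # (the .cijdat file has a two-line header -- the lattice/steps line and
    # the strain magnitude -- and each recorded step then occupies four
    # lines: a pattern tag plus three rows of the strain tensor)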

    #rewind
    cijdat.seek(0)

    # deal with those first four integers
    latticeType, numsteps, TetrHigh, TrigHigh = cijdat.readline().split()
    numsteps = int(numsteps)

    symmetryType = latticeTypes[int(latticeType)]
    print "System is", symmetryType, "\n"

    # get maximum magnitude of strains
    magnitude = float(cijdat.readline())
    print numsteps, "steps of maximum magnitude", magnitude

    # if using graphics, do some initial set-up
    if options.graphics:
        fig = P.figure(num=1, figsize=(9.5, 8), facecolor='white')
        fig.subplots_adjust(left=0.07,
                            right=0.97,
                            top=0.97,
                            bottom=0.07,
                            wspace=0.5,
                            hspace=0.5)
        colourDict = {
            0: '#BAD0EF',
            1: '#FFCECE',
            2: '#BDF4CB',
            3: '#EEF093',
            4: '#FFA4FF',
            5: '#75ECFD'
        }

        for index1 in range(6):
            for index2 in range(6):
                # position this plot in a 6x6 grid
                sp = P.subplot(6, 6, 6 * (index1) + index2 + 1)
                sp.set_axis_off()
                # change the labels on the axes
                # xlabels = sp.get_xticklabels()
                # P.setp(xlabels,'rotation',90,fontsize=7)
                # ylabels = sp.get_yticklabels()
                # P.setp(ylabels,fontsize=7)
                P.text(0.4, 0.4, "n/a")

    print "\n<>---------------------------- ANALYSIS ---------------------------------<>"

    # initialise 1d array to store all 21 unique elastic constants - will be transformed into 6x6 matrix later
    finalCijs = S.zeros((21, 1))
    errors = S.zeros((21, 1))

    for patt in range(numStrainPatterns / numsteps):

        print "\nAnalysing pattern", patt + 1, ":"

        for a in range(0, numsteps):

            pattern = cijdat.readline()

            # grab the strain data from the .cijdat file
            line1 = cijdat.readline().split()
            line2 = cijdat.readline().split()
            line3 = cijdat.readline().split()

            # only take from the top right triangle
            # numbering according to IRE conventions (Proc IRE, 1949)

            if a == 0:
                strain = S.array([
                    float(line1[0]),
                    float(line2[1]),
                    float(line3[2]), 2 * float(line2[2]), 2 * float(line1[2]),
                    2 * float(line1[1])
                ])
            else:
                strain = S.row_stack(
                    (strain,
                     S.array([
                         float(line1[0]),
                         float(line2[1]),
                         float(line3[2]), 2 * float(line2[2]),
                         2 * float(line1[2]), 2 * float(line1[1])
                     ])))

            # now get corresponding stress data from .castep
            (units,
             thisStress) = castep.get_stress_dotcastep(seedname + "_cij__" +
                                                       str(patt + 1) + "__" +
                                                       str(a + 1) + ".castep")

            # again, top right triangle
            if a == 0:
                stress = thisStress
            else:
                stress = S.row_stack((stress, thisStress))
        """
		Both the stress and strain matrices use the IRE conventions to reduce the
		3x3 matrices to 1x6 arrays. These 1D arrays are then stacked to form a 
		Nx6 array, where N=number of steps.
	
		Note that strain and stress arrays are numbered 0->5 rather than 1->6
		"""
        def __fit(index1, index2):
            from scipy import stats, sqrt, square

            # do the fit
            (cijFitted, intercept, r, tt,
             stderr) = stats.linregress(strain[:, index2 - 1],
                                        stress[:, index1 - 1])

            if (S.__version__ < '0.7.0'):
                # correct for scipy weirdness - see http://www.scipy.org/scipy/scipy/ticket/8
                # This was fixed before 0.7.0 release. Maybe in some versions of 0.6.x too -
                # will report huge errors if the check is wrong
                stderr = S.sqrt((numsteps * stderr**2) / (numsteps - 2))
                error = stderr / sqrt(sum(square(strain[:, index2 - 1])))
            else:
                # Work out the error ourselves as I cannot get it from
                # stderr and this has been checked with gnuplot's fitter
                fit_str = ((strain[:, index2 - 1] * cijFitted) + intercept)
                error = sqrt((sum(square(stress[:,index1-1] - fit_str)) / \
                             (numsteps-2))/(sum(square(strain[:,index2-1]))))

            # print info about the fit
            print '\n'
            print 'Cij (gradient)          :    ', cijFitted
            print 'Error in Cij            :    ', error
            print 'Intercept               :    ', intercept
            if abs(r) > 0.9:
                print 'Correlation coefficient :    ', r
            else:
                print 'Correlation coefficient :    ', r, '     <----- WARNING'

            # if using graphics, add a subplot
            if options.graphics:

                # position this plot in a 6x6 grid
                sp = P.subplot(6, 6, 6 * (index1 - 1) + index2)
                sp.set_axis_on()

                # change the labels on the axes
                xlabels = sp.get_xticklabels()
                P.setp(xlabels, 'rotation', 90, fontsize=7)
                ylabels = sp.get_yticklabels()
                P.setp(ylabels, fontsize=7)

                # colour the plot depending on the strain pattern
                sp.set_axis_bgcolor(colourDict[patt])

                # plot the data
                P.plot([
                    strain[0, index2 - 1], strain[numsteps - 1, index2 - 1]
                ], [
                    cijFitted * strain[0, index2 - 1] + intercept,
                    cijFitted * strain[numsteps - 1, index2 - 1] + intercept
                ])
                P.plot(strain[:, index2 - 1], stress[:, index1 - 1], 'ro')

            return cijFitted, error

        def __appendOrReplace(valList, erList, val):
            try:
                valList.append(val[0])
                erList.append(val[1])
                return (sum(valList) / len(valList)), (S.sqrt(
                    sum([x**2 for x in erList]) / len(erList)**2))
            except NameError:
                return val[0], val[1]

        def __createListAndAppend(val):
            newList = []
            newList.append(val[0])
            errorList = []
            errorList.append(val[1])
            return val[0], newList, val[1], errorList

        cij = S.zeros(21)

        # Analyse the patterns to see which strains were applied
        strainsUsed = analysePatterns(strain[0, :])

        # should check strains are as expected

        if symmetryType == "Cubic":

            if S.all(strainsUsed.transpose() == S.array(
                [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0]])):  # strain pattern e1+e4

                finalCijs[0], errors[0] = __fit(1, 1)  # fit C11
                fit_21, fit_21_error = __fit(2, 1)
                fit_31, fit_31_error = __fit(3, 1)
                finalCijs[6] = (fit_21 + fit_31) / 2  # fit C21+C31
                errors[6] = S.sqrt((fit_21_error**2) / 4 +
                                   (fit_31_error**2) / 4)
                finalCijs[3], errors[3] = __fit(4, 4)  # fit C44

            else:
                print "Unsupported strain pattern"
                sys.exit(1)

        elif symmetryType == "Trigonal-high/Hexagonal":
            if S.all(strainsUsed.transpose() == S.array([[
                    0.0, 0.0, 1.0, 0.0, 0.0, 0.0
            ]])):  # strain pattern e3 (hexagonal)

                # fit C13 + C23, and add to list (more values coming...)
                finalCijs[7], cij13, errors[7], er13 = __createListAndAppend(
                    __fit(1, 3))
                finalCijs[7], cij13, errors[7], er13 = __createListAndAppend(
                    __fit(2, 3))

                finalCijs[2], errors[2] = __fit(3, 3)  # fit C33

            elif S.all(strainsUsed.transpose() == S.array([[
                    1.0, 0.0, 0.0, 1.0, 0.0, 0.0
            ]])):  # strain pattern e1+e4 (hexagonal)

                finalCijs[0], errors[0] = __fit(1, 1)  # fit C11
                finalCijs[6], errors[6] = __fit(2, 1)  # fit C21
                finalCijs[7], errors[7] = __appendOrReplace(
                    cij13, er13, __fit(3, 1))  # fit C31
                finalCijs[3], errors[3] = __fit(4, 4)  # fit C44

            elif S.all(strainsUsed.transpose() == S.array(
                [[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]])):

                # strain pattern e1 (trigonal-high)

                finalCijs[0], errors[0] = __fit(1, 1)  # fit C11
                finalCijs[6], errors[6] = __fit(2, 1)  # fit C21
                finalCijs[7], errors[7] = __fit(3, 1)  # fit C31
                finalCijs[8], errors[8] = __fit(4, 1)  # fit C41
                # Should be zero? finalCijs[9], errors[9] = __fit(5,1)                # fit C51

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 0.0, 1.0, 1.0, 0.0, 0.0]])):

                # strain pattern e3+e4 (trigonal-high)
                # could recalculate C13/C14/C23/C24/C46 here, but won't just now

                finalCijs[2], errors[2] = __fit(3, 3)  # fit C33
                finalCijs[3], errors[3] = __fit(4, 4)  # fit C44

            else:
                print "Unsupported strain pattern"
                sys.exit(1)

        elif symmetryType == "Trigonal-low":
            if S.all(strainsUsed.transpose() == S.array(
                [[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]])):

                # strain pattern e1

                finalCijs[0], errors[0] = __fit(1, 1)  # fit C11
                finalCijs[6], errors[6] = __fit(2, 1)  # fit C21
                finalCijs[7], errors[7] = __fit(3, 1)  # fit C31
                finalCijs[8], errors[8] = __fit(4, 1)  # fit C41
                finalCijs[9], errors[9] = __fit(5, 1)  # fit C51

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 0.0, 1.0, 1.0, 0.0, 0.0]])):

                # strain pattern e3+e4
                # could recalculate C13/C14/C23/C24/C46 here, but won't just now

                finalCijs[2], errors[2] = __fit(3, 3)  # fit C33
                finalCijs[3], errors[3] = __fit(4, 4)  # fit C44

            else:
                print "Unsupported strain pattern"
                sys.exit(1)

        elif symmetryType == "Tetragonal":
            if S.all(strainsUsed.transpose() == S.array(
                [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0]])):  # strain pattern e1+e4

                finalCijs[0], errors[0] = __fit(1, 1)  # fit C11
                finalCijs[6], errors[6] = __fit(2, 1)  # fit C21
                finalCijs[7], errors[7] = __fit(3, 1)  # fit C31
                finalCijs[10], errors[10] = __fit(6, 1)  # fit C61
                finalCijs[3], errors[3] = __fit(4, 4)  # fit C44

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 0.0, 1.0, 0.0, 0.0, 1.0]])):  # strain pattern e3+e6

                finalCijs[2], errors[2] = __fit(3, 3)  # fit C33
                finalCijs[5], errors[5] = __fit(6, 6)  # fit C66

            else:
                print "Unsupported strain pattern"
                sys.exit(1)

        elif symmetryType == "Orthorhombic":
            if S.all(strainsUsed.transpose() == S.array(
                [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0]])):  # strain pattern e1+e4

                finalCijs[0], errors[0] = __fit(1, 1)  # fit C11
                finalCijs[6], cij12, errors[6], er12 = __createListAndAppend(
                    __fit(2, 1))  # fit C21
                finalCijs[7], cij13, errors[7], er13 = __createListAndAppend(
                    __fit(3, 1))  # fit C31
                finalCijs[3], errors[3] = __fit(4, 4)  # fit C44

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 1.0, 0.0, 0.0, 1.0, 0.0]])):  # strain pattern e2+e5

                finalCijs[6], errors[6] = __appendOrReplace(
                    cij12, er12, __fit(1, 2))  # fit C12
                finalCijs[1], errors[1] = __fit(2, 2)  # fit C22
                finalCijs[11], cij23, errors[11], er23 = __createListAndAppend(
                    __fit(3, 2))  # fit C32
                finalCijs[4], errors[4] = __fit(5, 5)  # fit C55

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 0.0, 1.0, 0.0, 0.0, 1.0]])):  # strain pattern e3+e6

                finalCijs[7], errors[7] = __appendOrReplace(
                    cij13, er13, __fit(1, 3))  # fit C13
                finalCijs[11], errors[11] = __appendOrReplace(
                    cij23, er23, __fit(2, 3))  # fit C23
                finalCijs[2], errors[2] = __fit(3, 3)  # fit C33
                finalCijs[5], errors[5] = __fit(6, 6)  # fit C66

            else:
                print "Unsupported strain pattern"
                sys.exit(1)

        elif symmetryType == "Monoclinic":
            if S.all(strainsUsed.transpose() == S.array(
                [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0]])):  # strain pattern e1+e4

                finalCijs[0], errors[0] = __fit(1, 1)  # fit C11
                finalCijs[6], cij12, errors[6], er12 = __createListAndAppend(
                    __fit(2, 1))  # fit C21
                finalCijs[7], cij13, errors[7], er13 = __createListAndAppend(
                    __fit(3, 1))  # fit C31
                finalCijs[3], errors[3] = __fit(4, 4)  # fit C44
                finalCijs[9], cij51, errors[9], er51 = __createListAndAppend(
                    __fit(5, 1))  # fit C51
                finalCijs[19], cij64, errors[19], er64 = __createListAndAppend(
                    __fit(6, 4))  # fit C64

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 0.0, 1.0, 0.0, 0.0, 1.0]])):  # strain pattern e3+e6

                finalCijs[7], errors[7] = __appendOrReplace(
                    cij13, er13, __fit(1, 3))  # fit C13
                finalCijs[11], cij23, errors[11], er23 = __createListAndAppend(
                    __fit(2, 3))  # fit C23
                finalCijs[2], errors[2] = __fit(3, 3)  # fit C33
                finalCijs[16], cij53, errors[16], er53 = __createListAndAppend(
                    __fit(5, 3))  # fit C53
                finalCijs[19], errors[19] = __appendOrReplace(
                    cij64, er64, __fit(4, 6))  # fit C46
                finalCijs[5], errors[5] = __fit(6, 6)  # fit C66

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 1.0, 0.0, 0.0, 0.0, 0.0]])):  # strain pattern e2

                finalCijs[6], errors[6] = __appendOrReplace(
                    cij12, er12, __fit(1, 2))  # fit C12
                finalCijs[1], errors[1] = __fit(2, 2)  # fit C22
                finalCijs[11], errors[11] = __appendOrReplace(
                    cij23, er23, __fit(3, 2))  # fit C32
                finalCijs[13], cij52, errors[13], er52 = __createListAndAppend(
                    __fit(5, 2))  # fit C52

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 0.0, 0.0, 0.0, 1.0, 0.0]])):  # strain pattern e5

                finalCijs[9], errors[9] = __appendOrReplace(
                    cij51, er51, __fit(1, 5))  # fit C15
                finalCijs[13], errors[13] = __appendOrReplace(
                    cij52, er52, __fit(2, 5))  # fit C25
                finalCijs[16], errors[16] = __appendOrReplace(
                    cij53, er53, __fit(3, 5))  # fit C35
                finalCijs[4], errors[4] = __fit(5, 5)  # fit C55
            else:
                print "Unsupported strain pattern"
                sys.exit(1)

        elif symmetryType == "Triclinic":

            if S.all(strainsUsed.transpose() == S.array(
                [[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]])):  # strain pattern e1

                finalCijs[0], errors[0] = __fit(1, 1)  # fit C11
                finalCijs[6], cij12, errors[6], er12 = __createListAndAppend(
                    __fit(2, 1))  # fit C21
                finalCijs[7], cij13, errors[7], er13 = __createListAndAppend(
                    __fit(3, 1))  # fit C31
                finalCijs[8], cij14, errors[8], er14 = __createListAndAppend(
                    __fit(4, 1))  # fit C41
                finalCijs[9], cij15, errors[9], er15 = __createListAndAppend(
                    __fit(5, 1))  # fit C51
                finalCijs[10], cij16, errors[10], er16 = __createListAndAppend(
                    __fit(6, 1))  # fit C61

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 1.0, 0.0, 0.0, 0.0, 0.0]])):  # strain pattern e2

                finalCijs[6], errors[6] = __appendOrReplace(
                    cij12, er12, __fit(1, 2))  # fit C12
                finalCijs[1], errors[1] = __fit(2, 2)  # fit C22
                finalCijs[11], cij23, errors[11], er23 = __createListAndAppend(
                    __fit(3, 2))  # fit C32
                finalCijs[12], cij24, errors[12], er24 = __createListAndAppend(
                    __fit(4, 2))  # fit C42
                finalCijs[13], cij25, errors[13], er25 = __createListAndAppend(
                    __fit(5, 2))  # fit C52
                finalCijs[14], cij26, errors[14], er26 = __createListAndAppend(
                    __fit(6, 2))  # fit C62

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 0.0, 1.0, 0.0, 0.0, 0.0]])):  # strain pattern e3

                finalCijs[7], errors[7] = __appendOrReplace(
                    cij13, er13, __fit(1, 3))  # fit C13
                finalCijs[11], errors[11] = __appendOrReplace(
                    cij23, er23, __fit(2, 3))  # fit C23
                finalCijs[2], errors[2] = __fit(3, 3)  # fit C33
                finalCijs[15], cij34, errors[15], er34 = __createListAndAppend(
                    __fit(4, 3))  # fit C43
                finalCijs[16], cij35, errors[16], er35 = __createListAndAppend(
                    __fit(5, 3))  # fit C53
                finalCijs[17], cij36, errors[17], er36 = __createListAndAppend(
                    __fit(6, 3))  # fit C63

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 0.0, 0.0, 1.0, 0.0, 0.0]])):  # strain pattern e4

                finalCijs[8], errors[8] = __appendOrReplace(
                    cij14, er14, __fit(1, 4))  # fit C14
                finalCijs[12], errors[12] = __appendOrReplace(
                    cij24, er24, __fit(2, 4))  # fit C24
                finalCijs[15], errors[15] = __appendOrReplace(
                    cij34, er34, __fit(3, 4))  # fit C34
                finalCijs[3], errors[3] = __fit(4, 4)  # fit C44
                finalCijs[18], cij45, errors[18], er45 = __createListAndAppend(
                    __fit(5, 4))  # fit C54
                finalCijs[19], cij46, errors[19], er46 = __createListAndAppend(
                    __fit(6, 4))  # fit C64

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 0.0, 0.0, 0.0, 1.0, 0.0]])):  # strain pattern e5

                finalCijs[9], errors[9] = __appendOrReplace(
                    cij15, er15, __fit(1, 5))  # fit C15
                finalCijs[13], errors[13] = __appendOrReplace(
                    cij25, er25, __fit(2, 5))  # fit C25
                finalCijs[16], errors[16] = __appendOrReplace(
                    cij35, er35, __fit(3, 5))  # fit C35
                finalCijs[18], errors[18] = __appendOrReplace(
                    cij45, er45, __fit(4, 5))  # fit C45
                finalCijs[4], errors[4] = __fit(5, 5)  # fit C55
                finalCijs[20], cij56, errors[20], er56 = __createListAndAppend(
                    __fit(6, 5))  # fit C65

            elif S.all(strainsUsed.transpose() == S.array(
                [[0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])):  # strain pattern e6

                finalCijs[10], errors[10] = __appendOrReplace(
                    cij16, er16, __fit(1, 6))  # fit C16
                finalCijs[14], errors[14] = __appendOrReplace(
                    cij26, er26, __fit(2, 6))  # fit C26
                finalCijs[17], errors[17] = __appendOrReplace(
                    cij36, er36, __fit(3, 6))  # fit C36
                finalCijs[19], errors[19] = __appendOrReplace(
                    cij46, er46, __fit(4, 6))  # fit C46
                finalCijs[20], errors[20] = __appendOrReplace(
                    cij56, er56, __fit(5, 6))  # fit C56
                finalCijs[5], errors[5] = __fit(6, 6)  # fit C66

            else:
                print "Unsupported strain pattern"
                sys.exit(1)
        else:
            print "Unsupported symmetry type. Exiting"
            sys.exit(1)

    if options.graphics:
        P.savefig(os.path.basename(seedname) + '_fits')

    cijdat.close()

    if symmetryType == "Trigonal-high/Hexagonal" or symmetryType == "Trigonal-low":
        # for these systems, C66 is calculated as a combination of the other Cijs.
        finalCijs[5] = 0.5 * (finalCijs[0] - finalCijs[6])
        errors[5] = S.sqrt(0.25 * (errors[0]**2 + errors[6]**2))

    c = cMatrix(symmetryType, TetrHigh)

    # Generate the 6x6 matrix of elastic constants
    # - negative values signify a symmetry relation
    finalCijMatrix = S.zeros((6, 6))
    finalErrors = S.zeros((6, 6))
    for i in range(0, 6):
        for j in range(0, 6):
            index = int(c[i, j])
            if index > 0:
                finalCijMatrix[i, j] = finalCijs[index - 1]
                finalErrors[i, j] = errors[index - 1]
            elif index < 0:
                finalCijMatrix[i, j] = -finalCijs[-index - 1]
                finalErrors[i, j] = errors[-index - 1]

    # Tests
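    # (Added note) For cubic crystals these are the Born mechanical-stability
    # conditions; a violation indicates an unstable lattice or badly fitted
    # constants.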
    if symmetryType == "Cubic":
        if finalCijs[3] <= 0:
            print "\n *** WARNING: C44 is less than or equal to zero ***\n"
        if finalCijs[0] <= abs(finalCijs[6]):
            print "\n *** WARNING: C11 is less than or equal to |C12| ***\n"
        if (finalCijs[0] + 2 * finalCijs[6]) <= 0:
            print "\n *** WARNING: C11+2C12 is less than or equal to zero ***\n"

    print "\n<>---------------------------- RESULTS ----------------------------------<>\n"
    print "Final Cij matrix (" + units + "):"
    print S.array2string(finalCijMatrix,
                         max_line_width=130,
                         suppress_small=True)
    print "\nErrors on Cij matrix (" + units + "):"
    print S.array2string(finalErrors, max_line_width=130, suppress_small=True)

    (sij, esij, covsij) = CijUtil.invertCij(finalCijMatrix, finalErrors)

    print "\nFinal Sij matrix (" + units + "-1):"
    print S.array2string(sij, max_line_width=130, suppress_small=True)
    print "\nErrors on Sij matrix (" + units + "-1):"
    print S.array2string(esij, max_line_width=130, suppress_small=True)

    print "\n<>----------------------------------------------------------------------<>\n"
    if symmetryType == "Cubic":
        print "  Zener anisotropy index     : %6.5f +/- %6.5f" % (
            CijUtil.zenerAniso(finalCijMatrix, finalErrors))
    print "  Universal anisotropy index : %6.5f +/- %6.5f" % (CijUtil.uAniso(
        finalCijMatrix, finalErrors))
    print "  (Rangnthn and Ostoja-Starzewski, PRL 101, 055504)\n"

    (youngX, youngY, youngZ, eyoungX, eyoungY, eyoungZ, poissonXY, poissonXZ,
     poissonYX, poissonYZ, poissonZX, poissonZY, epoissonXY, epoissonXZ,
     epoissonYX, epoissonYZ, epoissonZX,
     epoissonZY) = CijUtil.youngsmod(finalCijMatrix, finalErrors)

    format = "%18s : %11.5f %8s"
    print "\n                          x           y           z"
    print "%18s : %11.5f %11.5f %11.5f %6s" % ("Young's Modulus", youngX,
                                               youngY, youngZ, units)
    print "%18s : %11.5f %11.5f %11.5f " % ("      +/-      ", eyoungX,
                                            eyoungY, eyoungZ)

    print "\n                        xy       xz       yx       yz       zx       zy"
    format = "%18s :  %6.5f  %6.5f  %6.5f  %6.5f  %6.5f  %6.5f"
    print format % ("Poisson's Ratios", poissonXY, poissonXZ, poissonYX,
                    poissonYZ, poissonZX, poissonZY)
    print format % ("             +/-", epoissonXY, epoissonXZ, epoissonYX,
                    epoissonYZ, epoissonZX, epoissonZY)

    print "\n<>--------------------- POLYCRYSTALLINE RESULTS -------------------------<>\n"
    (voigtB, reussB, voigtG, reussG, hillB, hillG, evB, erB, evG, erG, ehB,
     ehG) = CijUtil.polyCij(finalCijMatrix, finalErrors)
    format = "%16s : %11.5f %11.5f %11.5f %11.5f %11.5f %11.5f %6s"
    print "                     Voigt         +/-       Reuss         +/-       Hill          +/-"
    print format % ("Bulk Modulus", voigtB, evB, reussB, erB, hillB, ehB,
                    units)
    print format % ("Shear Modulus", voigtG, evG, reussG, erG, hillG, ehG,
                    units)

    print "\n<>-----------------------------------------------------------------------<>\n"

    S.savetxt(seedname + '_cij.txt', finalCijMatrix)
    if options.latex:
        CijUtil.latexCij(finalCijMatrix, finalErrors, seedname + '.tex',
                         options.latex_nt)
    if options.txt:
        CijUtil.txtCij(finalCijMatrix, options.txt)
Example #54
0
def dump(R, directory):
    for r in list(R.keys()):
        fn = os.path.join(directory, r + filetype)
        SP.savetxt(fn, R[r])
Example #55
0
# reducevalue2 = reduce(lambda x,y : x + y, [1,2,3,4,5])
# print (reducevalue2)


# a = [8, 1, 2, 10, 9, 6, 7, 8, 0, 4, 19, 22, 3]
# maxv = 0
# sndMaxv = 0
# maxIdx = 0
# sndMaxIdx = 0
# for i in range(len(a)):
#     if a[i] > maxv:
#         sndMaxv = maxv
#         sndMaxIdx = maxIdx
#         maxv = a[i]
#         maxIdx = i
#
# print(maxv, maxIdx, sndMaxv, sndMaxIdx)

import scipy as sp
import os

a = [1,2,3,4]
b = [10,20,30,40]
c = zip(a, b)
d = list(c)
print(d)
path = os.path.realpath(__file__)
path = os.path.dirname(path)
print(path)
sp.savetxt(os.path.join(path, "a.tsv"), d, delimiter="\t", fmt="%s")
Example #56
0
def pleiopred_genomewide(data_file_D1,
                         data_file_D2,
                         alpha,
                         Pi,
                         init_betas_prefix,
                         ld_radius=None,
                         ld_dict=None,
                         out_file_prefix=None,
                         n1=None,
                         n2=None,
                         PRF=None,
                         num_iter=60,
                         burn_in=10,
                         zero_jump_prob=0.05,
                         user_h1=None,
                         user_h2=None):
    """
    Calculate PleioPred for a genome
    """
    prf_chr = PRF['chrom']
    prf_sids = PRF['sids']
    h2_D1 = PRF['h2_D1']
    h2_D2 = PRF['h2_D2']

    df1 = h5py.File(data_file_D1, 'r')
    df2 = h5py.File(data_file_D2, 'r')
    cord_data_g1 = df1['cord_data']
    cord_data_g2 = df2['cord_data']

    has_phenotypes1 = False
    if 'y' in df1.keys():
        print 'Validation phenotypes of disease 1 found.'
        y1 = df1['y'][...]  # Phenotype
        num_individs1 = len(y1)
        prs_D1 = sp.zeros(num_individs1)
        has_phenotypes1 = True

    has_phenotypes2 = False
    if 'y' in df2.keys():
        print 'Validation phenotypes of disease 2 found.'
        y2 = df2['y'][...]  # Phenotype
        num_individs2 = len(y2)
        prs_D2 = sp.zeros(num_individs2)
        has_phenotypes2 = True

    ld_scores_dict = ld_dict['ld_scores_dict']
    chrom_ld_dict = ld_dict['chrom_ld_dict']
    chrom_ref_ld_mats = ld_dict['chrom_ref_ld_mats']
    chrom_snps = ld_dict['chrom_snps']
    chrom_snpids = ld_dict['chrom_snpids']

    chrom_betas1 = ld_dict['chrom_betas1']
    chrom_betas2 = ld_dict['chrom_betas2']

    num_snps1 = 0
    sum_beta2s1 = 0
    num_snps2 = 0
    sum_beta2s2 = 0

    chr_list = list(set(cord_data_g1.keys()) & set(cord_data_g2.keys()))

    for chrom_str in chromosomes_list:
        if chrom_str in chr_list:
            betas1 = chrom_betas1[chrom_str]
            n_snps1 = len(betas1)
            num_snps1 += n_snps1
            sum_beta2s1 += sp.sum(betas1**2)
            betas2 = chrom_betas2[chrom_str]
            n_snps2 = len(betas2)
            num_snps2 += n_snps2
            sum_beta2s2 += sp.sum(betas2**2)

    if user_h1 is None or user_h2 is None:
        L1 = ld_scores_dict['avg_gw_ld_score']
        chi_square_lambda1 = sp.mean(n1 * sum_beta2s1 / float(num_snps1))
        print 'Genome-wide lambda inflation of D1:', chi_square_lambda1
        print 'Genome-wide mean LD score of D1:', L1
        gw_h2_ld_score_est1 = max(0.0001, (max(1, chi_square_lambda1) - 1) /
                                  (n1 * (L1 / num_snps1)))
        print 'Estimated genome-wide heritability of D1:', gw_h2_ld_score_est1

        #assert chi_square_lambda1>1, 'Something is wrong with the GWAS summary statistics of D1.  Perhaps there were issues parsing of them, or the given GWAS sample size (N) was too small. Either way, lambda (the mean Chi-square statistic) is too small.  '

        L2 = ld_scores_dict['avg_gw_ld_score']
        chi_square_lambda2 = sp.mean(n2 * sum_beta2s2 / float(num_snps2))
        print 'Genome-wide lambda inflation of D2:', chi_square_lambda2
        print 'Genome-wide mean LD score of D2:', L2
        gw_h2_ld_score_est2 = max(0.0001, (max(1, chi_square_lambda2) - 1) /
                                  (n2 * (L2 / num_snps2)))
        print 'Estimated genome-wide heritability of D2:', gw_h2_ld_score_est2
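        # Worked example of the estimator above: with chi_square_lambda = 1.1,
        # n = 50000, avg LD score L = 110 and num_snps = 1e6,
        # h2 ~= (1.1 - 1) / (50000 * 110 / 1e6) = 0.1 / 5.5 ~= 0.018.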

        #assert chi_square_lambda2>1, 'Something is wrong with the GWAS summary statistics of D2.  Perhaps there were issues parsing of them, or the given GWAS sample size (N) was too small. Either way, lambda (the mean Chi-square statistic) is too small.  '
    else:
        gw_h2_ld_score_est1 = user_h1
        gw_h2_ld_score_est2 = user_h2

    h2_new1 = sp.sum(h2_D1)
    sig_12_D1 = (1.0) / n1
    pr_sig1 = {}

    h2_new2 = sp.sum(h2_D2)
    sig_12_D2 = (1.0) / n2
    pr_sig2 = {}

    post_betas1 = {}
    post_betas2 = {}

    out1 = []
    out1.append('Estimated Genome-wide heritability: ' +
                str(gw_h2_ld_score_est1) + '\n')
    out1.append('Posterior variance for each snp: ' + str(sig_12_D1) + '\n')

    out2 = []
    out2.append('Estimated Genome-wide heritability: ' +
                str(gw_h2_ld_score_est2) + '\n')
    out2.append('Posterior variance for each snp: ' + str(sig_12_D2) + '\n')

    ## main calculation, chr by chr, posterior betas and prs ##

    beta1_current = chrom_betas1
    beta2_current = chrom_betas2

    for chrom_str in chromosomes_list:
        if chrom_str in chr_list:
            print 'Preparing annotation-based priors for Chromosome %s' % (
                (chrom_str.split('_'))[1])

            pval_derived_betas1 = chrom_betas1[chrom_str]
            pval_derived_betas2 = chrom_betas2[chrom_str]
            sids = chrom_snpids[chrom_str]

            n_snps_chrom = len(sids)

            chri = int(chrom_str.split('_')[1])
            prf_sids_chri = prf_sids[prf_chr == chri]
            h2_D1_chri = h2_D1[prf_chr == chri]
            h2_D2_chri = h2_D2[prf_chr == chri]
            if len(prf_sids_chri) == len(sids):
                if sum(prf_sids_chri == sids) == len(prf_sids_chri):
                    pr_sig1[chrom_str] = sp.copy(h2_D1_chri)
                    pr_sig2[chrom_str] = sp.copy(h2_D2_chri)
                else:
                    print 'sorting prior files'
                    pr_sig1[chrom_str] = sp.zeros(len(sids))
                    pr_sig2[chrom_str] = sp.zeros(len(sids))
                    for i, sid in enumerate(sids):
                        pr_sig1[chrom_str][i] = h2_D1_chri[prf_sids_chri ==
                                                           sid]
                        pr_sig2[chrom_str][i] = h2_D2_chri[prf_sids_chri ==
                                                           sid]
            else:
                print 'extracting prior files'
                pr_sig1[chrom_str] = sp.zeros(len(sids))
                pr_sig2[chrom_str] = sp.zeros(len(sids))
                for i, sid in enumerate(sids):
                    pr_sig1[chrom_str][i] = h2_D1_chri[prf_sids_chri == sid]
                    pr_sig2[chrom_str][i] = h2_D2_chri[prf_sids_chri == sid]

            pr_sig1[chrom_str] = gw_h2_ld_score_est1 * pr_sig1[chrom_str] / h2_new1
            pr_sig2[chrom_str] = gw_h2_ld_score_est2 * pr_sig2[chrom_str] / h2_new2
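            # The two rescalings above make the per-SNP prior variances sum,
            # over the whole genome, to the h2 estimate obtained earlier.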

    ########################### using AnnoPred-baseline as initial values ###############################
    init_betas_path = '%s.pickled.gz' % init_betas_prefix
    if not os.path.isfile(init_betas_path):
        print 'No initial values for MCMC found, generating ...'
        anno_post1 = {}
        anno_post2 = {}
        for chrom_str in chromosomes_list:
            if chrom_str in chr_list:
                pval_derived_betas1 = chrom_betas1[chrom_str]
                pval_derived_betas2 = chrom_betas2[chrom_str]
                annopred_betas1 = annopred_inf(
                    pval_derived_betas1,
                    pr_sigi=pr_sig1[chrom_str],
                    reference_ld_mats=chrom_ref_ld_mats[chrom_str],
                    n=n1,
                    ld_window_size=2 * ld_radius)
                annopred_betas2 = annopred_inf(
                    pval_derived_betas2,
                    pr_sigi=pr_sig2[chrom_str],
                    reference_ld_mats=chrom_ref_ld_mats[chrom_str],
                    n=n2,
                    ld_window_size=2 * ld_radius)
                anno_post1[chrom_str] = annopred_betas1
                anno_post2[chrom_str] = annopred_betas2
        init_betas = {'anno_post1': anno_post1, 'anno_post2': anno_post2}
        f = gzip.open(init_betas_path, 'wb')
        cPickle.dump(init_betas, f, protocol=2)
        f.close()
        print 'Initial betas are now pickled at %s' % init_betas_path
    else:
        print 'Loading initial values for MCMC from file: %s' % init_betas_path
        f = gzip.open(init_betas_path, 'r')
        init_betas = cPickle.load(f)
        f.close()
    #### initial values ####
    print 'Preparing initial values for MCMC'
    beta1_current = init_betas['anno_post1']
    beta2_current = init_betas['anno_post2']
    avg_betas1 = {}
    avg_betas2 = {}
    avg_PV = sp.zeros(4)
    for chrom_str in chromosomes_list:
        if chrom_str in chr_list:
            avg_betas1[chrom_str] = sp.zeros(len(chrom_betas1[chrom_str]))
            avg_betas2[chrom_str] = sp.zeros(len(chrom_betas2[chrom_str]))

#    Pi = sp.random.dirichlet((alpha,alpha,alpha,alpha),1).flatten()
    print 'Initial PV: (' + str(Pi[0]) + ', ' + str(Pi[1]) + ', ' + str(
        Pi[2]) + ', ' + str(Pi[3]) + ')'
    sp.savetxt('%s_Initial_PV' % (out_file_prefix) + '.txt', Pi)
    pb = 0
    pbar = ProgressBar(widgets=[Percentage(), ' ',
                                Bar(), " ",
                                Timer()],
                       maxval=num_iter * 22).start()
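    # One progress tick per autosome (22) per iteration.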
    for k in range(num_iter):  #Big iteration
        A1 = 0
        A2 = 0
        A3 = 0
        A4 = 0
        for chrom_str in chromosomes_list:
            if chrom_str in chr_list:
                n_snps_chrom = len(chrom_snpids[chrom_str])
                posterior_betas = post_betas.bi_mcmc_all_chr(
                    chrom_betas1[chrom_str],
                    chrom_betas2[chrom_str],
                    Pi=Pi,
                    pr_sig1=pr_sig1[chrom_str],
                    pr_sig2=pr_sig2[chrom_str],
                    start_betas1=beta1_current[chrom_str],
                    start_betas2=beta2_current[chrom_str],
                    h2_D1=gw_h2_ld_score_est1 *
                    (n_snps_chrom / float(num_snps1)),
                    n1=n1,
                    h2_D2=gw_h2_ld_score_est2 *
                    (n_snps_chrom / float(num_snps2)),
                    n2=n2,
                    ld_radius=ld_radius,
                    zj_p=zero_jump_prob,
                    ld_dict1=chrom_ld_dict[chrom_str],
                    ld_dict2=chrom_ld_dict[chrom_str])
                A1 += posterior_betas['A1']
                A2 += posterior_betas['A2']
                A3 += posterior_betas['A3']
                A4 += posterior_betas['A4']
                beta1_current[chrom_str] = posterior_betas['proposed_betas1']
                beta2_current[chrom_str] = posterior_betas['proposed_betas2']
                if k >= burn_in:
                    avg_betas1[chrom_str] += posterior_betas[
                        'curr_post_means1']  #Averaging over the posterior means instead of samples.
                    avg_betas2[chrom_str] += posterior_betas[
                        'curr_post_means2']
                pb = pb + 1
                pbar.update(pb)
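        # Conjugate Dirichlet-multinomial update: the prior pseudo-counts in
        # alpha plus this sweep's per-component SNP tallies A1..A4 give the
        # posterior from which the mixture proportions Pi are redrawn.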
        Pi = sp.random.dirichlet(
            (alpha[0] + A1, alpha[1] + A2, alpha[2] + A3, alpha[3] + A4),
            1).flatten()
        if k >= burn_in:
            avg_PV += Pi
    pbar.finish()

    ## prs and auc ##
    avg_PV = avg_PV / float(num_iter - burn_in)
    print 'Average PV: (' + str(avg_PV[0]) + ', ' + str(
        avg_PV[1]) + ', ' + str(avg_PV[2]) + ', ' + str(avg_PV[3]) + ')'
    sp.savetxt('%s_Avg_PV' % (out_file_prefix) + '.txt', avg_PV)

    for chrom_str in chromosomes_list:
        if chrom_str in chr_list:
            avg_betas1[chrom_str] = avg_betas1[chrom_str] / float(num_iter -
                                                                  burn_in)
            avg_betas2[chrom_str] = avg_betas2[chrom_str] / float(num_iter -
                                                                  burn_in)
            if has_phenotypes1:
                prs_chr_D1 = sp.dot(avg_betas1[chrom_str],
                                    chrom_snps[chrom_str])
                prs_D1 += prs_chr_D1
            if has_phenotypes2:
                prs_chr_D2 = sp.dot(avg_betas2[chrom_str],
                                    chrom_snps[chrom_str])
                prs_D2 += prs_chr_D2


############ PleioPred results #############
    corr_inf1 = sp.corrcoef(y1, prs_D1)[0, 1]
    r2_inf1 = corr_inf1**2
    #results_dict[p_str]['r2_pd']=r2_inf
    print 'D1: the R2 prediction accuracy (observed scale) of PleioPred was: %0.4f (%0.6f)' % (
        r2_inf1, ((1 - r2_inf1)**2) / num_individs1)
    out1.append(
        'D1: the R2 prediction accuracy (observed scale) of PleioPred was: ' +
        str(r2_inf1) + ' (' + str(((1 - r2_inf1)**2) / num_individs1) + ')\n')

    if corr_inf1 < 0:
        prs_D1 = -1 * prs_D1
    auc1 = pred_accuracy(y1, prs_D1)
    print 'D1: PleioPred AUC for the whole genome was: %0.4f' % auc1
    out1.append('D1: PleioPred AUC for the whole genome was: ' + str(auc1) +
                '\n')
    out1.append('D1: PleioPred COR for the whole genome was: ' +
                str(corr_inf1) + '\n')

    sp.savetxt('%s_y_' % (out_file_prefix) + '_D1.txt', y1)
    sp.savetxt('%s_prs' % (out_file_prefix) + '_PleioPred_D1.txt', prs_D1)

    # Write the D1 accuracy summary to disk
    ff_inf = open('%s_auc_' % (out_file_prefix) + '_PleioPred_D1.txt', "w")
    ff_inf.writelines(out1)
    ff_inf.close()

    corr_inf2 = sp.corrcoef(y2, prs_D2)[0, 1]
    r2_inf2 = corr_inf2**2
    #results_dict[p_str]['r2_pd']=r2_inf
    print 'D2: the R2 prediction accuracy (observed scale) of PleioPred was: %0.4f (%0.6f)' % (
        r2_inf2, ((1 - r2_inf2)**2) / num_individs2)
    out2.append(
        'D2: the R2 prediction accuracy (observed scale) of PleioPred was: ' +
        str(r2_inf2) + ' (' + str(((1 - r2_inf2)**2) / num_individs2) + ')\n')

    if corr_inf2 < 0:
        prs_D2 = -1 * prs_D2
    auc2 = pred_accuracy(y2, prs_D2)
    print 'D2: PleioPred AUC for the whole genome was: %0.4f' % auc2
    out2.append('D2: PleioPred AUC for the whole genome was: ' + str(auc2) +
                '\n')
    out2.append('D2: PleioPred COR for the whole genome was: ' +
                str(corr_inf2) + '\n')

    sp.savetxt('%s_y_' % (out_file_prefix) + '_D2.txt', y2)
    sp.savetxt('%s_prs' % (out_file_prefix) + '_PleioPred_D2.txt', prs_D2)

    # Write the D2 accuracy summary to disk
    ff_inf = open('%s_auc_' % (out_file_prefix) + '_PleioPred_D2.txt', "w")
    ff_inf.writelines(out2)
    ff_inf.close()

    f = gzip.open('%s_betas' % (out_file_prefix) + '_PleioPred_D1.pickled.gz',
                  'wb')
    cPickle.dump(avg_betas1, f, protocol=2)
    f.close()

    f = gzip.open('%s_betas' % (out_file_prefix) + '_PleioPred_D2.pickled.gz',
                  'wb')
    cPickle.dump(avg_betas2, f, protocol=2)
    f.close()
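# A minimal standalone sketch of the conjugate Dirichlet update used in the
# sampler above. The prior `alpha` and the per-component tallies are made-up
# toy numbers, not PleioPred output.
import scipy as sp

alpha = sp.ones(4)                          # symmetric Dirichlet prior (assumed)
tallies = sp.array([120., 30., 25., 825.])  # toy per-component SNP counts
Pi = sp.random.dirichlet(alpha + tallies, 1).flatten()
print(Pi)                                   # four proportions summing to 1
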
import scipy
from SloppyCell.ReactionNetworks import *
import os
from Nets import *

net.set_var_optimizable('Iis', False)
net.set_var_optimizable('Ils', False)

sens_traj = Dynamics.integrate_sensitivity(net,
                                           int_times,
                                           rtol=1e-9,
                                           fill_traj=True)

data_ids = [id for id in net.species.keys() if not net.get_var_constant(id)]

h, h_d = PerfectData.hessian_log_params(sens_traj,
                                        fixed_sf=True,
                                        return_dict=True,
                                        data_ids=data_ids)

scipy.savetxt('hessian.dat', h)
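# A hedged follow-up sketch (not part of the example above): once
# 'hessian.dat' is on disk, the usual sloppy-model analysis reads it back
# and inspects the eigenvalue spread.
import scipy
import scipy.linalg

h = scipy.loadtxt('hessian.dat')     # Hessian written by scipy.savetxt above
evals = scipy.linalg.eigvalsh(h)     # real eigenvalues, ascending order
print(evals[[0, -1]])                # smallest vs. largest: the "sloppy" spread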
Example #58
0
    chi, shiftuse, frac = HM(1000, lref, l, shift0)

    print 'chi2,shiftuse,frac'
    print chi, shiftuse, frac

    shiftout.append(shiftuse)
    s.wv -= shiftuse

    trim = int(abs(shiftuse / (s.wv[1] - s.wv[0]))) + 1
    #    if trim > trimmax: trimmax = trim
    #    print 'trim',trimmax

    y1, z1 = s.interp(S[0].wv[trim:-trim])

    plt.plot(S[0].wv[trim:-trim], y1, 'k')
    plt.plot(S[0].wv[trim:-trim], z1, 'r')
    plt.show()

    xout.append(S[0].wv[trim:-trim])
    yout.append(y1)
    zout.append(z1)

xout.append(S[0].wv)
yout.append(S[0].f)
zout.append(S[0].ef)

xref, yref, zref = tidy(xout, yout, zout)

sp.savetxt(sys.argv[3], sp.c_[xref, yref, zref])
sp.savetxt('ref_shifts.dat', sp.c_[shiftout])
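# The sp.c_[...] idiom above writes several same-length 1-D arrays as the
# columns of one text file. A self-contained toy version (made-up data and
# filename):
import scipy as sp

x = sp.linspace(4000., 7000., 5)    # toy wavelength grid
y = sp.ones(5)                      # toy coadded flux
z = 0.1 * sp.ones(5)                # toy flux errors
sp.savetxt('coadd_demo.txt', sp.c_[x, y, z])  # three whitespace-separated columns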
Example #59
0
        energy.append(nd.power.energy * 1000.0)
        loss.append(nd.n_received_failed_loss)
        total_energy += nd.power.energy_consumption * 1000.0

    c_power.append(consume[n - 1])  # coordinator

    n_power.append((
        n,
        consume[n - 1],
        total_energy,  # sum of other node consumption
        coordinator_node.n_received,
        coordinator_node.n_received_failed_loss))

    sp.savetxt(get_path(folder, "energy-%s.csv" % n),
               list(zip(range(1, n + 1), energy)),
               delimiter="\t",
               fmt="%s",
               header="Nodes\tEnergy (mJ)",
               comments='')

    sp.savetxt(get_path(folder, "stats-%s.csv" % n),
               message_stats,
               delimiter=",",
               fmt="%8s",
               comments='',
               header="Node,transmit,transmit_failed_power,"
               "received,received_failed_power,"
               "received_failed_loss")

    # plotting results
    #print len(snr_base)
    lqi_base = 6 * np.array(snr_base) + 538
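# A self-contained toy version of the CSV-writing pattern above; the node
# data are invented, and comments='' stops savetxt from prefixing the
# header line with '#'.
import scipy as sp

nodes = range(1, 4)
energy = [1.2, 0.9, 1.5]            # toy per-node energy in mJ
sp.savetxt('energy-demo.csv',
           list(zip(nodes, energy)),
           delimiter='\t',
           fmt='%s',
           header='Nodes\tEnergy (mJ)',
           comments='')
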
import os
import sys
# Serializer and calculatedistance are assumed to come from the surrounding
# project; savetxt is assumed to be scipy's (the topic of this page).
from scipy import savetxt

AtomName1 = 'C'
ResidueID1 = 1
AtomName2 = 'N'
ResidueID2 = 23
path = '/Users/tud51931/projects/MSM/msm/ff03ERR-hybridkcenter/RMSDCluster4.0'
Distances = []
ProjectInfo = Serializer.LoadFromHDF('%s/ProjectInfo.h5' % path)
LongestTrajLength = max(ProjectInfo['TrajLengths'])
os.chdir(path)
if os.path.exists('EndtoEndDistances.dat'):
    print "EndtoEndDistances.dat exists!"
    sys.exit()
print 'Calculating the eeDistance of each trajectory......'
for i in range(ProjectInfo['NumTrajs']):
    trajfile = ProjectInfo['TrajFilePath'] + ProjectInfo[
        'TrajFileBaseName'] + '%d' % i + ProjectInfo['TrajFileType']
    print '%d in %d Trajectories' % (i, ProjectInfo['NumTrajs']), trajfile
    d = calculatedistance(AtomName1, ResidueID1, AtomName2, ResidueID2,
                          trajfile, LongestTrajLength)
    Distances.append(d)
print "Save data to ./EndtoEndDistance.dat"
savetxt('EndtoEndDistances.dat', Distances)
print "Done."

#distance = loadtxt('EndtoEndDistances.dat')
#print 'distance',distance
#distancemasked = np.ma.array(distance,mask=(distance==-1))
#print 'masked distance', distancemasked
#savetxt('EndtoEndDistances.Masked.dat',distancemasked)
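# The commented-out lines above hint at masking the -1 padding; a hedged
# sketch of that post-processing (assuming -1 marks frames past the end of
# a shorter trajectory):
import numpy as np
import scipy as sp

distance = sp.loadtxt('EndtoEndDistances.dat')
masked = np.ma.masked_equal(distance, -1)      # hide the padding frames
print(masked.mean(axis=1))                     # per-trajectory mean distance
sp.savetxt('EndtoEndDistances.Masked.dat', masked.filled(-1))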