Code example #1
File: Process.py Project: radut/aja
def processTexFileForCPIDs(textFileName):
    textFile = open(textFileName, 'r')
    i = 1
    for line in textFile:
        #print line
        if line.find("CPID $") != -1:
           cpid = "CPID $" + line.split("CPID $")[1].strip()
           print cpid
           cpidMap[i] = cpid
           i = i + 1
        if line.find("Introduction") == 0:
           toCSV(cpidMap, cpidOutFileName)
           if os.path.isfile(cpidOutFileName):
              transposer.transpose(i=cpidOutFileName, o="DiagnosticsValidationCPIDTransposed"+'.csv')
              print "Found "+cpidOutFileName+" transposing it for easy analysing data"
           return
Code example #2
File: test.py Project: mqondisi/transposer
    def test_column_to_row_with_write(self):
        test_in_file = '/tmp/in.csv'
        test_out_file = '/tmp/rows.csv'
        
        test_in_data = [
            ['FirstName', 'LastName', 'Age'],
            ['Frank', 'Jones', '42'],
            ['Samantha', 'Clemons', '24'],
            ['Keith', 'Hamilton', '35'],
        ]

        # write test in data
        f = open(test_in_file, 'w')
        for l in test_in_data:
            f.write(','.join(l) + '\n')
        f.close()
        
        out_data = transposer.transpose(i=test_in_file, o=test_out_file, d=',')
        
        # read in output
        f = open(test_out_file, 'r')
        lines = f.readlines()
        f.close()

        self.assertEqual(['FirstName', 'Frank', 'Samantha', 'Keith'], 
                         lines[0].strip().split(','))
        self.assertEqual('LastName,Jones,Clemons,Hamilton', lines[1].strip())
        self.assertEqual('Age,42,24,35', lines[2].strip())

        for i, x in enumerate(out_data):
            self.assertEqual(out_data[i], lines[i].strip())
Code example #3
File: test.py Project: mqondisi/transposer
    def test_row_to_column_no_write(self):
        original_in_file = '/tmp/in.csv'
        test_in_file = '/tmp/rows.csv'
        test_out_file = '/tmp/cols.csv'
        
        out_data = transposer.transpose(i=test_in_file, o=None, d=',')
        
        self.assertFalse(os.path.exists(test_out_file))

        # read in original input
        g = open(original_in_file, 'r')
        original_lines = g.readlines()
        g.close()
        
        for i, x in enumerate(out_data):
            self.assertEqual(out_data[i].strip(), original_lines[i].strip())
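
Taken together, the two tests above (examples #2 and #3) show the call pattern this listing documents: transposer.transpose(i=..., o=..., d=...) writes the transposed rows to the output file and also returns them, while o=None skips the write and only returns the rows. Below is a minimal standalone sketch of both modes, assuming the package is installed and behaves exactly as those tests expect; the paths and sample data are placeholders.

import transposer

# Placeholder input file for illustration only.
in_file = '/tmp/example_in.csv'
out_file = '/tmp/example_out.csv'

with open(in_file, 'w') as f:
    f.write('Name,Score\n')
    f.write('Ada,10\n')
    f.write('Grace,12\n')

# Mode 1: write the transposed rows to out_file and also get them back.
rows = transposer.transpose(i=in_file, o=out_file, d=',')

# Mode 2: o=None transposes in memory only; no output file is written.
rows_in_memory = transposer.transpose(i=in_file, o=None, d=',')

for row in rows_in_memory:
    print(row)  # expected, per the tests above: 'Name,Ada,Grace' then 'Score,10,12'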
Code example #4
File: LD_prune.py Project: pmonnahan/ArenosaPloidy
                if line_idx % 100000 == 0:
                    print(line_idx)

        print 'Finished vcf: ', vcf
        print 'mean r2: ', numpy.mean(r2_list)
        print 'Screened sites for vcf: ', vcf_num_screened
        print 'Tot sites for vcf: ', vcf_sites

    # This section transposes the temporary files created during this replicate and adds headers so that they are formatted for Structure
    structtempfile.close()
    subtempfile.close()

    # Transposes file
    jj = transposer.transpose(
        i=args.v + 'LD_Pruned/' + args.o + ".rep" + str(rep) +
        ".LD_Pruned.TransposedStruct.txt",
        d="\t",
    )
    kk = transposer.transpose(
        i=args.v + 'LD_Pruned/' + args.o + ".rep" + str(rep) +
        ".LD_Pruned.TransposedStructSubSample.txt",
        d="\t",
    )

    # Write header names for each marker
    for marker in markernames:
        structfile.write("%s\t" % marker)
        if args.s == 'true':
            subfile.write("%s\t" % marker)
    structfile.write("\n")
    if args.s == 'true':
Code example #5
#pool 1
g = open('randomAmanitaP1.txt', 'w')
for x in times:
    p1list = []
    for num in number_coord:
        v = random.choice(pool1)  # pick one of the 100 entries in pool 1
        p1list.append(v)

    for thing in p1list:
        g.write(thing + ',')
    g.write('\n')
g.close()

# pool 2
h = open('randomAmanitaP2.txt', 'w')
for x in times:
    p2list = []
    for num in number_coord:
        v = random.choice(pool2)  # pick one of the 100 entries in pool 2
        p2list.append(v)

    for thing in p2list:
        h.write(thing + ',')
    h.write('\n')
h.close()

# transpose the comma-delimited files
transposer.transpose(i='randomAmanitaP1.txt', o='outputAmanitaP1.csv')
transposer.transpose(i='randomAmanitaP2.txt', o='outputAmanitaP2.csv')
Code example #6
File: ParseZygoteLogs.py Project: radut/aja
        #Pattern: BOOT-TIME| DomainServiceController : com.gm.server.erasedataservice.GMEraseDataService : phase : 4 : took |14
        domainServiceInfo = line.split("DomainServiceController")
        if len(domainServiceInfo) > 1:
           ds = domainServiceInfo[1]
           name = ds.split(':')[1].strip()
           time = ds.split('|')[-1].strip()
           processTimeMap[name] = time
           processOrder[name] = i
           #print name, time
        i = i + 1
        if line.find("HMIReady") != -1: #work around for HMIReady which is out of pattern
           processTimeMap["HMIReady"] = line.split('|')[-1].strip()
           processOrder["HMIReady"] = 99
    #print processTimeMap
    if processOrderInsertFlag == True: #Insert the order number for ease of sorting in Excel
       toCSV(processOrder, parsedFile, "ProcessOrder")
       processOrderInsertFlag = False
    toCSV(processTimeMap, parsedFile, logFileName)
    logFile.close()

print "Processing the folder <before>..."
processFile(beforeDir)
print "Processing the folder <after>..."
processFile(afterDir)

if os.path.isfile(parsedFile+".csv"):
  transposer.transpose(i=parsedFile+'.csv', o=parsedFile+"Transposed"+'.csv')
  print "Found "+parsedFile+".csv, transposing it for easy analysing data"


Code example #7
File: console_script.py Project: mqondisi/transposer
def main():
    args = parse_args()
    transposer.transpose(i=args.file_in, o=args.file_out, d=args.delimiter)
Code example #8
def transpose():
	files = sort()
	for file in files:
		if file.endswith('.csv'):
			print file
			transposer.transpose(i=file, o=file)
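
Example #8 relies on a project-local sort() helper that is not shown in the snippet. A self-contained sketch of the same idea is below, assuming that helper simply returns the sorted file names of the current directory; the in-place call (o equal to i) is taken directly from the example.

import os
import transposer

def transpose_all_csvs():
    # Stand-in for the project's sort() helper (an assumption; not shown above).
    files = sorted(os.listdir('.'))
    for name in files:
        if name.endswith('.csv'):
            print(name)
            # Overwrite each CSV with its transposed version, as example #8 does.
            transposer.transpose(i=name, o=name)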