def test_run(self):
    """run_merge should join overlapping contigs and remap read positions.

    Contigs 0 and 1 overlap ('AAAATCA' + 'AATCAGG' -> 'AAAATCAGG'), so
    reads formerly placed on contig 1 shift onto the merged contig and
    the standalone 'TTTTTTT' contig is renumbered from 2 to 1.
    """
    contigs = ['AAAATCA', 'AATCAGG', 'TTTTTTT']
    reads = {
        'AAAAT': [[0, 0, 0.5, 1]],
        'AAATC': [[0, 1, 0.5, 1]],
        'AATCA': [[0, 2, 0.5, 1], [1, 0, 0.5, 1]],
        'ATCAG': [[1, 1, 0.5, 1]],
        'TTTTT': [[2, 0, 0.5, 1]],
    }
    merged_contigs, merged_reads = mc.run_merge(contigs, reads, 3)
    expected_contigs = ['AAAATCAGG', 'TTTTTTT']
    expected_reads = {
        'ATCAG': [[0, 3, 0.5, 1]],
        'AATCA': [[0, 2, 0.5, 1]],
        'AAAAT': [[0, 0, 0.5, 1]],
        'TTTTT': [[1, 0, 0.5, 1]],
        'AAATC': [[0, 1, 0.5, 1]],
    }
    self.assertEqual(expected_reads, merged_reads)
    self.assertEqual(expected_contigs, merged_contigs)
def test_run(self):
    """Merging with overlap length 3 collapses the first two contigs into
    'AAAATCAGG' and shifts each affected read's (contig, offset) pair."""
    input_contigs = ['AAAATCA', 'AATCAGG', 'TTTTTTT']
    input_reads = dict(
        AAAAT=[[0, 0, 0.5, 1]],
        AAATC=[[0, 1, 0.5, 1]],
        AATCA=[[0, 2, 0.5, 1], [1, 0, 0.5, 1]],
        ATCAG=[[1, 1, 0.5, 1]],
        TTTTT=[[2, 0, 0.5, 1]],
    )
    got_contigs, got_reads = mc.run_merge(input_contigs, input_reads, 3)
    want_reads = {
        'AAAAT': [[0, 0, 0.5, 1]],
        'AAATC': [[0, 1, 0.5, 1]],
        'AATCA': [[0, 2, 0.5, 1]],
        'ATCAG': [[0, 3, 0.5, 1]],
        'TTTTT': [[1, 0, 0.5, 1]],
    }
    self.assertEqual(want_reads, got_reads)
    self.assertEqual(['AAAATCAGG', 'TTTTTTT'], got_contigs)
def _main():
    """Iterative assembly driver.

    Parses a reads file (``sys.argv[1]``) into a reads dictionary, then
    alternates read mapping (``rm.run``), consensus building
    (``cs.run_consensus``), and contig merging (``mc.run_merge``) for
    ``NUM_ITERS`` rounds, logging contigs, likelihoods, and per-iteration
    read placements under the output directory ``sys.argv[2]``.

    Fixes over the previous version: all file handles are closed via
    context managers (the input file and both log files were leaked),
    the duplicated read-dump loop is factored into a helper, and stray
    trailing commas after ``ll_file.write(...)`` (which built pointless
    1-tuples) are removed. Output bytes are unchanged.
    """

    def _dump_reads(path, reads):
        # One CSV row per read placement, same format as before.
        # NOTE(review): l[3] is written in both the first and the last
        # column; the last one probably should be l[2] (the 0.5 value).
        # Kept byte-identical to the original output -- TODO confirm.
        with open(path, 'w') as out:
            for r in reads:
                for l in reads[r]:
                    out.write(
                        str(l[3]) + ',' + str(l[0]) + ',' +
                        str(l[1]) + str(',') + str(l[3]) + '\n')

    def _dump_contigs(out, tag, label, contig_list):
        # One tab-separated line: "<tag>\t<label>\t<c1>\t<c2>\t...\n"
        out.write('%s\t%s\t' % (str(tag), str(label)))
        for c in contig_list:
            out.write('%s\t' % (str(c)))
        out.write('\n')
        out.flush()

    # Open input and build the reads dictionary (handle closed on exit).
    with open(sys.argv[1], 'r') as f:
        reads_dict = init._process(f)

    # First consensus pass gives the starting contig set.
    contigs = cs.run_consensus(reads_dict)

    with open(sys.argv[2] + '/contig.txt', 'w+') as contig_file, \
            open(sys.argv[2] + '/likelihood.txt', 'w+') as ll_file:
        likelihood = 0
        likelihood_new = 0
        for i in range(NUM_ITERS):
            # --- FILE WRITES: state at the top of this iteration ---
            _dump_contigs(contig_file, i, 'start', contigs)
            ll_file.write('%s\t%s\t%s\n'
                          % (str(i), str(likelihood), str(len(contigs))))
            ll_file.flush()
            _dump_reads(sys.argv[2] + '/reads_trial_' + str(i) + '.txt',
                        reads_dict)

            # --- COMPUTATION: map reads, rebuild consensus, merge ---
            likelihood = likelihood_new
            reads_dict = rm.run(reads_dict, contigs)
            contigs = cs.run_consensus(reads_dict)
            _dump_contigs(contig_file, i, 'merge', contigs)
            # how do we know if a merge has happened..do we need to know?
            contigs, reads_dict = mc.run_merge(contigs, reads_dict)
            likelihood_new = ll._likelihood(reads_dict, contigs)

            # Placements after the merge, indexed for the next trial.
            _dump_reads(sys.argv[2] + '/reads_trial_' + str(i + 1) + '.txt',
                        reads_dict)

        # Final summary lines (placement after the loop inferred from the
        # constant tag 1000 and the NUM_ITERS index -- original formatting
        # was ambiguous).
        for c in contigs:
            contig_file.write('1000\tend\t%s\n' % (str(c)))
        ll_file.write('%s\t%s\t%s\n'
                      % (str(NUM_ITERS), str(likelihood), str(len(contigs))))
        ll_file.flush()
def _main():
    """Drive NUM_ITERS rounds of read mapping, consensus building, and
    contig merging, logging contigs, likelihoods, and per-iteration read
    placements to files under the directory given by sys.argv[2]."""
    # Load the raw reads and build the initial reads dictionary.
    handle = open(sys.argv[1], 'r')
    reads_dict = init._process(handle)

    # First consensus pass yields the starting contig set.
    contigs = cs.run_consensus(reads_dict)

    out_dir = sys.argv[2]
    contig_file = open(out_dir + '/contig.txt', 'w+')
    ll_file = open(out_dir + '/likelihood.txt', 'w+')

    # Likelihood trails the computation by one iteration.
    likelihood = 0
    likelihood_new = 0
    for step in range(NUM_ITERS):
        # --- file writes: state at the top of this iteration ---
        contig_file.write('%s\tstart\t' % str(step))
        contig_file.write(''.join(str(c) + '\t' for c in contigs) + '\n')
        contig_file.flush()

        ll_file.write('%s\t%s\t%s\n'
                      % (str(step), str(likelihood), str(len(contigs))))
        ll_file.flush()

        # Dump the current read placements for this trial.
        reads_file = open(out_dir + '/reads_trial_' + str(step) + '.txt', 'w')
        for key in reads_dict:
            for rec in reads_dict[key]:
                row = ','.join(
                    [str(rec[3]), str(rec[0]), str(rec[1]), str(rec[3])])
                reads_file.write(row + '\n')
        reads_file.close()

        # --- computation of the algorithm ---
        likelihood = likelihood_new
        reads_dict = rm.run(reads_dict, contigs)
        contigs = cs.run_consensus(reads_dict)

        contig_file.write('%s\tmerge\t' % str(step))
        contig_file.write(''.join(str(c) + '\t' for c in contigs) + '\n')

        # how do we know if a merge has happened..do we need to know?
        contigs, reads_dict = mc.run_merge(contigs, reads_dict)
        likelihood_new = ll._likelihood(reads_dict, contigs)

        # Dump placements after the merge, indexed for the next trial.
        reads_file = open(
            out_dir + '/reads_trial_' + str(step + 1) + '.txt', 'w')
        for key in reads_dict:
            for rec in reads_dict[key]:
                row = ','.join(
                    [str(rec[3]), str(rec[0]), str(rec[1]), str(rec[3])])
                reads_file.write(row + '\n')
        reads_file.close()

    # --- final summary lines ---
    for c in contigs:
        contig_file.write('1000\tend\t%s\n' % str(c))
    ll_file.write('%s\t%s\t%s\n'
                  % (str(NUM_ITERS), str(likelihood), str(len(contigs))))
    ll_file.flush()