def count_alignments(bam_filename):
    import subprocess
    from Betsy import module_utils as mlib

    samtools = mlib.findbin("samtools")
    x = [samtools, "view", bam_filename]
    p = subprocess.Popen(
        x, bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT, close_fds=True)
    r = p.stdout

    alignments = 0
    aligned_reads = {}
    for line in r:
        # M03807:17:000000000-AHGYH:1:2108:11122:14861  99  1  14172  0  12S128...
        # ST-J00106:110:H5NY5BBXX:4:1101:1702:1209 2:N:0:NTCACG  141  *
        x = line.split("\t")
        assert len(x) >= 11, "SAM format"
        qname, flag = x[:2]
        flag = int(flag)
        #   2  mapped in proper pair
        #   4  query is unmapped
        #   8  mate is unmapped
        #is_aligned = flag & 0x02
        is_aligned = not (flag & 0x04)
        if is_aligned:
            alignments += 1
            aligned_reads[qname] = 1
    aligned_reads = len(aligned_reads)
    return aligned_reads, alignments

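# Illustrative sketch (hypothetical helper, not part of the original module):
# how the 0x04 check above classifies the two example records shown in the
# comments.  FLAG 99 has bit 0x4 clear, so the read counts as aligned; FLAG
# 141 has bit 0x4 set ("query is unmapped"), so it does not.
def _demo_flag_check():
    for flag in [99, 141]:
        is_aligned = not (flag & 0x04)
        print flag, is_aligned    # prints: 99 True, then 141 False
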
def run(self, network, antecedents, out_attributes, user_options, num_cores,
        out_path):
    import os
    from genomicode import parallel
    from genomicode import alignlib
    from genomicode import filelib
    from Betsy import module_utils as mlib

    bam_node, ref_node = antecedents
    bam_filenames = mlib.find_bam_files(bam_node.identifier)
    assert bam_filenames, "No .bam files."
    ref = alignlib.create_reference_genome(ref_node.identifier)
    filelib.safe_mkdir(out_path)
    metadata = {}
    metadata["tool"] = "samtools %s" % alignlib.get_samtools_version()

    # list of (in_filename, err_filename, out_filename)
    jobs = []
    for in_filename in bam_filenames:
        p, f = os.path.split(in_filename)
        sample, ext = os.path.splitext(f)
        err_filename = os.path.join(out_path, "%s.log" % sample)
        out_filename = os.path.join(out_path, "%s.pileup" % sample)
        x = in_filename, err_filename, out_filename
        jobs.append(x)

    # samtools mpileup -f [reference sequence] [BAM file(s)]
    #   > myData.mpileup
    samtools = mlib.findbin("samtools")
    sq = mlib.sq
    commands = []
    for x in jobs:
        in_filename, err_filename, out_filename = x

        x = [
            sq(samtools),
            "mpileup",
            "-f", sq(ref.fasta_file_full),
            ]
        x.append(sq(in_filename))
        x = " ".join(map(str, x))
        x = "%s 2> %s 1> %s" % (x, err_filename, out_filename)
        commands.append(x)

    parallel.pshell(commands, max_procs=num_cores)
    metadata["num_cores"] = num_cores
    metadata["commands"] = commands

    x = [x[-1] for x in jobs]
    filelib.assert_exists_nz_many(x)

    return metadata

def run(self, network, in_data, out_attributes, user_options, num_cores,
        out_path):
    import os
    from genomicode import filelib
    from genomicode import parallel
    from Betsy import module_utils as mlib

    sam_filenames = mlib.find_sam_files(in_data.identifier)
    assert sam_filenames, "No .sam files."
    filelib.safe_mkdir(out_path)
    metadata = {}

    samtools = mlib.findbin("samtools")

    jobs = []  # list of (sam_filename, bam_filename)
    for sam_filename in sam_filenames:
        p, f = os.path.split(sam_filename)
        assert f.endswith(".sam")
        f = f.replace(".sam", ".bam")
        bam_filename = os.path.join(out_path, f)
        x = sam_filename, bam_filename
        jobs.append(x)

    # Make a list of samtools commands.
    sq = parallel.quote
    commands = []
    for x in jobs:
        sam_filename, bam_filename = x

        # samtools view -bS -o <bam_filename> <sam_filename>
        x = [
            sq(samtools),
            "view",
            "-bS",
            "-o", sq(bam_filename),
            sq(sam_filename),
            ]
        x = " ".join(x)
        commands.append(x)

    metadata["commands"] = commands
    metadata["num_cores"] = num_cores
    parallel.pshell(commands, max_procs=num_cores)

    # Make sure the analysis completed successfully.
    x = [x[-1] for x in jobs]
    filelib.assert_exists_nz_many(x)

    return metadata

def run(self, network, in_data, out_attributes, user_options, num_cores,
        out_path):
    import os
    from genomicode import filelib
    from genomicode import parallel
    from Betsy import module_utils as mlib

    filenames = mlib.find_fastq_files(in_data.identifier)
    assert filenames, "FASTQ files not found: %s" % in_data.identifier
    filelib.safe_mkdir(out_path)
    metadata = {}

    fastqc = mlib.findbin("fastqc")
    fastqc_q = parallel.quote(fastqc)
    commands = [
        "%s --outdir=%s --extract %s" % (fastqc_q, out_path, x)
        for x in filenames]
    metadata["commands"] = commands
    metadata["num_cores"] = num_cores
    #commands = ["ls > %s" % x for x in filenames]
    parallel.pshell(commands, max_procs=num_cores)

    # FastQC generates files:
    #   <file>_fastqc/
    #   <file>_fastqc.zip
    # Because --extract is used, the contents of the .zip file are
    # identical to the directory, so the .zip files are redundant.
    # Delete them.
    files = os.listdir(out_path)
    filenames = [os.path.join(out_path, x) for x in files]
    for filename in filenames:
        zip_filename = "%s.zip" % filename
        if os.path.exists(zip_filename):
            os.unlink(zip_filename)

    return metadata

def run(self, network, in_data, out_attributes, user_options, num_cores,
        out_path):
    import os
    import shutil
    from genomicode import parallel
    from genomicode import filelib
    from genomicode import alignlib
    from Betsy import module_utils as mlib

    bam_filenames = mlib.find_bam_files(in_data.identifier)
    filelib.safe_mkdir(out_path)
    metadata = {}
    metadata["tool"] = "bam2fastx (unknown version)"

    # Somehow bam2fastx doesn't work if there are spaces in the
    # filename.  Make a temporary filename with no spaces, and
    # then rename it later.
    # Actually, may not be bam2fastx's fault.

    jobs = []
    for i, bam_filename in enumerate(bam_filenames):
        p, f, e = mlib.splitpath(bam_filename)
        #bai_filename = alignlib.find_bai_file(bam_filename)
        #assert bai_filename, "Missing index for: %s" % bam_filename
        #temp_bam_filename = "%d.bam" % i
        #temp_bai_filename = "%d.bam.bai" % i
        #temp_fa_filename = "%d.fa" % i
        fa_filename = os.path.join(out_path, "%s.fa" % f)
        x = filelib.GenericObject(
            bam_filename=bam_filename,
            #bai_filename=bai_filename,
            #temp_bam_filename=temp_bam_filename,
            #temp_bai_filename=temp_bai_filename,
            #temp_fa_filename=temp_fa_filename,
            fa_filename=fa_filename)
        jobs.append(x)

    bam2fastx = mlib.findbin("bam2fastx")

    # Link all the bam files.
    #for j in jobs:
    #    assert not os.path.exists(j.temp_bam_filename)
    #    #assert not os.path.exists(j.temp_bai_filename)
    #    os.symlink(j.bam_filename, j.temp_bam_filename)
    #    #os.symlink(j.bai_filename, j.temp_bai_filename)

    commands = []
    for j in jobs:
        # bam2fastx -A --fasta -o rqc14.fa rqc11.bam
        x = [
            mlib.sq(bam2fastx),
            "-A",
            "--fasta",
            #"-o", mlib.sq(j.temp_fa_filename),
            #mlib.sq(j.temp_bam_filename),
            "-o", mlib.sq(j.fa_filename),
            mlib.sq(j.bam_filename),
            ]
        x = " ".join(x)
        commands.append(x)
    metadata["commands"] = commands
    metadata["num_cores"] = num_cores
    parallel.pshell(commands, max_procs=num_cores)

    #for j in jobs:
    #    # Move the temporary files to the final location.
    #    shutil.move(j.temp_fa_filename, j.fa_filename)
    #    # Remove the link to the BAM file.
    #    os.unlink(j.temp_bam_filename)

    x = [j.fa_filename for j in jobs]
    filelib.assert_exists_nz_many(x)

    return metadata

def run(self, network, antecedents, out_attributes, user_options, num_cores,
        outfile):
    from genomicode import parselib
    from genomicode import parallel
    from Betsy import module_utils as mlib

    MAX_CORES = 4  # I/O intensive.

    fastq_node, sample_node, bam_node = antecedents
    bam_filenames = mlib.find_bam_files(bam_node.identifier)
    sample2fastq = mlib.find_merged_fastq_files(
        sample_node.identifier, fastq_node.identifier, as_dict=True)
    metadata = {}

    jobs = []  # list of (sample, bam_file, fastq_file)
    for filename in bam_filenames:
        path, sample, ext = mlib.splitpath(filename)
        assert sample in sample2fastq, "Missing fastq: %s" % sample
        fastq1, fastq2 = sample2fastq[sample]
        x = sample, filename, fastq1
        jobs.append(x)

    funcalls = []
    for x in jobs:
        sample, bam_filename, fastq_filename = x
        # Count the number of reads.
        x1 = count_reads, (fastq_filename, ), {}
        # Count the number of alignments.
        x2 = count_alignments, (bam_filename, ), {}
        funcalls.append(x1)
        funcalls.append(x2)
    assert len(funcalls) == len(jobs) * 2

    nc = min(num_cores, MAX_CORES)
    results = parallel.pyfun(funcalls, num_procs=nc)
    metadata["num_cores"] = nc

    # list of (sample, alignments, aligned_reads, total_reads, perc_aligned)
    results2 = []
    for i, x in enumerate(jobs):
        sample, bam_filename, fastq_filename = x
        x1 = results[i * 2]
        x2 = results[i * 2 + 1]
        total_reads = x1
        aligned_reads, alignments = x2
        perc_aligned = float(aligned_reads) / total_reads
        x = sample, alignments, aligned_reads, total_reads, perc_aligned
        results2.append(x)
    results = results2

    # Sort by sample name.
    results.sort()

    # Make a table where the rows are the samples and the columns
    # are the statistics.
    table = []
    header = ("Sample", "Alignments", "Aligned Reads", "Total Reads",
              "Perc Aligned")
    table.append(header)
    for x in results:
        sample, alignments, aligned_reads, total_reads, perc_aligned = x
        x1 = parselib.pretty_int(alignments)
        x2 = parselib.pretty_int(aligned_reads)
        x3 = parselib.pretty_int(total_reads)
        x4 = "%.2f%%" % (perc_aligned * 100)
        x = sample, x1, x2, x3, x4
        assert len(x) == len(header)
        table.append(x)

    # Write out the table as a text file.
    TXT_FILE = "summary.txt"
    handle = open(TXT_FILE, 'w')
    for x in table:
        print >> handle, "\t".join(x)
    handle.close()

    txt2xls = mlib.findbin("txt2xls", quote=True)
    parallel.sshell("%s -b %s > %s" % (txt2xls, TXT_FILE, outfile))

    return metadata

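# count_reads is called above but is not defined in this section.  A minimal
# illustrative sketch only (the actual Betsy implementation may differ, e.g.
# it may handle gzipped files), assuming a plain 4-line-per-record FASTQ:
def _count_reads_sketch(fastq_filename):
    num_lines = 0
    for line in open(fastq_filename):
        num_lines += 1
    assert num_lines % 4 == 0, "Not a 4-line FASTQ file?"
    return num_lines / 4
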
def run(self, network, antecedents, out_attributes, user_options, num_cores,
        out_path):
    import os
    from genomicode import filelib
    from genomicode import parallel
    from genomicode import alignlib
    from Betsy import module_utils as mlib

    bam_node, nc_node, ref_node = antecedents
    bam_filenames = mlib.find_bam_files(bam_node.identifier)
    assert bam_filenames, "No .bam files."
    nc_match = mlib.read_normal_cancer_file(nc_node.identifier)
    ref = alignlib.create_reference_genome(ref_node.identifier)
    filelib.safe_mkdir(out_path)
    metadata = {}
    metadata["tool"] = "MuSE %s" % alignlib.get_muse_version()

    wgs_or_wes = mlib.get_user_option(
        user_options, "wgs_or_wes", not_empty=True,
        allowed_values=["wgs", "wes"])
    dbsnp_file = mlib.get_user_option(
        user_options, "muse_dbsnp_vcf", not_empty=True, check_file=True)

    # Make sure dbsnp_file is compressed and indexed.
    assert dbsnp_file.endswith(".vcf.gz"), \
        "muse_dbsnp_vcf must be bgzip compressed."
    x = "%s.tbi" % dbsnp_file
    assert filelib.exists_nz(x), "muse_dbsnp_vcf must be tabix indexed."

    # sample -> bam filename
    sample2bamfile = mlib.root2filename(bam_filenames)
    # Make sure files exist for all the samples.
    mlib.assert_normal_cancer_samples(nc_match, sample2bamfile)

    # list of (normal_sample, cancer_sample, normal_bamfile, tumor_bamfile,
    #   muse_call_stem, muse_call_file, raw_vcf_outfile, vcf_outfile,
    #   logfile1, logfile2)
    opj = os.path.join
    jobs = []
    for (normal_sample, cancer_sample) in nc_match:
        normal_bamfile = sample2bamfile[normal_sample]
        cancer_bamfile = sample2bamfile[cancer_sample]
        path, sample, ext = mlib.splitpath(cancer_bamfile)
        muse_call_stem = opj(out_path, "%s.call" % cancer_sample)
        muse_call_file = "%s.MuSE.txt" % muse_call_stem
        raw_vcf_outfile = opj(out_path, "%s.vcf.raw" % cancer_sample)
        vcf_outfile = opj(out_path, "%s.vcf" % cancer_sample)
        log_outfile1 = opj(out_path, "%s.call.log" % cancer_sample)
        log_outfile2 = opj(out_path, "%s.sump.log" % cancer_sample)
        x = normal_sample, cancer_sample, normal_bamfile, cancer_bamfile, \
            muse_call_stem, muse_call_file, raw_vcf_outfile, vcf_outfile, \
            log_outfile1, log_outfile2
        jobs.append(x)

    # Generate the commands.
    # MuSE call -O test11 -f genomes/Broad.hg19/Homo_sapiens_assembly19.fa \
    #   bam04/196B-MG.bam bam04/PIM001_G.bam
    # MuSE sump -I test11.MuSE.txt -E -O test12.vcf \
    #   -D MuSE/dbsnp_132_b37.leftAligned.vcf.gz

    MuSE = mlib.findbin("muse")

    sq = mlib.sq
    commands = []
    for x in jobs:
        normal_sample, cancer_sample, normal_bamfile, cancer_bamfile, \
            muse_call_stem, muse_call_file, raw_vcf_outfile, vcf_outfile, \
            log_outfile1, log_outfile2 = x

        x = [
            sq(MuSE),
            "call",
            "-O", muse_call_stem,
            "-f", sq(ref.fasta_file_full),
            cancer_bamfile,
            normal_bamfile,
            ]
        x = " ".join(x)
        x = "%s >& %s" % (x, log_outfile1)
        commands.append(x)
    assert len(commands) == len(jobs)

    # Not sure about RAM.
    nc = mlib.calc_max_procs_from_ram(10, upper_max=num_cores)
    parallel.pshell(commands, max_procs=nc)
    metadata["num_cores"] = nc
    metadata["commands"] = commands

    # Make sure the log files have no errors.  The files should be empty.
    log_files = [x[8] for x in jobs]
    filelib.assert_exists_z_many(log_files)

    # Make sure the call files are created and not empty.
    call_files = [x[5] for x in jobs]
    filelib.assert_exists_nz_many(call_files)

    # Run the "sump" step.
    commands = []
    for x in jobs:
        normal_sample, cancer_sample, normal_bamfile, cancer_bamfile, \
            muse_call_stem, muse_call_file, raw_vcf_outfile, vcf_outfile, \
            log_outfile1, log_outfile2 = x

        x = [
            sq(MuSE),
            "sump",
            "-I", sq(muse_call_file),
            ]
        assert wgs_or_wes in ["wgs", "wes"]
        if wgs_or_wes == "wgs":
            x += ["-G"]
        else:
            x += ["-E"]
        x += [
            "-O", sq(raw_vcf_outfile),
            "-D", sq(dbsnp_file),
            ]
        x = " ".join(x)
        x = "%s >& %s" % (x, log_outfile2)
        commands.append(x)
    assert len(commands) == len(jobs)

    # Not sure about RAM.
    nc = mlib.calc_max_procs_from_ram(10, upper_max=num_cores)
    parallel.pshell(commands, max_procs=nc)
    metadata["commands"] = metadata["commands"] + commands

    # Make sure the log files have no errors.  The files should be empty.
    log_files = [x[9] for x in jobs]
    filelib.assert_exists_z_many(log_files)

    # Make sure the raw files are created and not empty.
    vcf_files = [x[6] for x in jobs]
    filelib.assert_exists_nz_many(vcf_files)

    # Fix the files.
    commands = []  # Should be python commands.
    for x in jobs:
        normal_sample, cancer_sample, normal_bamfile, cancer_bamfile, \
            muse_call_stem, muse_call_file, raw_vcf_outfile, vcf_outfile, \
            log_outfile1, log_outfile2 = x
        args = normal_sample, cancer_sample, raw_vcf_outfile, vcf_outfile
        x = alignlib.clean_muse_vcf, args, {}
        commands.append(x)
    parallel.pyfun(commands, num_procs=num_cores)

    # Delete the log_outfiles if empty.
    for x in jobs:
        normal_sample, cancer_sample, normal_bamfile, cancer_bamfile, \
            muse_call_stem, muse_call_file, raw_vcf_outfile, vcf_outfile, \
            log_outfile1, log_outfile2 = x
        if os.path.exists(log_outfile1):
            os.unlink(log_outfile1)
        if os.path.exists(log_outfile2):
            os.unlink(log_outfile2)

    # Make sure output VCF files exist.
    x = [x[7] for x in jobs]
    filelib.assert_exists_many(x)

    return metadata

def run(self, network, antecedents, out_attributes, user_options, num_cores,
        out_path):
    import os
    from genomicode import parallel
    from genomicode import filelib
    from genomicode import alignlib
    from genomicode import hashlib
    from Betsy import module_utils as mlib

    fastq_node, sample_node, orient_node, reference_node = antecedents
    fastq_files = mlib.find_merged_fastq_files(
        sample_node.identifier, fastq_node.identifier)
    ref = alignlib.create_reference_genome(reference_node.identifier)
    assert os.path.exists(ref.fasta_file_full)
    orient = mlib.read_orientation(orient_node.identifier)
    filelib.safe_mkdir(out_path)
    metadata = {}
    metadata["tool"] = "bowtie2 %s" % alignlib.get_bowtie2_version()

    # Bowtie2 doesn't handle files with spaces in them.  Make
    # temporary files without spaces.

    # Make a list of the jobs to run.
    jobs = []
    for i, x in enumerate(fastq_files):
        sample, pair1, pair2 = x
        bam_filename = os.path.join(out_path, "%s.bam" % sample)
        log_filename = os.path.join(out_path, "%s.log" % sample)
        sample_h = hashlib.hash_var(sample)
        temp_pair1 = "%d_%s_1.fa" % (i, sample_h)
        temp_pair2 = None
        if pair2:
            temp_pair2 = "%d_%s_2.fa" % (i, sample_h)
        j = filelib.GenericObject(
            sample=sample, pair1=pair1, pair2=pair2,
            temp_pair1=temp_pair1, temp_pair2=temp_pair2,
            bam_filename=bam_filename, log_filename=log_filename)
        jobs.append(j)

    for j in jobs:
        os.symlink(j.pair1, j.temp_pair1)
        if j.pair2:
            os.symlink(j.pair2, j.temp_pair2)

    # Generate bowtie2 commands for each of the files.
    attr2orient = {
        "single": None,
        "paired_fr": "fr",
        "paired_rf": "rf",
        "paired_ff": "ff",
        }
    orientation = attr2orient[orient.orientation]
    #x = sample_node.data.attributes["orientation"]
    #orientation = attr2orient[x]

    # Takes ~4 Gb per job.
    samtools = mlib.findbin("samtools")
    sq = parallel.quote
    commands = []
    for j in jobs:
        #sample, pair1, pair2, bam_filename, log_filename = x
        nc = max(1, num_cores / len(jobs))

        # bowtie2 -p 8 -x <genome> -1 <.fq> -2 <.fq> --fr
        #   2> test.log | samtools view -bS -o test.bam -
        x1 = alignlib.make_bowtie2_command(
            ref.fasta_file_full, j.temp_pair1, fastq_file2=j.temp_pair2,
            orientation=orientation, num_threads=nc)
        x2 = [
            sq(samtools),
            "view",
            "-bS",
            "-o", sq(j.bam_filename),
            "-",
            ]
        x2 = " ".join(x2)
        x = "%s 2> %s | %s" % (x1, sq(j.log_filename), x2)
        #x = "%s >& %s" % (x, sq(log_filename))
        commands.append(x)
    metadata["commands"] = commands

    parallel.pshell(commands, max_procs=num_cores)

    # Make sure the analysis completed successfully.
    x1 = [x.bam_filename for x in jobs]
    x2 = [x.log_filename for x in jobs]
    filelib.assert_exists_nz_many(x1 + x2)

    return metadata

def run(self, network, antecedents, out_attributes, user_options, num_cores,
        out_path):
    import os
    from genomicode import parallel
    from genomicode import filelib
    from genomicode import alignlib
    from Betsy import module_utils as mlib

    fastq_node, sai_node, orient_node, sample_node, reference_node = \
        antecedents
    # Find the merged fastq files.
    fastq_files = mlib.find_merged_fastq_files(
        sample_node.identifier, fastq_node.identifier)
    sai_path = sai_node.identifier
    assert filelib.dir_exists(sai_path)
    orient = mlib.read_orientation(orient_node.identifier)
    ref = alignlib.create_reference_genome(reference_node.identifier)
    filelib.safe_mkdir(out_path)
    metadata = {}
    metadata["tool"] = "bwa %s" % alignlib.get_bwa_version()

    # Technically, this doesn't need the SampleGroupFile, since that's
    # already reflected in the sai data.  But keep it anyway, because
    # the sai data might not always be generated by BETSY.

    # Find the sai files.
    sai_filenames = filelib.list_files_in_path(
        sai_path, endswith=".sai", case_insensitive=True)
    assert sai_filenames, "No .sai files."

    bwa = mlib.findbin("bwa")

    # bwa samse -f <output.sam> <reference.fa> <input.sai> <input.fq>
    # bwa sampe -f <output.sam> <reference.fa> <input_1.sai> <input_2.sai>
    #   <input_1.fq> <input_2.fq>

    # list of (pair1.fq, pair1.sai, pair2.fq, pair2.sai, output.sam)
    # all full paths
    jobs = []
    for x in fastq_files:
        sample, pair1_fq, pair2_fq = x

        # The sai file should be in the format:
        #   <sai_path>/<sample>.sai     Single end read
        #   <sai_path>/<sample>_1.sai   Paired end read
        #   <sai_path>/<sample>_2.sai   Paired end read
        # Look for pair1_sai and pair2_sai.
        pair1_sai = pair2_sai = None
        for sai_filename in sai_filenames:
            p, s, e = mlib.splitpath(sai_filename)
            assert e == ".sai"
            if s == sample:
                assert not pair1_sai
                pair1_sai = sai_filename
            elif s == "%s_1" % (sample):
                assert not pair1_sai
                pair1_sai = sai_filename
            elif s == "%s_2" % (sample):
                assert not pair2_sai
                pair2_sai = sai_filename
        assert pair1_sai, "Missing .sai file: %s" % sample
        if pair2_fq:
            assert pair2_sai, "Missing .sai file 2: %s" % sample
        if pair2_sai:
            assert pair2_fq, "Missing .fq file 2: %s" % sample

        sam_filename = os.path.join(out_path, "%s.sam" % sample)
        log_filename = os.path.join(out_path, "%s.log" % sample)

        x = sample, pair1_fq, pair1_sai, pair2_fq, pair2_sai, \
            sam_filename, log_filename
        jobs.append(x)

    orientation = orient.orientation
    #orientation = sample_node.data.attributes["orientation"]
    assert orientation in ["single", "paired_fr", "paired_rf"]

    # Make a list of bwa commands.
    sq = mlib.sq
    commands = []
    for x in jobs:
        sample, pair1_fq, pair1_sai, pair2_fq, pair2_sai, \
            sam_filename, log_filename = x

        if orientation == "single":
            assert not pair2_fq
            assert not pair2_sai

        samse = "samse"
        if orientation.startswith("paired"):
            samse = "sampe"

        x = [
            sq(bwa),
            samse,
            "-f", sq(sam_filename),
            sq(ref.fasta_file_full),
            ]
        if orientation == "single":
            x += [
                sq(pair1_sai),
                sq(pair1_fq),
                ]
        else:
            y = [
                sq(pair1_sai),
                sq(pair2_sai),
                sq(pair1_fq),
                sq(pair2_fq),
                ]
            if orientation == "paired_rf":
                y = [
                    sq(pair2_sai),
                    sq(pair1_sai),
                    sq(pair2_fq),
                    sq(pair1_fq),
                    ]
            x += y
        x += [
            ">&", sq(log_filename),
            ]
        x = " ".join(x)
        commands.append(x)
    metadata["commands"] = commands
    metadata["num_cores"] = num_cores
    parallel.pshell(commands, max_procs=num_cores)

    # Make sure the analysis completed successfully.
    x = [x[-2] for x in jobs]
    filelib.assert_exists_nz_many(x)

    return metadata

def run(self, network, antecedents, out_attributes, user_options, num_cores,
        out_path):
    import os
    from genomicode import parallel
    from genomicode import filelib
    from genomicode import genomelib
    from genomicode import config
    from Betsy import module_utils as mlib

    fasta_node, bam_node, sample_node, orient_node = antecedents
    fasta_data = mlib.find_merged_fastq_files(
        sample_node.identifier, fasta_node.identifier, find_fasta=True)
    bam_filenames = mlib.find_bam_files(bam_node.identifier)
    orient = mlib.read_orientation(orient_node.identifier)
    filelib.safe_mkdir(out_path)

    # TODO: Try to figure out version.
    metadata = {}
    metadata["tool"] = "RSeQC (unknown version)"

    pyrseqc = mlib.findbin("pyrseqc")

    gene_model = mlib.get_user_option(
        user_options, "gene_model", not_empty=True, allowed_values=["hg19"])
    if gene_model == "hg19":
        gene_path = config.rseqc_hg19
    else:
        raise AssertionError, "Unhandled: %s" % gene_model
    assert filelib.dir_exists(gene_path)
    gene_model_bed = os.path.join(gene_path, "RefSeq.bed12")
    housekeeping_model_bed = os.path.join(gene_path, "HouseKeepingGenes.bed")

    sample2fastadata = {}
    for x in fasta_data:
        sample, f1, f2 = x
        sample2fastadata[sample] = x

    is_paired = orient.orientation.startswith("paired")

    # Guess the read length.  Read the first fasta.
    assert sample2fastadata
    x = sample2fastadata.keys()[0]
    filename = sample2fastadata[x][1]
    lengths = {}  # length -> count
    for i, x in enumerate(genomelib.read_fasta_many(filename)):
        if i >= 100:
            break
        title, sequence = x
        l = len(sequence)
        lengths[l] = lengths.get(l, 0) + 1
    # Use the most common length.
    c_length = c_count = None
    for (l, c) in lengths.iteritems():
        if c_count is None or c > c_count:
            c_length, c_count = l, c
    assert c_length
    read_length = c_length

    jobs = []  # sample, bam_filename, fasta_file1, fasta_file2, outdir
    for bam_filename in bam_filenames:
        # <path>/<sample>.bam
        p, sample, e = mlib.splitpath(bam_filename)
        assert sample in sample2fastadata
        x, f1, f2 = sample2fastadata[sample]
        outdir = os.path.join(out_path, sample)
        x = sample, bam_filename, f1, f2, outdir
        jobs.append(x)

    # Some of the modules of RSeQC use a lot of memory.  Have seen a
    # Python process take 33 Gb, and an R process take 200 Gb.
    # However, most of the modules use much less memory.  So run one
    # pyrseqc at a time, and run each one of those processes in
    # parallel.  This is probably slower than running multiple
    # pyrseqc, but takes less memory.
    commands = []
    for x in jobs:
        sample, bam_filename, fasta_filename1, fasta_filename2, outdir = x
        # pyrseqc.py -j 20 --paired_end rqc11.bam rqc14.fa 76 \
        #   mod07.txt hg19.HouseKeepingGenes.bed rqc21 --dry_run
        x = [
            mlib.sq(pyrseqc),
            "-j", str(num_cores),
            ]
        if is_paired:
            x += ["--paired_end"]
        x += [
            mlib.sq(bam_filename),
            mlib.sq(fasta_filename1),
            str(read_length),
            mlib.sq(gene_model_bed),
            mlib.sq(housekeeping_model_bed),
            mlib.sq(outdir),
            ]
        x = " ".join(x)
        commands.append(x)
    metadata["commands"] = commands
    metadata["num_cores"] = num_cores

    # pyrseqc takes up to ~40 Gb per process.
    # read_distribution.py takes 33 Gb.
    # read_quality.py spins off an R process that takes ~200 Gb.
    # Make sure we don't use up more memory than is available on
    # the machine.
    #nc = mlib.calc_max_procs_from_ram(60, upper_max=num_cores)
    #metadata["num cores"] = nc
    #x = parallel.pshell(commands, max_procs=nc)

    # Because of memory, just run one at a time, but let each one use
    # multiple cores.
    for cmd in commands:
        x = parallel.sshell(cmd)
        assert x.find("Traceback") < 0, x

    filelib.assert_exists_nz(out_path)

    return metadata

def merge_vcf_files(vcf_filenames, out_filename, num_cores, tmp_path):
    # Put indexed files in tmp_path.
    import os
    import stat
    import shutil
    from genomicode import filelib
    from genomicode import hashlib
    from genomicode import parallel
    from Betsy import module_utils as mlib

    # TODO: find the version number of these tools.
    bgzip = mlib.findbin("bgzip")
    tabix = mlib.findbin("tabix")
    bcftools = mlib.findbin("bcftools")
    sq = parallel.quote

    tmp_path = os.path.realpath(tmp_path)
    filelib.safe_mkdir(tmp_path)

    # Keep track of all commands run.
    metadata = {}
    metadata["commands"] = []

    # Ignore VCF files that don't have any variants.
    vcf_filenames = [
        x for x in vcf_filenames if os.stat(x)[stat.ST_SIZE] > 0]

    # If there are no VCF files with any variants, then just create an
    # empty outfile and return.
    if not vcf_filenames:
        open(out_filename, 'w')
        return

    # 1.  Copy VCF files to temporary directory.  tmp_filename
    # 2.  Fix VCF files (e.g. NextGENe, JointSNVMix broken).
    # 3.  Sort the VCF files (needed for tabix).
    # 4.  Compress (bgzip).
    # 5.  Index (tabix).
    # 6.  Merge.

    jobs = []
    for in_filename in vcf_filenames:
        path, root, ext = mlib.splitpath(in_filename)
        sample = root
        x = "%s%s" % (hashlib.hash_var(root), ext)
        tmp_filename = os.path.join(tmp_path, x)
        x = filelib.GenericObject(
            sample=sample,
            in_filename=in_filename,
            tmp_filename=tmp_filename,
            )
        jobs.append(x)

    # Make sure temporary files are unique.
    seen = {}
    for j in jobs:
        assert j.tmp_filename not in seen
        seen[j.tmp_filename] = 1

    # Merge them in order of sample.  The germline sample will be
    # duplicated, and we will know the order of the germline sample.
    schwartz = [(x.sample, x) for x in jobs]
    schwartz.sort()
    jobs = [x[-1] for x in schwartz]

    # Copy all the VCF files to a temporary directory.
    for j in jobs:
        shutil.copy2(j.in_filename, j.tmp_filename)

    #for j in jobs:
    #    make_file_smaller(j.tmp_filename, 1000)

    for j in jobs:
        # NextGENe creates broken VCF files.  Fix them.
        fix_nextgene_vcf(j.tmp_filename)
        # JointSNVMix creates broken VCF files.  Fix them.
        fix_jointsnvmix_vcf(j.tmp_filename)

    for j in jobs:
        sort_vcf_file(j.tmp_filename)

    ## # Since we are merging the files, we need to make sure that
    ## # each file has a unique name.  If the names aren't unique,
    ## # then make them unique by adding the name of the file.
    ## all_unique = True
    ## seen = {}
    ## for x in jobs:
    ##     sample, in_filename, tmp_filename = x
    ##     samples = _get_samples_from_vcf(tmp_filename)
    ##     for s in samples:
    ##         if s in seen:
    ##             all_unique = False
    ##             break
    ##         seen[s] = 1
    ##     if not all_unique:
    ##         break
    ## if not all_unique:
    ##     for x in jobs:
    ##         sample, in_filename, tmp_filename = x
    ##         _uniquify_samples_in_vcf(tmp_filename, sample)

    # Compress the VCF files.
    # bgzip file.vcf
    commands = []
    for j in jobs:
        x = "%s %s" % (sq(bgzip), sq(j.tmp_filename))
        commands.append(x)
    parallel.pshell(commands, max_procs=num_cores, path=tmp_path)
    metadata["commands"].extend(commands)
    metadata["num_cores"] = num_cores
    x = ["%s.gz" % x.tmp_filename for x in jobs]
    filelib.assert_exists_nz_many(x)

    # Index the VCF files.
    # tabix -p vcf file.vcf.gz
    commands = []
    for j in jobs:
        x = "%s -p vcf %s.gz" % (sq(tabix), sq(j.tmp_filename))
        commands.append(x)
    parallel.pshell(commands, max_procs=num_cores, path=tmp_path)
    metadata["commands"].extend(commands)
    x = ["%s.gz.tbi" % j.tmp_filename for j in jobs]
    filelib.assert_exists_nz_many(x)

    # Run bcftools.
    ## For VCF files from somatic calls, the germline sample will
    ## be duplicated.  Add --force-samples to make sure this is
    ## still merged.
    # Since we need to append all the VCF files, it's easy to run
    # into the error:
    #   OSError: [Errno 7] Argument list too long
    #
    # To reduce the chance of this, figure out the path of the
    # tmp_filename, and run the analysis in that path so we can
    # use relative filenames.
    tmp_path = None
    for j in jobs:
        path, file_ = os.path.split(j.tmp_filename)
        if tmp_path is None:
            tmp_path = path
        assert path == tmp_path

    cmd = [
        sq(bcftools),
        "merge",
        "-o %s" % sq(out_filename),
        "-O v",
        "--force-samples",
        ]
    for j in jobs:
        path, file_ = os.path.split(j.tmp_filename)
        assert path == tmp_path
        cmd.append("%s.gz" % file_)
    x = " ".join(cmd)
    parallel.sshell(x, path=tmp_path)
    metadata["commands"].append(x)

    return metadata

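# For reference, the compress / index / merge steps above boil down to shell
# commands roughly like these for two samples (hypothetical filenames; the
# real names are hashed copies in tmp_path, and the commands run with
# tmp_path as the working directory):
#   bgzip sample1.vcf              # creates sample1.vcf.gz
#   bgzip sample2.vcf
#   tabix -p vcf sample1.vcf.gz    # creates sample1.vcf.gz.tbi
#   tabix -p vcf sample2.vcf.gz
#   bcftools merge -o <out_filename> -O v --force-samples \
#     sample1.vcf.gz sample2.vcf.gz
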
def run(self, network, antecedents, out_attributes, user_options, num_cores,
        out_path):
    import os
    import shutil
    from genomicode import filelib
    from genomicode import parallel
    from genomicode import parselib
    from Betsy import module_utils as mlib

    mpileup_node, nc_node = antecedents
    mpileup_filenames = filelib.list_files_in_path(
        mpileup_node.identifier, endswith=".pileup")
    assert mpileup_filenames, "No .pileup files."
    nc_match = mlib.read_normal_cancer_file(nc_node.identifier)
    #ref = alignlib.create_reference_genome(ref_node.identifier)
    filelib.safe_mkdir(out_path)

    # Figure out whether the purpose is to get coverage.  Change
    # the parameters if it is.
    assert "vartype" in out_attributes
    vartype = out_attributes["vartype"]
    assert vartype in ["snp", "indel"]

    sample2pufile = {}  # sample -> mpileup filename
    for filename in mpileup_filenames:
        path, sample, ext = mlib.splitpath(filename)
        sample2pufile[sample] = filename

    # Make sure files exist for all the samples.
    all_samples = []
    for (normal_sample, cancer_sample) in nc_match:
        if normal_sample not in all_samples:
            all_samples.append(normal_sample)
        if cancer_sample not in all_samples:
            all_samples.append(cancer_sample)
    missing = [x for x in all_samples if x not in sample2pufile]
    x = parselib.pretty_list(missing, max_items=5)
    assert not missing, "Missing pileup files for samples: %s" % x

    # list of (sample, normal_sample, cancer_sample,
    #   normal_pileup, cancer_pileup,
    #   tmp1_normal, tmp1_cancer, log_filename, out_filename)
    opj = os.path.join
    jobs = []
    for (normal_sample, cancer_sample) in nc_match:
        normal_pileup = sample2pufile[normal_sample]
        cancer_pileup = sample2pufile[cancer_sample]
        p, sample, ext = mlib.splitpath(cancer_pileup)
        tmp1_normal = opj(out_path, "%s.normal.tmp1" % sample)
        tmp1_cancer = opj(out_path, "%s.cancer.tmp1" % sample)
        log_filename = opj(out_path, "%s.log" % sample)
        out_filename = opj(out_path, "%s.vcf" % sample)
        x = sample, normal_sample, cancer_sample, \
            normal_pileup, cancer_pileup, \
            tmp1_normal, tmp1_cancer, log_filename, out_filename
        jobs.append(x)

    # VarScan will generate a "Parsing Exception" if there are 0
    # reads in a location.  Will be either "0" or blank.  Filter
    # those lines out.
    sq = parallel.quote
    commands = []
    for x in jobs:
        sample, normal_sample, cancer_sample, \
            normal_pileup, cancer_pileup, \
            tmp1_normal, tmp1_cancer, log_filename, out_filename = x
        x1 = "awk -F'\t' '$4 >= 1 {print}' %s > %s" % (
            normal_pileup, tmp1_normal)
        x2 = "awk -F'\t' '$4 >= 1 {print}' %s > %s" % (
            cancer_pileup, tmp1_cancer)
        commands.extend([x1, x2])
    parallel.pshell(commands, max_procs=num_cores)
    # Make sure the filtered pileup files were created.
    x = [x[5] for x in jobs] + [x[6] for x in jobs]
    filelib.assert_exists_nz_many(x)

    # java -jar VarScan.jar somatic [normal_pileup] [tumor_pileup]
    #   [output] OPTIONS
    varscan = mlib.findbin("varscan_jar")

    # Use parameters from:
    # Using VarScan 2 for Germline Variant Calling and Somatic
    # Mutation Detection

    # Make a list of commands.
    commands = []
    for x in jobs:
        sample, normal_sample, cancer_sample, \
            normal_pileup, cancer_pileup, \
            tmp1_normal, tmp1_cancer, log_filename, out_filename = x
        x = [
            "java", "-jar", sq(varscan),
            "somatic",
            sq(tmp1_normal),
            sq(tmp1_cancer),
            sample,
            "--min-coverage", 10,
            "--min-avg-qual", 15,
            "--min-normal-coverage", 10,
            "--min-tumor-coverage", 10,
            "--min-var-freq", 0.05,
            "--somatic-p-value", 0.05,
            "--output-vcf", 1,
            ]
        x = " ".join(map(str, x))
        x = "%s >& %s" % (x, log_filename)
        commands.append(x)
    parallel.pshell(commands, max_procs=num_cores)
    # The VarScan log files should be created and not empty.
    x = [x[7] for x in jobs]
    filelib.assert_exists_nz_many(x)

    # Files in out_path can get very big.  Clean them up.
    #   <sample>.normal.tmp1   Very big (10's Gb).
    #   <sample>.cancer.tmp1   Very big (10's to 100 Gb).
    for x in jobs:
        sample, normal_sample, cancer_sample, \
            normal_pileup, cancer_pileup, \
            tmp1_normal, tmp1_cancer, log_filename, out_filename = x
        if os.path.exists(tmp1_normal):
            os.unlink(tmp1_normal)
        if os.path.exists(tmp1_cancer):
            os.unlink(tmp1_cancer)

    # Copy the final file to the right place.
    for x in jobs:
        sample, normal_sample, cancer_sample, \
            normal_pileup, cancer_pileup, \
            tmp1_normal, tmp1_cancer, log_filename, out_filename = x
        # Will be written in the current directory.
        varscan_out = "%s.snp.vcf" % sample
        if vartype == "indel":
            varscan_out = "%s.indel.vcf" % sample
        filelib.assert_exists(varscan_out)
        shutil.copy2(varscan_out, out_filename)

    # VarScan names the samples "NORMAL" and "TUMOR".  Replace
    # them with the actual names.
    for x in jobs:
        sample, normal_sample, cancer_sample, \
            normal_pileup, cancer_pileup, \
            tmp1_normal, tmp1_cancer, log_filename, out_filename = x
        _fix_normal_cancer_names(out_filename, normal_sample, cancer_sample)

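# _fix_normal_cancer_names is not shown in this section.  A minimal sketch of
# what it presumably does, based on the comment above (the actual Betsy
# implementation may differ): rewrite the "#CHROM" header line so the VarScan
# placeholder sample names NORMAL and TUMOR become the real sample names.
def _fix_normal_cancer_names_sketch(filename, normal_sample, cancer_sample):
    lines = open(filename).readlines()
    for i, line in enumerate(lines):
        if not line.startswith("#CHROM"):
            continue
        cols = line.rstrip("\r\n").split("\t")
        cols = [normal_sample if c == "NORMAL" else c for c in cols]
        cols = [cancer_sample if c == "TUMOR" else c for c in cols]
        lines[i] = "\t".join(cols) + "\n"
    open(filename, 'w').writelines(lines)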