def sortIndexBAMs(self, path_to_exe=False, force=False, max_cpus=-1):
    if not path_to_exe:
        path_to_exe = _get_exe_path('samtools')

    processes = set()
    max_processes = _decide_max_processes(max_cpus)

    paths_to_BAMs_dd_si = []
    for BAM in self.paths_to_BAMs_dd:
        BAM_out = BAM[:-4] + '_si.bam'
        if not _os.path.exists(BAM_out) or force:
            # legacy samtools (< 1.0) sort syntax: output given as a prefix
            cmd = '{0} sort {1} {2}_si; {0} index {2}_si.bam'.format(
                    path_to_exe, BAM, BAM[:-4])
            print('Called: %s' % cmd)
            processes.add(_subprocess.Popen(cmd, shell=True))
            if len(processes) >= max_processes:
                # block until any child exits, then prune finished processes
                (pid, exit_status) = _os.wait()
                processes.difference_update(
                        [p for p in processes if p.poll() is not None])
        else:
            print('Found:')
            print(BAM_out)
            print('use "force = True" to overwrite')

        paths_to_BAMs_dd_si += [BAM_out]

    # Check if all the child processes were closed
    for p in processes:
        if p.poll() is None:
            p.wait()

    self.paths_to_BAMs_dd_si = paths_to_BAMs_dd_si
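# NB sortIndexBAMs() above uses the legacy samtools (< 1.0) calling
# convention, where `samtools sort` takes an output filename *prefix*.
# samtools >= 1.3 removed that form in favour of an explicit -o output
# file. A minimal sketch of building the equivalent shell pipeline under
# that assumption (helper name and usage here are hypothetical, not part
# of baga):
def _sort_index_modern_samtools(samtools_exe, bam_in):
    # e.g. 'sample.bam' -> 'sample_si.bam' plus index 'sample_si.bam.bai'
    bam_out = bam_in[:-4] + '_si.bam'
    return '{0} sort -o {1} {2}; {0} index {1}'.format(
            samtools_exe, bam_out, bam_in)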
def SPAdes(self, exe=[], output_folder=['assemblies', 'SPAdes'],
        mem_num_gigs=8, max_cpus=-1, single_assembly=False,
        careful=True, only_assembler=False):
    '''
    de novo assembly of short reads using SPAdes

    By default, each set of paired read fastq files provided in the
    dictionary self.read_files is assembled separately. If
    single_assembly is set to True, all sets of paired read fastq files
    are combined into a single assembly.

    http://spades.bioinf.spbau.ru/release3.6.1/manual.html

    relevant inputs:
    -o <output_dir> Specify the output directory. Required option.
    --sc required for MDA (single-cell) data.
    --only-error-correction
    --only-assembler
    --careful reduce the number of mismatches and short indels; runs
        MismatchCorrector, a post-processing tool. Recommended.
    --continue from the specified output folder starting from the last
        available check-point
    --restart-from <check_point>
        ec      restart from error correction
        as      restart assembly module from the first iteration
        k<int>  restart from the iteration with specified k values, e.g. k55
        mc      restart mismatch correction
    --pe1-12 <file_name> interlaced forward and reverse paired-end reads.
    --pe1-1 <file_name> File with forward reads.
    --pe1-2 <file_name> File with reverse reads.
    --pe1-s <file_name> File with unpaired reads.
    . . use --pe2-... for next library
    --threads <int>
    --memory <int> max memory in Gb
    -k <int,int,...> Comma-separated list of odd ascending k-mers.
        If --sc is set the default values are 21,33,55; for multicell
        data sets it is auto
    --cov-cutoff <float> positive float value, or 'auto', or 'off'.
        Default value is 'off'
    '''
    assert isinstance(output_folder, list), \
            'Provide output folder as list of folders forming path'

    base_output_path = _os.path.sep.join(output_folder)
    if not _os.path.exists(base_output_path):
        _os.makedirs(base_output_path)

    # max threads is slightly different to cpus . . can probably use more
    max_processes = _decide_max_processes(max_cpus)

    # if an exe is not provided, use that stored in Dependencies
    if len(exe):
        use_exe = _os.path.sep.join(exe)
    else:
        from baga import Dependencies
        use_exe = _get_exe_path('spades')

    def run_SPAdes(cmd):
        proc = _subprocess.Popen(cmd, stdout=_subprocess.PIPE,
                stderr=_subprocess.PIPE)
        # allow for failed SPAdes runs (possibly caused by small fastq
        # files) <== but also check they were actually built properly
        try:
            stdout_value, stderr_value = proc.communicate()
            # collect the warnings block that precedes 'Warnings saved to'
            checkthese = []
            getline = False
            for line in stdout_value.split('\n'):
                if 'Warnings saved to' in line:
                    getline = False
                if getline:
                    l = line.rstrip()
                    if len(l):
                        checkthese += [l]
                if 'SPAdes pipeline finished WITH WARNINGS!' in line:
                    getline = True

            if len(checkthese):
                print('SPAdes completed with warnings:\n{}\n'.format(
                        '\n'.join(checkthese)))
            else:
                print('SPAdes completed without warnings')

            # with open('___SPAdes_{}_good_{}.log'.format(cnum, thetime), 'w') as fout:
            #     fout.write(stdout_value)
            path2contigs = _os.path.sep.join([this_output_path,
                    'contigs.fasta'])
        except _subprocess.CalledProcessError as e:
            # NB Popen.communicate() does not itself raise
            # CalledProcessError, so this branch is defensive only
            print('SPAdes probably did not complete: error returned '
                    '({})'.format(proc.returncode))
            print('Error: {}'.format(e))
            print('Writing some info relevant to SPAdes crash to '
                    '___SPAdes_{}_bad_{}.log'.format(cnum, thetime))
            with open('___SPAdes_{}_bad_{}.log'.format(cnum, thetime),
                    'w') as fout:
                # dir() returns a list: convert to str before writing
                fout.write(str(dir(proc)))
                fout.write('\n' + str(e.returncode) + '\n')
                fout.write(_os.path.sep.join([this_output_path,
                        'contigs.fasta']))
            path2contigs = None

        return path2contigs

    if isinstance(use_exe, list):
        # allow for use of prepended executable with script to run
        cmd = list(use_exe)
    else:
        # or just executable
        cmd = [use_exe]

    contigs = {}
    if single_assembly:
        print('Combining reads aligned at multiple regions into single '
                'assembly')
        if isinstance(use_exe, list):
            # allow for use of prepended executable with script to run
            cmd = list(use_exe)
        else:
            # or just executable
            cmd = [use_exe]
        for cnum, (pairname, files) in enumerate(self.read_files.items()):
            # allow use of tuples or dicts by converting dicts to lists
            if isinstance(files, dict):
                use_files = []
                for k, v in sorted(files.items()):
                    use_files += [v]
            else:
                use_files = files

            cmd += ['--pe{}-1'.format(cnum + 1), use_files[0]]
            cmd += ['--pe{}-2'.format(cnum + 1), use_files[1]]
            try:
                # use unpaired reads if available
                cmd += ['--pe{}-s'.format(cnum + 1), use_files[2]]
            except IndexError:
                pass
            try:
                # add a second library if provided
                if isinstance(self.read_files2[pairname], dict):
                    # if a dict supplied, make it a list
                    use_files2 = []
                    for k, v in sorted(self.read_files2[pairname].items()):
                        use_files2 += [v]
                else:
                    use_files2 = self.read_files2[pairname]

                cmd += ['--pe{}-1'.format(cnum + 2), use_files2[0]]
                cmd += ['--pe{}-2'.format(cnum + 2), use_files2[1]]
                try:
                    cmd += ['--pe{}-s'.format(cnum + 2), use_files2[2]]
                except IndexError:
                    pass
            except AttributeError:
                pass

        ## this isn't very flexible:
        # retain <sample>__<genome> from pairname:
        # pairname == <sample>__<genome>_<start>-<end>+<padding>
        # and replace with multiregion
        folder = '{}__{}_{}'.format(pairname.split('__')[0],
                pairname.split('__')[1].split('_')[0], 'multi_region')
        this_output_path = _os.path.sep.join(output_folder + [folder])
        if not _os.path.exists(this_output_path):
            _os.makedirs(this_output_path)

        cmd += ['-o', this_output_path]
        cmd += ['--threads', str(max_processes)]
        cmd += ['--memory', str(mem_num_gigs)]
        if only_assembler:
            cmd += ['--only-assembler']
        if careful:
            cmd += ['--careful']

        thetime = _time.asctime(_time.localtime(_time.time()))
        print('about to launch SPAdes . . . at {}'.format(thetime))
        print(' '.join(cmd))
        contigs['multi_region'] = run_SPAdes(cmd)
    else:
        start_time = _time.time()
        # prepare commandline and launch each SPAdes assembly
        contigs = {}
        for cnum, (pairname, files) in enumerate(
                sorted(self.read_files.items())):
            if isinstance(use_exe, list):
                # allow for use of prepended executable with script to run
                cmd = list(use_exe)
            else:
                # or just executable
                cmd = [use_exe]
            # allow use of tuples or dicts by converting dicts to lists
            if isinstance(files, dict):
                use_files = []
                for k, v in sorted(files.items()):
                    use_files += [v]
            else:
                use_files = files

            cmd += ['--pe1-1', use_files[0]]
            cmd += ['--pe1-2', use_files[1]]
            try:
                # use unpaired reads if available
                cmd += ['--pe1-s', use_files[2]]
            except IndexError:
                pass
            try:
                # add a second library if provided
                if isinstance(self.read_files2[pairname], dict):
                    # if a dict supplied, make it a list
                    use_files2 = []
                    for k, v in sorted(self.read_files2[pairname].items()):
                        use_files2 += [v]
                else:
                    use_files2 = self.read_files2[pairname]

                cmd += ['--pe2-1', use_files2[0]]
                cmd += ['--pe2-2', use_files2[1]]
                try:
                    cmd += ['--pe2-s', use_files2[2]]
                except IndexError:
                    pass
            except AttributeError:
                pass

            this_output_path = _os.path.sep.join(output_folder + [pairname])
            if not _os.path.exists(this_output_path):
                _os.makedirs(this_output_path)

            cmd += ['-o', this_output_path]
            cmd += ['--threads', str(max_processes)]
            cmd += ['--memory', str(mem_num_gigs)]
            if only_assembler:
                cmd += ['--only-assembler']
            if careful:
                cmd += ['--careful']

            thetime = _time.asctime(_time.localtime(_time.time()))
            print('about to launch SPAdes . . . at {}'.format(thetime))
            print(' '.join(cmd))
            contigs[pairname] = run_SPAdes(cmd)
            if len(self.read_files) > 1:
                # report durations, time left etc
                _report_time(start_time, cnum, len(self.read_files))

    self.paths_to_contigs = contigs
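# A minimal usage sketch for SPAdes() above, assuming a Reads-like
# instance (here named `reads`, a hypothetical variable) whose
# read_files dict already maps pair names to trimmed fastq paths; each
# value of paths_to_contigs is a contigs.fasta path, or None for a
# failed run:
def _example_SPAdes_usage(reads):
    reads.SPAdes(mem_num_gigs=16, max_cpus=4, careful=True)
    for pairname, contigs_fasta in sorted(reads.paths_to_contigs.items()):
        print(pairname, contigs_fasta)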
def trim(self, path_to_exe=False, force=False, max_cpus=-1):
    if not path_to_exe:
        exe_sickle = _get_exe_path('sickle')
    else:
        exe_sickle = _os.path.sep.join(path_to_exe)

    e1 = 'Could not find "adaptorcut_read_files" attribute. '\
            'Before quality score trimming, reads must be cleaned of '\
            'library preparation sequences. Please run cutAdaptors() '\
            'method on this Reads instance.'
    assert hasattr(self, 'adaptorcut_read_files'), e1
    e2 = 'Could not find %s. Either run cutAdaptors() again '\
            'or ensure file exists'
    for pairname, files in self.adaptorcut_read_files.items():
        # check both members of each read pair exist
        assert _os.path.exists(files[1]), e2 % files[1]
        assert _os.path.exists(files[2]), e2 % files[2]

    trimmed_read_files = {}
    print(sorted(self.adaptorcut_read_files))
    cmds = []
    processed_paths_to_do = []
    for pairname, files in self.adaptorcut_read_files.items():
        processed_path_1 = insert_suffix(files[1], '_qual')
        processed_path_2 = insert_suffix(files[2], '_qual')
        processed_path_s = insert_suffix(files[2], '_singletons_qual')
        # Illumina quality using CASAVA >= 1.8 is Sanger encoded
        QSscore_scale = 'sanger'
        cmd = [exe_sickle, 'pe',
                '-f', files[1], '-r', files[2],
                '-t', QSscore_scale,
                '-o', processed_path_1, '-p', processed_path_2,
                '-s', processed_path_s,
                # quality 25, length 50 (of 150)
                '-q', '25', '-l', '50']
        if not all([_os.path.exists(processed_path_1),
                    _os.path.exists(processed_path_2),
                    _os.path.exists(processed_path_s)]) or force:
            # collect expected outputs
            processed_paths_to_do += [(processed_path_1, processed_path_2,
                    processed_path_s)]
            # collect all the commands to be issued
            cmds += [(pairname, cmd)]
        else:
            print('Found:')
            print(processed_path_1)
            print(processed_path_2)
            print(processed_path_s)
            print('use "force = True" to overwrite')
            trimmed_read_files[pairname] = {}
            trimmed_read_files[pairname][1] = processed_path_1
            trimmed_read_files[pairname][2] = processed_path_2

    if len(cmds):
        max_processes = _decide_max_processes(max_cpus)
        processes = {}
        ### how to combine this which hangs on _os.wait()
        for pairname, cmd in cmds:
            print('Called: "%s"' % ' '.join(cmd))
            # process is key, open file being piped to is value
            # baga CollectReads currently includes path in pairname
            this_stdout_file = open(pairname + '_sickle.log', "w")
            thisprocess = _subprocess.Popen(cmd, shell=False,
                    stdout=this_stdout_file)
            processes[thisprocess] = this_stdout_file
            if len(processes) >= max_processes:
                # block until any child exits
                _os.wait()
                finished = dict([(p, f) for p, f in processes.items()
                        if p.poll() is not None])
                # close files for finished processes
                for process, stdout_file in finished.items():
                    stdout_file.close()
                    # update active processes
                    del processes[process]

        # wait for any remaining child processes and close their logs
        for p, stdout_file in processes.items():
            if p.poll() is None:
                p.wait()
            stdout_file.close()

    fails = []
    for (pairname, cmd), (processed_path_1, processed_path_2,
            processed_path_s) in zip(cmds, processed_paths_to_do):
        if _os.path.exists(processed_path_1) and \
                _os.path.exists(processed_path_2):
            print('Found:')
            print(processed_path_1)
            print(processed_path_2)
            trimmed_read_files[pairname] = {}
            trimmed_read_files[pairname][1] = processed_path_1
            trimmed_read_files[pairname][2] = processed_path_2
        else:
            print('Processing of the following pair seems to have failed')
            print(processed_path_1)
            print(processed_path_2)
            fails += [(processed_path_1, processed_path_2)]

    assert len(fails) == 0, 'There was a problem finding all of the '\
            'output from sickle. Try repeating this or an earlier step '\
            'with the --force option to overwrite previous, possibly '\
            'incomplete, files'
    self.trimmed_read_files = trimmed_read_files
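# trim() above and cutAdaptors() below both throttle concurrency by
# blocking on _os.wait() once max_processes children are running. A
# minimal self-contained sketch of that pattern, stripped of the
# logging and bookkeeping (function name is hypothetical):
def _run_throttled(cmds, max_processes):
    import os
    import subprocess
    running = set()
    for cmd in cmds:
        running.add(subprocess.Popen(cmd, shell=False))
        if len(running) >= max_processes:
            os.wait()  # block until any child exits
            # prune whichever children have finished
            running.difference_update(
                    [p for p in running if p.poll() is not None])
    # wait for the stragglers
    for p in running:
        if p.poll() is None:
            p.wait()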
def cutAdaptors(self, path_to_exe=False, force=False, max_cpus=-1):
    if not path_to_exe:
        path_to_exe = _get_exe_path('cutadapt')

    adaptorcut_read_files = {}
    adaptor_seqs = [
        'AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC',
        'AGATCGGAAGAGCACACGTCT',
        'AGATCGGAAGAGC',
        'GATCGGAAGAGCGGTTCAGCAGGAATGCCGAG',
        'ACACTCTTTCCCTACACGACGCTCTTCCGATCT',
    ]
    cmds = []
    processed_paths_to_do = []
    for cnum, (pairname, files) in enumerate(self.read_files.items()):
        processed_path_1 = insert_suffix(files[1], '_adpt')
        processed_path_2 = insert_suffix(files[2], '_adpt')
        # print(files[1], processed_path_1)
        # print(files[2], processed_path_2)

        # single end version (unused: immediately replaced by the
        # paired-end command below; retained for reference)
        # cmd = [path_to_exe] + \
        #         [a for b in [('-a', a) for a in adaptor_seqs] for a in b] + \
        #         ['-o', processed_path_1, files[1]]

        # paired end: the nested comprehensions flatten ('-a', seq)
        # tuples into an interleaved option list, -a SEQ1 -a SEQ2 . .
        cmd = [path_to_exe] + \
                [a for b in [('-a', a) for a in adaptor_seqs] for a in b] + \
                [a for b in [('-A', a) for a in adaptor_seqs] for a in b] + \
                ['-o', processed_path_1, '-p', processed_path_2] + \
                [files[1], files[2]]

        if not all([_os.path.exists(processed_path_1),
                    _os.path.exists(processed_path_2)]) or force:
            # collect expected outputs
            processed_paths_to_do += [(processed_path_1, processed_path_2)]
            # collect all the commands to be issued
            cmds += [(pairname, cmd)]
        else:
            print('Found:')
            print(processed_path_1)
            print(processed_path_2)
            print('use "force = True" to overwrite')
            adaptorcut_read_files[pairname] = {}
            adaptorcut_read_files[pairname][1] = processed_path_1
            adaptorcut_read_files[pairname][2] = processed_path_2

    if len(cmds):
        max_processes = _decide_max_processes(max_cpus)
        processes = {}
        ### how to combine this which hangs on _os.wait()
        for pairname, cmd in cmds:
            print('Called: "%s"' % ' '.join(cmd))
            # process is key, open file being piped to is value
            # baga CollectReads currently includes path in pairname
            this_stdout_file = open(pairname + '_cutadapt.log', "w")
            thisprocess = _subprocess.Popen(cmd, shell=False,
                    stdout=this_stdout_file)
            processes[thisprocess] = this_stdout_file
            if len(processes) >= max_processes:
                # block until any child exits
                _os.wait()
                finished = dict([(p, f) for p, f in processes.items()
                        if p.poll() is not None])
                # close files for finished processes
                for process, stdout_file in finished.items():
                    stdout_file.close()
                    # update active processes
                    del processes[process]

        # wait for any remaining child processes and close their logs
        for p, stdout_file in processes.items():
            if p.poll() is None:
                p.wait()
            stdout_file.close()

    fails = []
    for (pairname, cmd), (processed_path_1,
            processed_path_2) in zip(cmds, processed_paths_to_do):
        if _os.path.exists(processed_path_1) and \
                _os.path.exists(processed_path_2):
            print('Found:')
            print(processed_path_1)
            print(processed_path_2)
            adaptorcut_read_files[pairname] = {}
            adaptorcut_read_files[pairname][1] = processed_path_1
            adaptorcut_read_files[pairname][2] = processed_path_2
        else:
            print('Processing of the following pair seems to have failed')
            print(processed_path_1)
            print(processed_path_2)
            fails += [(processed_path_1, processed_path_2)]

    assert len(fails) == 0, 'There was a problem finding all of the '\
            'output from cutadapt. Try repeating this or an earlier '\
            'step with the --force option to overwrite previous, '\
            'possibly incomplete, files'
    self.adaptorcut_read_files = adaptorcut_read_files
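# The nested comprehension used in cutAdaptors() to build the repeated
# '-a SEQ' / '-A SEQ' options is equivalent to flattening (flag, seq)
# pairs with itertools.chain; a quick sketch and check (helper name is
# hypothetical):
def _adaptor_args(flag, seqs):
    import itertools
    return list(itertools.chain.from_iterable((flag, s) for s in seqs))

assert _adaptor_args('-a', ['AGATCGGAAGAGC', 'ACACTC']) == \
        ['-a', 'AGATCGGAAGAGC', '-a', 'ACACTC']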
def IndelRealignGATK(self,
        jar=['external_programs', 'GenomeAnalysisTK',
                'GenomeAnalysisTK.jar'],
        picard_jar=False,
        samtools_exe=False,
        use_java='java',
        force=False,
        mem_num_gigs=2,
        max_cpus=-1):
    # GATK is manually downloaded by user and placed in folder of their choice
    jar = _os.path.sep.join(jar)
    if not picard_jar:
        picard_jar = _get_jar_path('picard')

    if not samtools_exe:
        samtools_exe = _get_exe_path('samtools')

    genome_fna = 'genome_sequences/{}.fna'.format(self.genome_id)
    e1 = 'Could not find "paths_to_BAMs_dd_si" attribute. Before '\
            'starting GATK analysis, read alignments must have '\
            'duplicates removed. Please run: .toBAMS(), '\
            '.removeDuplicates(), .sortIndexBAMs() methods on this '\
            'SAMs instance, or --deduplicate if using baga_cli.py.'
    assert hasattr(self, 'paths_to_BAMs_dd_si'), e1
    e2 = 'Could not find %s. Please ensure file exists'
    for BAM in self.paths_to_BAMs_dd_si:
        assert _os.path.exists(BAM), e2 % BAM

    # always (re)generate dict in case of upstream changes in data
    print('Creating sequence dictionary for %s' % genome_fna)
    # Picard expects single 'R=<path>' and 'O=<path>' argument tokens,
    # not the flag and the path as separate arguments
    _subprocess.call([use_java, '-jar', picard_jar,
            'CreateSequenceDictionary',
            'R=' + genome_fna,
            'O=' + genome_fna[:-4] + '.dict'])

    # always (re)index in case of upstream changes in data
    print('Writing index files for %s' % genome_fna)
    _subprocess.call([samtools_exe, 'faidx', genome_fna])

    processes = set()
    max_processes = _decide_max_processes(max_cpus)

    for BAM in self.paths_to_BAMs_dd_si:
        intervals = BAM[:-4] + '.intervals'
        if not _os.path.exists(intervals) or force:
            cmd = [use_java, '-Xmx%sg' % mem_num_gigs, '-jar', jar,
                    '-T', 'RealignerTargetCreator',
                    '-R', genome_fna,
                    '-I', BAM,
                    '-o', intervals]
                    #, '--validation_strictness', 'LENIENT']
            print(' '.join(map(str, cmd)))
            processes.add(_subprocess.Popen(cmd, shell=False))
            if len(processes) >= max_processes:
                (pid, exit_status) = _os.wait()
                processes.difference_update(
                        [p for p in processes if p.poll() is not None])
        else:
            print('Found:')
            print(intervals)
            print('use "force = True" to overwrite')

    # Check if all the child processes were closed
    for p in processes:
        if p.poll() is None:
            p.wait()

    paths_to_BAMs_dd_si_ra = []
    for BAM in self.paths_to_BAMs_dd_si:
        intervals = BAM[:-4] + '.intervals'
        bam_out = BAM[:-4] + '_realn.bam'
        if not _os.path.exists(bam_out) or force:
            cmd = [use_java, '-Xmx4g', '-jar', jar,
                    '-T', 'IndelRealigner',
                    '-R', genome_fna,
                    '-I', BAM,
                    '-targetIntervals', intervals,
                    '-o', bam_out,
                    '--filter_bases_not_stored']
            print(' '.join(map(str, cmd)))
            processes.add(_subprocess.Popen(cmd, shell=False))
            if len(processes) >= max_processes:
                _os.wait()
                processes.difference_update(
                        [p for p in processes if p.poll() is not None])
        else:
            print('Found:')
            print(bam_out)
            print('use "force = True" to overwrite')

        paths_to_BAMs_dd_si_ra += [bam_out]

    for p in processes:
        if p.poll() is None:
            p.wait()

    # the last list of BAMs in ready_BAMs is input for CallgVCFsGATK
    # both IndelRealignGATK and recalibBaseScoresGATK put here
    self.ready_BAMs = [paths_to_BAMs_dd_si_ra]
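# A minimal sketch of the call order named in the e1 error message of
# IndelRealignGATK() above, assuming a SAMs-like instance `sams`
# (hypothetical name) built from BWA alignments; method names are taken
# from that message rather than verified signatures:
def _gatk_realign_pipeline(sams):
    sams.toBAMS()            # SAM -> BAM
    sams.removeDuplicates()  # remove PCR duplicates
    sams.sortIndexBAMs()     # coordinate sort plus index
    sams.IndelRealignGATK()  # RealignerTargetCreator + IndelRealigner
    return sams.ready_BAMs[-1]  # input for CallgVCFsGATK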
def align(self, insert_size=False, path_to_exe=False,
        local_alns_path=['alignments'], force=False, max_cpus=-1):
    if not path_to_exe:
        path_to_exe = _get_exe_path('bwa')

    # write genome sequence to a fasta file
    try:
        _os.makedirs('genome_sequences')
    except OSError:
        pass

    genome_fna = 'genome_sequences/%s.fna' % self.genome_id
    _SeqIO.write(_SeqRecord(_Seq(self.genome_sequence.tostring()),
            id=self.genome_id), genome_fna, 'fasta')

    # make folder for alignments (BAMs)
    local_alns_path = _os.path.sep.join(local_alns_path)
    if not _os.path.exists(local_alns_path):
        _os.makedirs(local_alns_path)

    # make a subdir for this genome
    local_alns_path_genome = _os.path.sep.join([
            local_alns_path, self.genome_id])
    if not _os.path.exists(local_alns_path_genome):
        _os.makedirs(local_alns_path_genome)

    max_processes = _decide_max_processes(max_cpus)

    e1 = 'Could not find "read_files" attribute. Before aligning to '\
            'genome, reads must be quality score trimmed. Please run '\
            'trim() method on this Reads instance.'
    assert hasattr(self, 'read_files'), e1
    e2 = 'Could not find %s. Either run trim() again or ensure file exists'
    for pairname, files in self.read_files.items():
        assert _os.path.exists(files[1]), e2 % files[1]
        assert _os.path.exists(files[2]), e2 % files[2]

    # always (re)index in case of upstream changes in data
    print('Writing BWA index files for %s' % genome_fna)
    _subprocess.call([path_to_exe, 'index', genome_fna])

    aligned_read_files = {}
    for pairname, files in self.read_files.items():
        # read group: ID and sample from pairname, Illumina platform
        RGinfo = r"@RG\tID:%s\tSM:%s\tPL:ILLUMINA" % (pairname, pairname)
        if insert_size:
            # str() in case an integer was supplied
            cmd = [path_to_exe, 'mem', '-t', str(max_processes), '-M',
                    '-a', '-I', str(insert_size), '-R', RGinfo,
                    genome_fna, files[1], files[2]]
        else:
            # BWA can estimate insert size on-the-fly
            cmd = [path_to_exe, 'mem', '-t', str(max_processes), '-M',
                    '-a', '-R', RGinfo,
                    genome_fna, files[1], files[2]]

        out_sam = _os.path.sep.join([local_alns_path_genome,
                '%s__%s.sam' % (pairname, self.genome_id)])
        if not _os.path.exists(out_sam) or force:
            print('Called: "%s"' % ' '.join(cmd))
            with open(out_sam, "wb") as out:
                _subprocess.call(cmd, stdout=out)
        else:
            print('Found:')
            print(out_sam)
            print('use "force = True" to overwrite')
            print(' '.join(cmd))

        aligned_read_files[pairname] = out_sam

    self.aligned_read_files = aligned_read_files
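# For a hypothetical pairname 'sample1', the RGinfo template used in
# align() renders (before bwa expands the literal \t escapes into tabs)
# as @RG\tID:sample1\tSM:sample1\tPL:ILLUMINA, i.e. read group ID and
# sample both come from the pair name and the platform is fixed as
# Illumina. A quick check of that rendering:
def _rg_string(pairname):
    return r"@RG\tID:%s\tSM:%s\tPL:ILLUMINA" % (pairname, pairname)

assert _rg_string('sample1') == r"@RG\tID:sample1\tSM:sample1\tPL:ILLUMINA"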
def generateReads(self, path_to_exe=False, paths_to_genomes=False,
        readcov=60, readlen=100, fraglen=350, sterrfraglen=20, model=4,
        max_cpus=-1):
    '''
    Call GemSIM to generate reads

    Need to have written genome sequences to generate from, possibly
    with generated SNPs, small indels and large deletions.
    '''
    #max_cpus etc
    if paths_to_genomes:
        use_genomes = sorted(paths_to_genomes)
    elif hasattr(self, 'written_genomes'):
        use_genomes = sorted(self.written_genomes)
    else:
        raise ValueError('provide either paths_to_genomes or generate '
                'some then .writeSequences()')

    if not path_to_exe:
        path_to_exe = _get_exe_path('gemsim')

    comment2 = '''
    to generate reads put GemSIM v1.6 into subfolder GemSIM_v1.6 and issue these commands:
    GemSIM_v1.6/GemReads.py -r LESB58_for_GemSim_01.fasta -n 1980527 -l d -u 350 -s 20 -m GemSIM_v1.6/models/ill100v4_p.gzip -c -q 33 -p -o GemSimLESB58_01
    '''
    # number of pairs for target coverage: each pair contributes two reads
    num_pairs = int(len(self.genome.sequence) * readcov / (readlen * 2))
    if model == 4:
        path_to_model = _os.path.sep.join(
                path_to_exe.split(_os.path.sep)[:-1] +
                ['models', 'ill100v4_p.gzip'])
    elif model == 5:
        path_to_model = _os.path.sep.join(
                path_to_exe.split(_os.path.sep)[:-1] +
                ['models', 'ill100v5_p.gzip'])
    else:
        raise ValueError('model must be 4 or 5 (the Illumina 100bp '
                'error models supplied with GemSIM)')

    print('Using error model: {}'.format(path_to_model))
    print('Generating {:,} {}bp read pairs for {}x coverage depth of a '
            '{}bp genome ({})'.format(num_pairs, readlen, readcov,
            len(self.genome.sequence), self.genome.id))

    processes = set()
    max_processes = _decide_max_processes(max_cpus)

    import time
    start = time.time()

    out_raw = []
    for i, genome_in in enumerate(use_genomes):
        # could use per genome length . . less consistent than using reference
        # genome_len = len(_SeqIO.read(genome_in,'fasta').seq)
        # num_pairs = genome_len * readcov / (readlen*2)
        outprefix = 'GemSim_{}_{:02d}'.format(self.genome.id, i + 1)
        cmd = [path_to_exe, '-r', genome_in, '-n', num_pairs, '-l', 'd',
                '-u', fraglen, '-s', sterrfraglen, '-m', path_to_model,
                '-c', '-q', 33, '-p', '-o', outprefix]
        out_raw += [outprefix + '_fir.fastq', outprefix + '_sec.fastq']
        # this would be better to rename and compress all in one
        # maybe as a shell script? Then resuming (--force) would be easier.
        if _os.path.exists(outprefix + '_fir.fastq') and \
                _os.path.exists(outprefix + '_sec.fastq'):
            print('Found output for {}_fir.fastq (and sec), not '
                    'regenerating, delete these to start from '
                    'scratch'.format(outprefix))
        else:
            # use a list comprehension, not map(), so the command can be
            # printed and then reused (map() is a one-shot iterator on
            # Python 3)
            cmd = [str(c) for c in cmd]
            print(' '.join(cmd))
            processes.add(_subprocess.Popen(cmd, shell=False))
            if len(processes) >= max_processes:
                (pid, exit_status) = _os.wait()
                processes.difference_update(
                        [p for p in processes if p.poll() is not None])

    # Check if all the child processes were closed
    for p in processes:
        if p.poll() is None:
            p.wait()

    missing = []
    for o in out_raw:
        if not _os.path.exists(o):
            missing += [o]

    assert len(missing) == 0, 'Could not find:\n{}'.format(
            '\n'.join(missing))
    print('all finished after {} minutes'.format(
            int(round((time.time() - start) / 60.0))))

    outdir = _os.path.sep.join(['simulated_reads', self.genome.id])
    try:
        _os.makedirs(outdir)
    except OSError:
        pass

    for o in out_raw:
        new = _os.path.sep.join([outdir,
                o.replace('fir', 'R1').replace('sec', 'R2')])
        print('{} ==> {}'.format(o, new))
        _os.rename(o, new)
        cmd = ['gzip', new]
        print(' '.join(cmd))
        _subprocess.call(cmd)
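# Worked example of the num_pairs formula in generateReads() above: at
# 60x coverage with 100 bp reads, a 6,601,757 bp genome needs
# 6601757 * 60 / (100 * 2) ~= 1,980,527 pairs, which matches the
# -n 1980527 in the GemReads.py command recorded in comment2 (helper
# name here is hypothetical):
def _pairs_for_coverage(genome_len, readcov, readlen):
    # coverage = num_pairs * 2 * readlen / genome_len, rearranged
    return int(genome_len * readcov / (readlen * 2))

assert _pairs_for_coverage(6601757, 60, 100) == 1980527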