def __init__(self, screen_file):
    """
    Create a new FastqscreenData instance

    Parses a fastq_screen output file and populates this TabFile
    subclass with one row per library/genome, using a normalised
    set of column names regardless of the fastq_screen version
    that produced the file (older versions say 'Library...',
    newer versions say 'Genome...').

    Arguments:
      screen_file (str): path to the fastq_screen output file
    """
    TabFile.__init__(self, column_names=(
        'Library',
        '%Unmapped',
        '%One_hit_one_library',
        '%Multiple_hits_one_library',
        '%One_hit_multiple_libraries',
        '%Multiple_hits_multiple_libraries',))
    self._screen_file = os.path.abspath(screen_file)
    self._version = None  # fastq_screen version string (from header)
    self._no_hits = None  # %reads with no hits in any library/genome
    # Read in data
    with open(self._screen_file, 'r') as fp:
        for line in fp:
            line = line.strip()
            if line.startswith('#Fastq_screen version:'):
                # e.g. "#Fastq_screen version: 0.11.1"
                self._version = line.split()[2]
                continue
            elif line.startswith('Library') or line.startswith('Genome'):
                # Column-header line: capture the file's own column
                # names in a temporary TabFile
                tabfile = TabFile(column_names=line.split())
                continue
            elif line.startswith('%Hit_no_libraries:') or \
                 line.startswith('%Hit_no_genomes:'):
                self._no_hits = float(line.split()[-1])
                continue
            elif not line or \
                 line.startswith('#') or \
                 line.startswith('%'):
                # Blank line or other comment/summary line: skip
                continue
            # Data line
            # NOTE(review): raises NameError if a data line appears
            # before the 'Library'/'Genome' header line — presumably
            # well-formed files always have the header first; verify
            tabfile.append(tabdata=line)
    # Handle different terminology for different versions
    if tabfile.header()[0] == 'Library':
        library = 'Library'
        unmapped = '%Unmapped'
        one_hit_one_library = '%One_hit_one_library'
        multiple_hits_one_library = '%Multiple_hits_one_library'
        one_hit_multiple_libraries = '%One_hit_multiple_libraries'
        multiple_hits_multiple_libraries = '%Multiple_hits_multiple_libraries'
    elif tabfile.header()[0] == 'Genome':
        library = 'Genome'
        unmapped = '%Unmapped'
        one_hit_one_library = '%One_hit_one_genome'
        multiple_hits_one_library = '%Multiple_hits_one_genome'
        one_hit_multiple_libraries = '%One_hit_multiple_genomes'
        multiple_hits_multiple_libraries = '%Multiple_hits_multiple_genomes'
    # NOTE(review): if the first column is neither 'Library' nor
    # 'Genome' the names above are unset and the loop below raises
    # NameError — confirm whether other header forms can occur
    # Copy data to main object under the normalised column names
    for line in tabfile:
        data = [
            line[library], line[unmapped], line[one_hit_one_library],
            line[multiple_hits_one_library], line[one_hit_multiple_libraries],
            line[multiple_hits_multiple_libraries]
        ]
        self.append(data=data)
def __init__(self, screen_file):
    """
    Create a new FastqscreenData instance

    Loads data from a fastq_screen output file into this TabFile
    subclass, mapping the file's own column names (which differ
    between fastq_screen versions: 'Library...' vs 'Genome...')
    onto a single normalised set of columns.

    Arguments:
      screen_file (str): path to the fastq_screen output file
    """
    TabFile.__init__(self, column_names=('Library',
                                         '%Unmapped',
                                         '%One_hit_one_library',
                                         '%Multiple_hits_one_library',
                                         '%One_hit_multiple_libraries',
                                         '%Multiple_hits_multiple_libraries',))
    self._screen_file = os.path.abspath(screen_file)
    self._version = None  # fastq_screen version (from '#Fastq_screen version:')
    self._no_hits = None  # percentage of reads hitting no library/genome
    # Read in data
    with open(self._screen_file, 'r') as fp:
        for line in fp:
            line = line.strip()
            if line.startswith('#Fastq_screen version:'):
                # Version string is the third whitespace-delimited field
                self._version = line.split()[2]
                continue
            elif line.startswith('Library') or line.startswith('Genome'):
                # Column-header line: build a temporary TabFile with
                # the file's native column names
                tabfile = TabFile(column_names=line.split())
                continue
            elif line.startswith('%Hit_no_libraries:') or \
                 line.startswith('%Hit_no_genomes:'):
                self._no_hits = float(line.split()[-1])
                continue
            elif not line or \
                 line.startswith('#') or \
                 line.startswith('%'):
                # Skip blank lines and remaining comment/summary lines
                continue
            # Data line; NOTE(review): assumes the column-header line
            # always precedes the first data line (else NameError)
            tabfile.append(tabdata=line)
    # Handle different terminology for different versions
    if tabfile.header()[0] == 'Library':
        library = 'Library'
        unmapped = '%Unmapped'
        one_hit_one_library = '%One_hit_one_library'
        multiple_hits_one_library = '%Multiple_hits_one_library'
        one_hit_multiple_libraries = '%One_hit_multiple_libraries'
        multiple_hits_multiple_libraries = '%Multiple_hits_multiple_libraries'
    elif tabfile.header()[0] == 'Genome':
        library = 'Genome'
        unmapped = '%Unmapped'
        one_hit_one_library = '%One_hit_one_genome'
        multiple_hits_one_library = '%Multiple_hits_one_genome'
        one_hit_multiple_libraries = '%One_hit_multiple_genomes'
        multiple_hits_multiple_libraries = '%Multiple_hits_multiple_genomes'
    # Copy data to main object using the normalised column order
    for line in tabfile:
        data = [line[library],
                line[unmapped],
                line[one_hit_one_library],
                line[multiple_hits_one_library],
                line[one_hit_multiple_libraries],
                line[multiple_hits_multiple_libraries]]
        self.append(data=data)
def __init__(self, fastq_strand_out):
    """
    Create a new Fastqstrand instance

    Parses the output file from the 'fastq_strand' utility and,
    for each genome listed, stores the forward/reverse percentages
    plus a derived forward/reverse ratio and a strandedness call.

    Arguments:
      fastq_strand_out (str): path to the fastq_strand output file
    """
    self._fastq_strand_out = os.path.abspath(fastq_strand_out)
    self._version = None  # fastq_strand version (from header line)
    self._genomes = AttributeDictionary()  # per-genome results keyed by name
    # Read in data
    tabfile = None
    with open(self._fastq_strand_out, 'r') as fp:
        for line in fp:
            line = line.strip()
            if line.startswith('#fastq_strand version:'):
                # Version is the third whitespace-delimited field
                self._version = line.split()[2]
                continue
            elif line.startswith('#Genome'):
                # Column-header line; strip the leading '#'
                tabfile = TabFile(column_names=line[1:].split('\t'))
                continue
            # Data line
            # NOTE(review): raises AttributeError if a data line
            # precedes the '#Genome' header — presumably the header
            # always comes first in well-formed output; verify
            tabfile.append(tabdata=line)
    # Check there is some data
    if tabfile is None:
        raise Exception("Unable to extract fastq_strand data from %s"
                        % self._fastq_strand_out)
    # Copy data to main object
    for line in tabfile:
        # Store the data for this genome
        data = AttributeDictionary()
        self._genomes[line['Genome']] = data
        data['forward'] = line['1st forward']
        data['reverse'] = line['2nd reverse']
        # Additional processing: derive forward/reverse ratio
        # (None if both are zero; +inf if only reverse is zero)
        if data.reverse > 0.0:
            ratio = float(data.forward)/float(data.reverse)
        elif data.forward > 0.0:
            ratio = float("+inf")
        else:
            ratio = None
        # Classify strandedness from the ratio:
        #   < 0.2        -> "reverse"
        #   > 5 (or inf) -> "forward"
        #   in between   -> "unstranded?"
        #   no ratio     -> "undetermined"
        if ratio is not None:
            if ratio < 0.2:
                strandedness = "reverse"
            elif ratio > 5 or ratio == float("+inf"):
                strandedness = "forward"
            else:
                strandedness = "unstranded?"
        else:
            strandedness = "undetermined"
        data['ratio'] = ratio
        data['strandedness'] = strandedness
def report_basic_stats(self, out_file=None, fp=None):
    """
    Report the 'basic' statistics

    For each FASTQ file, report the following information:

    - Project name
    - Sample name
    - FASTQ file name (without leading directory)
    - Size (human-readable)
    - Nreads (number of reads)
    - Paired_end ('Y' for paired-end, 'N' for single-end)

    Arguments:
      out_file (str): name of file to write report to
        (used if 'fp' is not supplied)
      fp (File): File-like object open for writing (defaults
        to stdout if 'out_file' also not supplied)
    """
    # Work out where the output should go: an explicit stream
    # takes precedence, then a named file, then stdout
    if fp is not None:
        out_stream = fp
    elif out_file is not None:
        out_stream = open(out_file, 'w')
    else:
        out_stream = sys.stdout
    # Build a reduced table holding just the 'basic' columns,
    # copied row by row from the full stats table
    basic_columns = ('Project',
                     'Sample',
                     'Fastq',
                     'Size',
                     'Nreads',
                     'Paired_end')
    basic_stats = TabFile(column_names=basic_columns)
    for row in self._stats:
        basic_stats.append(data=[row[col] for col in basic_columns])
    basic_stats.write(fp=out_stream, include_header=True)
    # Only close a stream that this method opened itself
    if fp is None and out_file is not None:
        out_stream.close()
def __init__(self, screen_file):
    """
    Create a new FastqscreenData instance

    Reads a fastq_screen output file (older format: column headers
    starting with 'Library') and stores one row per library in this
    TabFile subclass.

    Arguments:
      screen_file (str): path to the fastq_screen output file
    """
    TabFile.__init__(self, column_names=('Library',
                                         '%Unmapped',
                                         '%One_hit_one_library',
                                         '%Multiple_hits_one_library',
                                         '%One_hit_multiple_libraries',
                                         '%Multiple_hits_multiple_libraries',))
    self._screen_file = os.path.abspath(screen_file)
    self._version = None  # fastq_screen version (from header line)
    self._no_hits = None  # %reads hitting no library
    # Read in data
    with open(self._screen_file, 'r') as fp:
        for line in fp:
            line = line.strip()
            if line.startswith('#Fastq_screen version:'):
                # Version is the third whitespace-delimited field
                self._version = line.split()[2]
                continue
            elif line.startswith('Library'):
                # Column-header line: build temporary TabFile with
                # the file's own column names
                tabfile = TabFile(column_names=line.split())
                continue
            elif line.startswith('%Hit_no_libraries:'):
                self._no_hits = float(line.split()[-1])
                continue
            elif not line or \
                 line.startswith('#') or \
                 line.startswith('%'):
                # Skip blanks and remaining comment/summary lines
                continue
            # Data line; NOTE(review): assumes the 'Library' header
            # line always precedes the data (else NameError)
            tabfile.append(tabdata=line)
    # Move data to main object, in this object's column order
    # (columns are assumed to be present in the parsed file)
    for line in tabfile:
        data = []
        for col in self.header():
            data.append(line[col])
        self.append(data=data)
class MacsXLS:
    """Class for reading and manipulating XLS output from MACS

    Reads the XLS output file from the MACS peak caller and
    processes and stores the information for subsequent manipulation
    and output.

    To read in data from a MACS output file:

    >>> macs = MacsXLS("macs.xls")

    This reads in the data and prepends an additional 'order'
    column (a list of numbers from one to the number of data
    lines).

    To get the MACS version:

    >>> macs.macs_version
    2.0.10

    To access the 'header' information (as a Python list):

    >>> macs.header

    To see the column names (as a Python list):

    >>> macs.columns

    The data is stored as a TabFile object; to access the data
    use the 'data' property, e.g.

    >>> for line in macs.data:
    ...    print "Chr %s Start %s End" % (line['chr'],line['start'],line['end'])

    To sort the data on a particular column use the 'sort_on'
    method, e.g.

    >>> macs.sort_on('chr')

    (Note that the order column is always recalculated after
    sorting.)

    """

    def __init__(self, filen=None, fp=None, name=None):
        """Create a new MacsXLS instance

        Arguments:
          filen: name of the file to read the MACS output from.
            If None then fp argument must be supplied instead.
          fp: file-like object opened for reading. If None then
            filen argument must be supplied instead. If both
            filen and fp are supplied then fp will be used
            preferentially.
          name: (optional) name to associate with the data; if
            None then the name is taken from the '# name = '
            header line of the file, if present.
        """
        # Store data
        self.__filen = filen
        self.__name = name
        self.__macs_version = None
        self.__command_line = None
        self.__header = []      # raw header lines from the file
        self.__data = None      # TabFile holding the tabular data
        # Open file, if necessary; 'filen' is reset to None when the
        # caller supplied fp, so we only close handles we opened
        if fp is None:
            fp = open(filen, 'r')
        else:
            filen = None
        # Iterate over header lines
        for line in fp:
            line = line.strip()
            if line.startswith('#') or line == '':
                # Header line
                self.__header.append(line)
                # Detect/extract data from header
                if line.startswith("# This file is generated by MACS version "):
                    # Look for MACS version (9th whitespace field)
                    self.__macs_version = line.split()[8]
                elif self.__name is None and line.startswith("# name = "):
                    # Look for 'name' if none set
                    self.__name = line[len("# name = "):]
                elif line.startswith("# Command line: "):
                    # Look for command line (text after the prefix)
                    self.__command_line = line[16:]
            else:
                if self.__data is None:
                    # First line of actual data should be the column names
                    columns = line.split('\t')
                    # Insert an additional column called 'order'
                    columns.insert(0, "order")
                    # Set up TabFile to handle actual data
                    self.__data = TabFile(column_names=columns)
                else:
                    # Assume it's actual data and store it; the
                    # leading tab leaves the 'order' column empty
                    # (filled in by update_order below)
                    self.__data.append(tabdata="\t%s" % line)
        # Close the file handle, if we opened it
        if filen is not None:
            fp.close()
        # Check that we actually got a version line
        # (NB Python 2 'raise' syntax)
        if self.macs_version is None:
            raise Exception,"Failed to extract MACS version, not a MACS output file?"
        # Populate the 'order' column
        self.update_order()

    @property
    def filen(self):
        """Return the source file name
        """
        return self.__filen

    @property
    def name(self):
        """Return the name property
        """
        return self.__name

    @property
    def macs_version(self):
        """Return the MACS version extracted from the file
        """
        return self.__macs_version

    @property
    def command_line(self):
        """Return the command line string extracted from the header

        This is the value associated with the "# Command line: ..."
        header line.

        Will be 'None' if no matching header line is found, else
        is the string following the ':'.

        """
        return self.__command_line

    @property
    def columns(self):
        """Return the column names for the MACS data

        Returns a list of the column names from the data
        extracted from the file.

        """
        return self.__data.header()

    @property
    def columns_as_xls_header(self):
        """Returns the column name list, with hash prepended

        """
        return ['#'+self.columns[0]] + self.columns[1:]

    @property
    def header(self):
        """Return the header data from the file

        Returns a list of lines comprising the header
        extracted from the file.

        """
        return self.__header

    @property
    def data(self):
        """Return the data from the file

        Returns a TabFile object comprising the data extracted
        from the file.

        """
        return self.__data

    @property
    def with_broad_option(self):
        """Returns True if MACS was run with --broad option

        If --broad wasn't detected then returns False.

        """
        if self.macs_version.startswith('1.'):
            # Not an option in MACS 1.*
            return False
        try:
            # Was --broad specified in the command line?
            return '--broad' in self.command_line.split()
        except AttributeError:
            # No command line? Check for 'abs_summit' column
            # (absent when --broad was used)
            return 'abs_summit' not in self.columns

    def sort_on(self, column, reverse=True):
        """Sort data on specified column

        Sorts the data in-place, by the specified column.

        By default data is sorted in descending order; set
        'reverse' argument to False to sort values in ascending
        order instead

        Note that the 'order' column is automatically updated
        after each sorting operation.

        Arguments:
          column: name of the column to sort on
          reverse: if True (default) then sort in descending
            order (i.e. largest to smallest). Otherwise sort in
            ascending order.

        """
        # Sort the data
        self.__data.sort(lambda line: line[column], reverse=reverse)
        # Update the 'order' column
        self.update_order()

    def update_order(self):
        """Recalculate the 'order' column (1..N by current row order)
        """
        # Set/update values in 'order' column
        for i in range(0, len(self.__data)):
            self.__data[i]['order'] = i+1
# Report the input/output file names (NB Python 2 print statements)
print "Input file: %s" % macs_in
print "Output XLS: %s" % xls_out
# Extract the header from the MACS and feed actual data to
# TabFile object
header = []
data = TabFile(column_names=['chr', 'start', 'end', 'length', 'summit', 'tags',
                             '-10*log10(pvalue)', 'fold_enrichment', 'FDR(%)'])
fp = open(macs_in, 'r')
for line in fp:
    if line.startswith('#') or line.strip() == '':
        # Header line: keep for later version detection
        header.append(line.strip())
    else:
        # Data line
        data.append(tabdata=line.strip())
fp.close()
# Temporarily remove first line (the column-name row from the
# file, which was read in as if it were data)
header_line = str(data[0])
del(data[0])
# Attempt to detect MACS version from the stored header lines
macs_version = None
for line in header:
    if line.startswith("# This file is generated by MACS version "):
        # Version is the 9th whitespace-delimited field
        macs_version = line.split()[8]
        break
if macs_version is None:
    # Cannot proceed without knowing the MACS version
    logging.error("couldn't detect MACS version")
    sys.exit(1)
# Extract the header from the MACS and feed actual data to # TabFile object header = [] data = TabFile(column_names=[ 'chr', 'start', 'end', 'length', 'summit', 'tags', '-10*log10(pvalue)', 'fold_enrichment', 'FDR(%)' ]) fp = open(macs_in, 'r') for line in fp: if line.startswith('#') or line.strip() == '': # Header line header.append(line.strip()) else: # Data data.append(tabdata=line.strip()) fp.close() # Temporarily remove first line header_line = str(data[0]) del (data[0]) # Attempt to detect MACS version macs_version = None for line in header: if line.startswith("# This file is generated by MACS version "): macs_version = line.split()[8] break if macs_version is None: logging.error("couldn't detect MACS version") sys.exit(1)
class FastqStatistics:
    """
    Class for collecting and reporting stats on Illumina FASTQs

    Given a directory with fastq(.gz) files arranged in the same
    structure as the output from bcl2fastq or bcl2fastq2,
    collects statistics for each file and provides methods for
    reporting different aspects.

    Example usage:

    >>> from IlluminaData import IlluminaData
    >>> data = IlluminaData('120117_BLAH_JSHJHXXX','bcl2fastq')
    >>> stats = FastqStatistics(data)
    >>> stats.report_basic_stats('basic_stats.out')
    """

    def __init__(self, illumina_data, n_processors=1, add_to=None):
        """
        Create a new FastqStatistics instance

        Arguments:
          illumina_data: populated IlluminaData object describing the
            run.
          n_processors: number of processors to use (if >1 then uses
            the multiprocessing library to run the statistics gathering
            using multiple cores).
          add_to: optional, add the data to that from an existing
            statistics file
        """
        self._illumina_data = illumina_data
        self._n_processors = n_processors
        self._stats = None       # TabFile of collected stats (set by _get_data)
        self._lane_names = []
        self._get_data(filen=add_to)

    def _get_data(self, filen=None):
        """
        Collect statistics for FASTQ outputs from an Illumina run

        Arguments:
          filen (str): optional, path to an existing statistics file
            whose contents are merged into the new data
        """
        # Collect FASTQ files from all projects/samples
        fastqstats = []
        for project in self._illumina_data.projects:
            for sample in project.samples:
                for fastq in sample.fastq:
                    fastqstats.append(
                        FastqStats(os.path.join(sample.dirn, fastq),
                                   project.name,
                                   sample.name))
        # Gather same information for undetermined reads (if present)
        if self._illumina_data.undetermined is not None:
            for lane in self._illumina_data.undetermined.samples:
                for fastq in lane.fastq:
                    fastqstats.append(
                        FastqStats(os.path.join(lane.dirn, fastq),
                                   self._illumina_data.undetermined.name,
                                   lane.name))
        # Collect the data for each file
        if self._n_processors > 1:
            # Multiple cores
            pool = Pool(self._n_processors)
            results = pool.map(collect_fastq_data, fastqstats)
            pool.close()
            pool.join()
        else:
            # Single core
            # NOTE(review): relies on Python 2 'map'/'filter'
            # returning lists (indexed/re-iterated below)
            results = map(collect_fastq_data, fastqstats)
        # Set up tabfile to hold pre-existing data
        if filen is not None:
            existing_stats = TabFile(filen, first_line_is_header=True)
        else:
            existing_stats = None
        # Set up class to hold all collected data
        self._stats = TabFile(column_names=('Project',
                                            'Sample',
                                            'Fastq',
                                            'Size',
                                            'Nreads',
                                            'Paired_end',
                                            'Read_number'))
        # Split result sets into R1 and R2
        results_r1 = filter(lambda f: f.read_number == 1, results)
        results_r2 = filter(lambda f: f.read_number == 2, results)
        # Determine which lanes are present and append
        # columns for each
        lanes = set()
        for fastq in results_r1:
            logger.debug("-- %s: lanes %s" %
                         (fastq.name,
                          ','.join([str(l) for l in fastq.lanes])))
            for lane in fastq.lanes:
                lanes.add(lane)
        # Add lane numbers from pre-existing stats file
        # (columns named 'L<n>')
        if existing_stats is not None:
            for c in existing_stats.header():
                if c.startswith('L'):
                    lanes.add(int(c[1:]))
        self._lanes = sorted(list(lanes))
        logger.debug("Lanes found: %s" %
                     ','.join([str(l) for l in self._lanes]))
        for lane in self._lanes:
            self._stats.appendColumn("L%s" % lane)
        # Copy pre-existing stats into new tabfile
        if existing_stats:
            for line in existing_stats:
                data = [
                    line['Project'], line['Sample'], line['Fastq'],
                    line['Size'], line['Nreads'], line['Paired_end'],
                    line['Read_number']
                ]
                for lane in lanes:
                    try:
                        data.append(line["L%s" % lane])
                    except:
                        # NOTE(review): bare except — presumably
                        # guarding a missing lane column; verify
                        data.append('')
                self._stats.append(data=data)
        # Copy reads per lane from R1 FASTQs into R2
        # (fastq_screen-style per-lane counts only exist for R1)
        for r2_fastq in results_r2:
            # Get corresponding R1 name
            logger.debug("-- Fastq R2: %s" % r2_fastq.name)
            r1_fastq_name = IlluminaFastq(r2_fastq.name)
            r1_fastq_name.read_number = 1
            r1_fastq_name = str(r1_fastq_name)
            logger.debug("-- -> R1: %s" % r1_fastq_name)
            # Locate corresponding data
            # NOTE(review): IndexError if no matching R1 is found
            r1_fastq = filter(lambda f: f.name.startswith(r1_fastq_name),
                              results_r1)[0]
            r2_fastq.reads_by_lane = dict(r1_fastq.reads_by_lane)
        # Write the data into the tabfile
        paired_end = ('Y' if self._illumina_data.paired_end else 'N')
        for fastq in results:
            # Check for existing entry
            existing_entry = False
            for line in self._stats:
                if (line['Project'] == fastq.project and
                    line['Sample'] == fastq.sample and
                    line['Fastq'] == fastq.name):
                    # Overwrite the existing entry
                    existing_entry = True
                    break
            # Write the data
            if not existing_entry:
                # Append new entry
                data = [
                    fastq.project, fastq.sample, fastq.name,
                    bcf_utils.format_file_size(fastq.fsize),
                    fastq.nreads, paired_end, fastq.read_number
                ]
                for lane in lanes:
                    try:
                        data.append(fastq.reads_by_lane[lane])
                    except:
                        # NOTE(review): bare except — presumably a
                        # missing lane (KeyError); verify
                        data.append('')
                self._stats.append(data=data)
            else:
                # Overwrite existing entry; 'line' is still bound to
                # the matching row found by the search loop above
                logging.warning("Overwriting exisiting entry for "
                                "%s/%s/%s" % (fastq.project,
                                              fastq.sample,
                                              fastq.name))
                line['Size'] = bcf_utils.format_file_size(fastq.fsize)
                line['Nreads'] = fastq.nreads
                line['Paired_end'] = paired_end
                line['Read_number'] = fastq.read_number
                for lane in lanes:
                    lane_name = "L%d" % lane
                    try:
                        line[lane_name] = fastq.reads_by_lane[lane]
                    except:
                        line[lane_name] = ''

    @property
    def lane_names(self):
        """
        Return list of lane names (e.g. ['L1','L2',...])
        """
        return [("L%d" % l) for l in self._lanes]

    @property
    def raw(self):
        """
        Return the 'raw' statistics TabFile instance
        """
        return self._stats

    def report_full_stats(self, out_file=None, fp=None):
        """
        Report all statistics gathered for all FASTQs

        Essentially a dump of all the data.

        Arguments:
          out_file (str): name of file to write report to
            (used if 'fp' is not supplied)
          fp (File): File-like object open for writing (defaults
            to stdout if 'out_file' also not supplied)
        """
        # Determine output stream
        if fp is None:
            if out_file is None:
                fpp = sys.stdout
            else:
                fpp = open(out_file, 'w')
        else:
            fpp = fp
        # Report
        self._stats.write(fp=fpp, include_header=True)
        # Close file (only if we opened it ourselves)
        if fp is None and out_file is not None:
            fpp.close()

    def report_basic_stats(self, out_file=None, fp=None):
        """
        Report the 'basic' statistics

        For each FASTQ file, report the following information:

        - Project name
        - Sample name
        - FASTQ file name (without leading directory)
        - Size (human-readable)
        - Nreads (number of reads)
        - Paired_end ('Y' for paired-end, 'N' for single-end)

        Arguments:
          out_file (str): name of file to write report to
            (used if 'fp' is not supplied)
          fp (File): File-like object open for writing (defaults
            to stdout if 'out_file' also not supplied)
        """
        # Determine output stream
        if fp is None:
            if out_file is None:
                fpp = sys.stdout
            else:
                fpp = open(out_file, 'w')
        else:
            fpp = fp
        # Report: copy just the basic columns into a new table
        stats = TabFile(column_names=('Project',
                                      'Sample',
                                      'Fastq',
                                      'Size',
                                      'Nreads',
                                      'Paired_end'))
        for line in self._stats:
            data = [line[c] for c in stats.header()]
            stats.append(data=data)
        stats.write(fp=fpp, include_header=True)
        # Close file (only if we opened it ourselves)
        if fp is None and out_file is not None:
            fpp.close()

    def report_per_lane_sample_stats(self, out_file=None, fp=None):
        """
        Report of reads per sample in each lane

        Reports the number of reads for each sample in each
        lane plus the total reads for each lane.

        Example output:

        Lane 1
        Total reads = 182851745
        - KatyDobbs/KD-K1	79888058	43.7%
        - KatyDobbs/KD-K3	97854292	53.5%
        - Undetermined_indices/lane1	5109395	2.8%
        ...

        Arguments:
          out_file (str): name of file to write report to
            (used if 'fp' is not supplied)
          fp (File): File-like object open for writing (defaults
            to stdout if 'out_file' also not supplied)
        """
        # Determine output stream
        if fp is None:
            if out_file is None:
                fpp = sys.stdout
            else:
                fpp = open(out_file, 'w')
        else:
            fpp = fp
        # Report each lane in turn, using only R1 rows with a
        # non-empty count for that lane
        lanes = self.lane_names
        for lane in lanes:
            lane_number = int(lane[1:])
            samples = filter(lambda x: x['Read_number'] == 1 and
                             bool(x[lane]),
                             self._stats)
            try:
                total_reads = sum([int(s[lane]) for s in samples])
            except Exception as ex:
                # Diagnose which sample(s) had a bad count before
                # re-raising the original exception
                for s in samples:
                    try:
                        int(s[lane])
                    except ValueError:
                        logging.critical("Bad value for read count in "
                                         "lane %s sample %s: '%s'" %
                                         (lane, s['Sample'], s[lane]))
                raise ex
            fpp.write("\nLane %d\n" % lane_number)
            fpp.write("Total reads = %d\n" % total_reads)
            for sample in samples:
                sample_name = "%s/%s" % (sample['Project'],
                                         sample['Sample'])
                nreads = float(sample[lane])
                fpp.write("- %s\t%d\t%.1f%%\n" % (sample_name,
                                                  nreads,
                                                  nreads/total_reads*100.0))
        # Close file (only if we opened it ourselves)
        if fp is None and out_file is not None:
            fpp.close()

    def report_per_lane_summary_stats(self, out_file=None, fp=None):
        """
        Report summary of total and unassigned reads per-lane

        Arguments:
          out_file (str): name of file to write report to
            (used if 'fp' is not supplied)
          fp (File): File-like object open for writing (defaults
            to stdout if 'out_file' also not supplied)
        """
        # Determine output stream
        if fp is None:
            if out_file is None:
                fpp = sys.stdout
            else:
                fpp = open(out_file, 'w')
        else:
            fpp = fp
        # Set up TabFile to hold the data collected
        per_lane_stats = TabFile(column_names=('Lane',
                                               'Total reads',
                                               'Assigned reads',
                                               'Unassigned reads',
                                               '%assigned',
                                               '%unassigned'))
        # Initialise counts for each lane
        assigned = {}
        unassigned = {}
        for lane in self.lane_names:
            assigned[lane] = 0
            unassigned[lane] = 0
        # Count assigned and unassigned (= undetermined) reads,
        # using R1 rows only and skipping index reads
        for line in filter(
                lambda x: x['Read_number'] == 1 and
                not IlluminaFastq(x['Fastq']).is_index_read,
                self._stats):
            if line['Project'] != 'Undetermined_indices':
                counts = assigned
            else:
                counts = unassigned
            for lane in self.lane_names:
                if line[lane]:
                    try:
                        counts[lane] += line[lane]
                    except KeyError:
                        counts[lane] = line[lane]
        # Write out data for each lane
        for lane in self.lane_names:
            lane_number = int(lane[1:])
            assigned_reads = assigned[lane]
            try:
                unassigned_reads = unassigned[lane]
            except KeyError:
                # Lane doesn't have any unassigned reads
                unassigned_reads = 0
            total_reads = assigned_reads + unassigned_reads
            # Guard against division by zero for empty lanes
            if total_reads > 0:
                percent_assigned = float(assigned_reads)/ \
                                   float(total_reads)*100.0
                percent_unassigned = float(unassigned_reads)/ \
                                     float(total_reads)*100.0
            else:
                percent_assigned = 0.0
                percent_unassigned = 0.0
            per_lane_stats.append(data=("Lane %d" % lane_number,
                                        total_reads,
                                        assigned_reads,
                                        unassigned_reads,
                                        "%.2f" % percent_assigned,
                                        "%.2f" % percent_unassigned))
        # Write to file
        per_lane_stats.write(fp=fpp, include_header=True)
        # Close file (only if we opened it ourselves)
        if fp is None and out_file is not None:
            fpp.close()
def report_per_lane_summary_stats(self, out_file=None, fp=None): """ Report summary of total and unassigned reads per-lane Arguments: out_file (str): name of file to write report to (used if 'fp' is not supplied) fp (File): File-like object open for writing (defaults to stdout if 'out_file' also not supplied) """ # Determine output stream if fp is None: if out_file is None: fpp = sys.stdout else: fpp = open(out_file, 'w') else: fpp = fp # Set up TabFile to hold the data collected per_lane_stats = TabFile(column_names=('Lane', 'Total reads', 'Assigned reads', 'Unassigned reads', '%assigned', '%unassigned')) # Initialise counts for each lane assigned = {} unassigned = {} for lane in self.lane_names: assigned[lane] = 0 unassigned[lane] = 0 # Count assigned and unassigned (= undetermined) reads for line in filter( lambda x: x['Read_number'] == 1 and not IlluminaFastq(x[ 'Fastq']).is_index_read, self._stats): if line['Project'] != 'Undetermined_indices': counts = assigned else: counts = unassigned for lane in self.lane_names: if line[lane]: try: counts[lane] += line[lane] except KeyError: counts[lane] = line[lane] # Write out data for each lane for lane in self.lane_names: lane_number = int(lane[1:]) assigned_reads = assigned[lane] try: unassigned_reads = unassigned[lane] except KeyError: # lane doesn't have any unassigned reads unassigned_reads = 0 total_reads = assigned_reads + unassigned_reads if total_reads > 0: percent_assigned = float(assigned_reads)/ \ float(total_reads)*100.0 percent_unassigned = float(unassigned_reads)/ \ float(total_reads)*100.0 else: percent_assigned = 0.0 percent_unassigned = 0.0 per_lane_stats.append(data=("Lane %d" % lane_number, total_reads, assigned_reads, unassigned_reads, "%.2f" % percent_assigned, "%.2f" % percent_unassigned)) # Write to file per_lane_stats.write(fp=fpp, include_header=True) # Close file if fp is None and out_file is not None: fpp.close()
shutil.rmtree(working_dir) # Report the stats if args.stats_file is not None: # Output column names stats_file = os.path.abspath(args.stats_file) nreads_col = "Nreads%s" % ('' if args.suffix is None else args.suffix) umis_col = "Distinct_UMIs%s" % ('' if args.suffix is None else args.suffix) if not (os.path.isfile(stats_file) and args.append): # Create new stats file if well_list is not None: # Initialise barcode and sample names from well list stats_data = TabFile(column_names=('Barcode', 'Sample')) for barcode in well_list.barcodes(): stats_data.append(data=(barcode, well_list.sample(barcode))) else: # Barcodes from collected data stats_data = TabFile(column_names=('Barcode', )) for barcode in stats.barcodes(): stats_data.append(data=(barcode, )) else: # Append to an existing file stats_data = TabFile(filen=stats_file, first_line_is_header=True) # Add new columns of data stats_data.appendColumn(nreads_col) stats_data.appendColumn(umis_col) # Populate columns for data_line in stats_data: barcode = data_line['Barcode'] try: