Code example #1
File: test_metagene.py Project: zzygyx9119/plastid
def test_group_regions_make_windows_finds_masks():
    crossmap = GenomeHash(_MASKS)
    for flank_up, flank_down in _FLANKS:
        for test_name, test_group in _DO_GENERATE_MAX_WINDOW.items():
            result_group = _DO_GENERATE_MAX_WINDOW_RESULTS_MASKED[
                "%s_%s_%s" % (test_name, flank_up, flank_down)]
            yield check_maximal_window, test_name, crossmap, test_group, [
                result_group
            ], flank_up, flank_down
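These test functions follow the nose generator-test convention: each `yield` produces a check callable followed by its arguments, and the test runner calls the callable once per yielded tuple. A minimal sketch of that dispatch (a hypothetical driver, not part of plastid):

def run_generated_tests(generator):
    # call each yielded check with its bundled arguments, e.g.
    # check_maximal_window(test_name, crossmap, test_group, results, flank_up, flank_down)
    for case in generator():
        check, args = case[0], case[1:]
        check(*args)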
Code example #2
    def test_genomehash_create_from_list(self):
#        """Test creation `GenomeHash`es from lists without loss of information"""
        gh = GenomeHash(self.transcripts,do_copy=True)
        found    = sorted(list(gh.feature_dict.values()),key=_name_sort)
        expected = sorted(list(self.tx_dict.values()),key=_name_sort)
        self.assertEqual(len(expected), len(found),
                         "Features lost in creation of GenomeHash from list. Expected %s, found %s." % (len(expected), len(found)))
        self.assertEqual(expected,
                         found,
                         "Features lost in creation of GenomeHash from list:\nFirst:\n%s\nSecond:\n%s" % (expected, found))
Code example #3
    @classmethod
    def setUpClass(cls):
        """Set up test data for `TestGenomeHash`"""
        cls.binsize = 10000

        cls.transcripts    = list(BED_Reader(CommentReader(open(REF_FILES["100transcripts_bed"])),return_type=Transcript))
        cls.coding_regions = list(BED_Reader(CommentReader(open(REF_FILES["100cds_bed"])),return_type=Transcript))
        cls.coding_antisense = list(BED_Reader(CommentReader(open(REF_FILES["100cds_antisense_bed"])),return_type=Transcript))

        cls.tx_dict  = { X.get_name() : X for X in cls.transcripts }
        cls.cds_dict = { X.get_name() : X for X in cls.coding_regions }
        cls.as_cds_dict = { X.get_name() : X for X in cls.coding_antisense }

        cls.tx_hash     = GenomeHash(cls.tx_dict,do_copy=False,binsize=cls.binsize)
        cls.cds_hash    = GenomeHash(cls.cds_dict,do_copy=False,binsize=cls.binsize)
        cls.as_cds_hash = GenomeHash(cls.as_cds_dict,do_copy=False,binsize=cls.binsize)
        
        cls.shuffled_indices = list(range(len(cls.transcripts)))
        shuffle(cls.shuffled_indices)
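The hashes built here are queried by overlap elsewhere on this page (`gene_hash[gene_ivc_raw]` in example #11, `known_hash.get_nearby_features(ivc)` in example #12). A minimal sketch of both query styles, using illustrative coordinates:

from plastid import GenomeHash, GenomicSegment, SegmentChain

# build a hash over two features (coordinates are illustrative only)
features = [SegmentChain.from_str("chrI:100-200(+)"),
            SegmentChain.from_str("chrI:5000-5600(+)")]
gh = GenomeHash(features, do_copy=False, binsize=10000)

query = SegmentChain(GenomicSegment("chrI", 150, 160, "+"))
overlapping = gh[query]                 # features overlapping the query region
nearby = gh.get_nearby_features(query)  # features in nearby bins; may not overlap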
Code example #4
File: test_metagene.py Project: zzygyx9119/plastid
def test_group_regions_make_windows_multiple_genes():
    empty_hash = GenomeHash([])
    flank_up = 50
    flank_down = 100
    for test_name, test_group in _DO_GENERATE_MULTI_GENE.items():
        result_groups = _DO_GENERATE_MULTI_GENE_RESULTS["%s_%s_%s" %
                                                        (test_name, flank_up,
                                                         flank_down)]
        yield check_maximal_window, test_name, empty_hash, test_group, result_groups, flank_up, flank_down
Code example #5
    def test_genomehash_create_from_dict(self):
#        """Test creation `GenomeHash`es from dictionaries without loss of information"""
        gh = GenomeHash(self.tx_dict,do_copy=True)
        
        found    = sorted(list(gh.feature_dict.values()),key=_name_sort)
        expected = sorted(list(self.tx_dict.values()),key=_name_sort)
        self.assertEqual(len(expected), len(found),
                         "Features lost in creation of GenomeHash from dict. Expected %s, found %s." % (len(expected), len(found)))
        self.assertEqual(expected,
                         found,
                         "Features lost in creation of GenomeHash from dict")
Code example #6
    def test_genomehash_update_from_dict(self):
#        """Test addition to a `GenomeHash` from a dictionary without loss of features
#        
#        1. Add features to an empty dictionary
#        
#        2. Add features to a non-empty dictionary
#        """
        tuple_sort = lambda x: _name_sort(x[1])
        gh = GenomeHash({},do_copy=True)
        tx_items  = sorted(list(self.tx_dict.items()),key=tuple_sort)
        tx_values = [X[1] for X in tx_items]
        dict1 = dict(tx_items[:50])
        dict2 = dict(tx_items[50:])
        self.assertEqual(len(dict1),50)
        self.assertEqual(len(dict2),50)

        # check values, as opposed to items, because keys of feature_dict
        # are unique numerical IDs as opposed to names
        gh.update(dict1)
        self.assertEqual(sorted(list(gh.feature_dict.values()), key=_name_sort),
                         tx_values[:50],
                         "Features lost in first update of empty GenomeHash from dict")
    
        gh.update(dict2)
        self.assertEqual(sorted(list(gh.feature_dict.values()), key=_name_sort),
                         tx_values,
                         "Features lost in second update of non-empty GenomeHash from dict")
Code example #7
File: test_metagene.py Project: zzygyx9119/plastid
def test_group_regions_make_windows_finds_maximal_window():
    # test case, 3 transcripts same start
    # test case, 3 transcripts, different starts
    # test case, 3 transcripts, 2 share start, 1 different
    # test case including ncRNAs
    # for all, with and without changed upstream, downstream flanks
    empty_hash = GenomeHash([])
    for flank_up, flank_down in _FLANKS:
        for test_name, test_group in _DO_GENERATE_MAX_WINDOW.items():
            result_group = _DO_GENERATE_MAX_WINDOW_RESULTS[
                "%s_%s_%s" % (test_name, flank_up, flank_down)]
            yield check_maximal_window, test_name, empty_hash, test_group, [
                result_group
            ], flank_up, flank_down
Code example #8
    def test_genomehash_update_from_list(self):
#        """Test addition to a `GenomeHash` from a list without loss of features
#        
#        1. Add features to an empty dictionary
#        
#        2. Add features to a non-empty dictionary
#        """
        gh = GenomeHash({},do_copy=True)
        tx_list = sorted(list(self.tx_dict.values()),key=_name_sort)
        self.assertGreater(len(tx_list),0)

        gh.update(tx_list[:50])
        self.assertEqual(sorted(list(gh.feature_dict.values()), key=_name_sort),
                         tx_list[:50],
                         "Features lost in update of empty GenomeHash from list")
    
        gh.update(tx_list[50:])
        self.assertEqual(sorted(list(gh.feature_dict.values()), key=_name_sort),
                         tx_list,
                         "Features lost in update of non-empty GenomeHash from list")
Code example #9
    'YMR194C_B_mRNA_0':
    ['YMR194C_B_mRNA_0:0-324^396-729(-)', 'YMR194C_B_mRNA_0:0-328^400-729(-)'],
    'YPL249C_A_mRNA_0':
    ['YPL249C_A_mRNA_0:0-410^648-697(-)', 'YPL249C_A_mRNA_0:0-417^655-697(-)']
}
unmatched_query_juncs = {
    K: [SegmentChain.from_str(X) for X in V]
    for K, V in unmatched_query_juncs.items()
}
"""Query junctions with no known matches"""

unmatched_noncan_query_juncs = [
    "YNL130C:0-23^145-180(-)",
    "YNL130C:0-53^165-180(-)",
    "YNL130C:0-70^141-180(-)",
    "YNL130C:0-49^121-180(-)",
]
unmatched_noncan_query_juncs = [
    SegmentChain.from_str(X) for X in unmatched_noncan_query_juncs
]
"""Query junctions without canonical splice junctions in the match range"""

repetitive_regions = [
    "YBR215W_mRNA_0:190-193(+)",  # threeprime splice site plus
    "YHL001W_mRNA_0:144-149(+)",  # fiveprime splice site plus
    "YIL133C_mRNA_0:935-940(-)",  # threeprime splice site minus
    "YMR194C_B_mRNA_0:325-328(-)",  # fiveprime splice site minus
]
cross_hash = GenomeHash(
    [SegmentChain(GenomicSegment.from_str(X)) for X in repetitive_regions])
cross_hash_seqs = {X.chrom for X in cross_hash.feature_dict.values()}
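The `cross_hash` built above is used to flag query junctions whose splice sites fall in repetitive regions. A minimal sketch of such an overlap test (the query coordinates are illustrative):

from plastid import GenomicSegment, SegmentChain

# illustrative query spanning the first repetitive region above
query = SegmentChain(GenomicSegment("YBR215W_mRNA_0", 185, 195, "+"))
hits = cross_hash[query]       # repetitive features overlapping the query
is_repetitive = len(hits) > 0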
Code example #10
File: test_bigbed.py Project: zzygyx9119/plastid
    @classmethod
    def setUpClass(cls):
        cls.cols = [3, 4, 5, 6, 8, 9, 12]
        cls.bedfiles = {}
        cls.bbfiles = {}
        for col in cls.cols:
            cls.bedfiles[col] = resource_filename(
                "plastid",
                "test/data/annotations/100transcripts_bed%s.bed" % col)
            cls.bbfiles[col] = resource_filename(
                "plastid",
                "test/data/annotations/100transcripts_bed%s.bb" % col)

        cls.chrom_sizes = {}
        for line in open(
                resource_filename("plastid",
                                  "test/data/annotations/sacCer3.sizes")):
            chrom, size = line.strip().split("\t")
            cls.chrom_sizes[chrom] = int(size)

        cls.bbs = {
            K: BigBedReader(cls.bbfiles[K], return_type=Transcript)
            for K in cls.cols
        }

        # comparisons against genome hash
        cls.binsize = 10000
        transcripts = list(
            BED_Reader(open(cls.bedfiles[12]), return_type=Transcript))

        cls.tx_dict = {}
        cls.cds_dict = {}
        cls.as_cds_dict = {}
        for tx in transcripts:
            txid = tx.get_name()
            cls.tx_dict[txid] = tx
            cds_ivc = tx.get_cds()
            cds_ivc.attr["ID"] = txid
            if cds_ivc.length > 0:
                cls.cds_dict[txid] = tx.get_cds()
                cls.as_cds_dict[txid] = tx.get_cds().get_antisense()
                cls.as_cds_dict[txid].attr["ID"] = txid

        cls.tx_hash = GenomeHash(cls.tx_dict,
                                 do_copy=False,
                                 binsize=cls.binsize)
        cls.cds_hash = GenomeHash(cls.cds_dict,
                                  do_copy=False,
                                  binsize=cls.binsize)
        cls.as_cds_hash = GenomeHash(cls.as_cds_dict,
                                     do_copy=False,
                                     binsize=cls.binsize)

        cls.shuffled_indices = list(range(len(transcripts)))
        shuffle(cls.shuffled_indices)

        cls.flybbfile = resource_filename(
            "plastid", "test/data/annotations/dmel-all-no-analysis-r5.54.bb")
        cls.flybedfile = resource_filename(
            "plastid", "test/data/annotations/dmel-all-no-analysis-r5.54.bed")

        # BigBed files with and without extra columns, with and without autoSql descriptions
        cls.bb_bonuscols = {
            "bb4as":
            resource_filename(
                "plastid",
                "test/data/annotations/100transcripts_bed4plus_bonus_as.bb"),
            "bb12as":
            resource_filename(
                "plastid",
                "test/data/annotations/100transcripts_bed12plus_bonus_as.bb"),
            "bb4no_as":
            resource_filename(
                "plastid",
                "test/data/annotations/100transcripts_bed4plus_bonus_no_as.bb"
            ),
            "bb12no_as":
            resource_filename(
                "plastid",
                "test/data/annotations/100transcripts_bed12plus_bonus_no_as.bb"
            ),
        }
        cls.bonus_col_file = resource_filename(
            "plastid", "test/data/annotations/bonus_bed_columns.txt")

        # BigBed file with indexes
        cls.bb_indexed = resource_filename(
            "plastid", "test/data/annotations/dmel-bonus-cols.bb")
Code example #11
File: cs.py Project: zzygyx9119/plastid
def process_partial_group(transcripts, mask_hash, printer):
    """Correct boundaries of merged genes, as described in :func:`do_generate`

    Parameters
    ----------
    transcripts : dict
        Dictionary mapping unique transcript IDs to |Transcripts|.
        This set should be complete, in the sense that it contains
        every transcript that has any chance of overlapping another
        (e.g. all transcripts on the same chromosome and strand).

    mask_hash : |GenomeHash|
        |GenomeHash| of regions to exclude from analysis

    printer : file-like
        Any object with a ``write()`` method, used for progress messages

    Returns
    -------
    :class:`pandas.DataFrame`
        Table of merged gene positions

    :class:`pandas.DataFrame`
        Table of adjusted transcript positions

    :class:`dict`
        Dictionary mapping raw gene names to merged gene names
    """
    gene_table = {
        "region": [],
        "transcript_ids": [],
        "exon_unmasked": [],
        "exon": [],
        "masked": [],
        "utr5": [],
        "cds": [],
        "utr3": [],
        "exon_bed": [],
        "utr5_bed": [],
        "cds_bed": [],
        "utr3_bed": [],
        "masked_bed": [],
    }

    # data table for transcripts
    transcript_table = {
        "region": [],
        "exon": [],
        "utr5": [],
        "cds": [],
        "utr3": [],
        "masked": [],
        "exon_unmasked": [],
        "transcript_ids": [],
        "exon_bed": [],
        "utr5_bed": [],
        "cds_bed": [],
        "utr3_bed": [],
        "masked_bed": [],
    }

    keycombos = list(itertools.permutations(("utr5", "cds", "utr3"), 2))

    # merge genes that share exons & write output
    printer.write("Collapsing genes that share exons ...")
    merged_genes = merge_genes(transcripts)

    # remap transcripts to merged genes
    # and vice-versa
    merged_gene_tx = {}
    tx_merged_gene = {}
    printer.write("Mapping transcripts to merged genes...")
    for txid in transcripts:
        my_tx = transcripts[txid]
        my_gene = my_tx.get_gene()
        my_merged = merged_genes[my_gene]
        tx_merged_gene[txid] = my_merged
        try:
            merged_gene_tx[my_merged].append(txid)
        except KeyError:
            merged_gene_tx[my_merged] = [txid]

    # flatten merged genes
    printer.write(
        "Flattening merged genes, masking positions, and labeling subfeatures ..."
    )
    for n, (gene_id, my_txids) in enumerate(merged_gene_tx.items()):
        if n % 1000 == 0 and n > 0:
            printer.write("    %s genes ..." % n)

        my_gene_positions = []
        chroms = []
        strands = []
        for my_txid in my_txids:
            my_segmentchain = transcripts[my_txid]
            chroms.append(my_segmentchain.chrom)
            strands.append(my_segmentchain.strand)
            my_gene_positions.extend(my_segmentchain.get_position_list())

            if len(set(chroms)) > 1:
                printer.write(
                    "Skipping gene %s which contains multiple chromosomes: %s"
                    % (gene_id, ",".join(chroms)))

            if len(set(strands)) > 1:
                printer.write(
                    "Skipping gene %s which contains multiple strands: %s" %
                    (gene_id, ",".join(strands)))

        my_gene_positions = set(my_gene_positions)
        gene_ivc_raw = SegmentChain(
            *positions_to_segments(chroms[0], strands[0], my_gene_positions))
        gene_table["region"].append(gene_id)
        gene_table["transcript_ids"].append(",".join(sorted(my_txids)))
        gene_table["exon_unmasked"].append(gene_ivc_raw)

    printer.write("    %s genes total." % (n + 1))

    # mask genes
    printer.write("Masking positions and labeling subfeature positions ...")
    gene_hash = GenomeHash(gene_table["exon_unmasked"], do_copy=False)

    for n, (gene_id, gene_ivc_raw) in enumerate(
            zip(gene_table["region"], gene_table["exon_unmasked"])):
        if n % 2000 == 0:
            printer.write("    %s genes ..." % n)

        my_chrom = gene_ivc_raw.spanning_segment.chrom
        my_strand = gene_ivc_raw.spanning_segment.strand

        masked_positions = []
        nearby_genes = gene_hash[gene_ivc_raw]

        # don't mask out positions from identical gene
        gene_ivc_raw_positions = gene_ivc_raw.get_position_set()
        nearby_genes = [
            X for X in nearby_genes
            if X.get_position_set() != gene_ivc_raw_positions
        ]
        for gene in nearby_genes:
            masked_positions.extend(gene.get_position_list())

        nearby_masks = mask_hash[gene_ivc_raw]
        for mask in nearby_masks:
            masked_positions.extend(mask.get_position_list())

        masked_positions = set(masked_positions)

        gene_positions_raw = gene_ivc_raw.get_position_set()
        mask_ivc_positions = gene_positions_raw & masked_positions
        total_mask_ivc = SegmentChain(*positions_to_segments(
            my_chrom, my_strand, mask_ivc_positions),
                                      ID=gene_id)
        gene_table["masked"].append(total_mask_ivc)
        gene_table["masked_bed"].append(total_mask_ivc.as_bed())

        gene_post_mask = gene_positions_raw - masked_positions
        gene_post_mask_ivc = SegmentChain(*positions_to_segments(
            my_chrom, my_strand, gene_post_mask),
                                          ID=gene_id)
        gene_table["exon"].append(gene_post_mask_ivc)
        gene_table["exon_bed"].append(gene_post_mask_ivc.as_bed())

        masked_positions = total_mask_ivc.get_position_set()
        tmp_positions = {
            "utr5": set(),
            "cds": set(),
            "utr3": set(),
        }
        txids = sorted(merged_gene_tx[gene_id])
        chrom = gene_post_mask_ivc.chrom
        strand = gene_post_mask_ivc.strand

        # pool transcript positions
        for txid in txids:
            transcript = transcripts[txid]

            utr5pos = transcript.get_utr5().get_position_set()
            cdspos = transcript.get_cds().get_position_set()
            utr3pos = transcript.get_utr3().get_position_set()

            tmp_positions["utr5"] |= utr5pos
            tmp_positions["cds"] |= cdspos
            tmp_positions["utr3"] |= utr3pos

        # eliminate positions in which CDS & UTRs overlap from each transcript
        for txid in txids:
            transcript = transcripts[txid]
            transcript_positions = {
                "utr5": transcript.get_utr5().get_position_set(),
                "cds": transcript.get_cds().get_position_set(),
                "utr3": transcript.get_utr3().get_position_set(),
            }

            for key1, key2 in keycombos:
                transcript_positions[key1] -= tmp_positions[key2]
                transcript_positions[key1] -= masked_positions

            transcript_table["region"].append(txid)

            # all unmasked positions
            my_chain = SegmentChain(*positions_to_segments(
                chrom, strand,
                transcript.get_position_set() - masked_positions),
                                    ID=txid)
            transcript_table["exon"].append(str(my_chain))
            transcript_table["exon_bed"].append(my_chain.as_bed())

            # all uniquely-labeled unmasked positions
            for k, v in transcript_positions.items():
                my_chain = SegmentChain(*positions_to_segments(
                    chrom, strand, v),
                                        ID=txid)
                transcript_table[k].append(str(my_chain))
                transcript_table["%s_bed" % k].append(my_chain.as_bed())

            total_mask_ivc.attr["ID"] = txid
            transcript_table["masked"].append(str(total_mask_ivc))
            transcript_table["masked_bed"].append(total_mask_ivc.as_bed())
            transcript_table["exon_unmasked"].append(str(transcript))
            transcript_table["transcript_ids"].append(txid)

        tmp_positions2 = copy.deepcopy(tmp_positions)
        for k1, k2 in keycombos:
            tmp_positions[k1] -= tmp_positions2[k2]
            tmp_positions[k1] -= masked_positions

        for k in tmp_positions:
            my_chain = SegmentChain(*positions_to_segments(
                chrom, strand, tmp_positions[k]),
                                    ID=gene_id)
            gene_table[k].append(str(my_chain))
            gene_table["%s_bed" % k].append(my_chain.as_bed())

    printer.write("    %s genes total." % (n + 1))

    # cast SegmentChains/Transcripts to strings to keep numpy from unpacking them
    conversion_keys = [
        "exon", "utr5", "cds", "utr3", "masked", "exon_unmasked"
    ]
    for k in conversion_keys:
        gene_table[k] = [str(X) for X in gene_table[k]]
        transcript_table[k] = [str(X) for X in transcript_table[k]]

    gene_df = pd.DataFrame(gene_table)
    gene_df.sort_values(["region"], inplace=True)

    transcript_df = pd.DataFrame(transcript_table)
    transcript_df.sort_values(["region"], inplace=True)

    return gene_df, transcript_df, merged_genes
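Per the docstring above, `process_partial_group` expects a complete dictionary of potentially overlapping transcripts, a |GenomeHash| of masked regions, and a printer-like object. A minimal sketch of a call, assuming a hypothetical `transcripts.bed` annotation and no masked regions:

import sys
from plastid import BED_Reader, GenomeHash, Transcript
from plastid.bin.cs import process_partial_group  # assumed import path for the script above

class StderrPrinter(object):
    """Any object with a write() method works as the printer argument."""
    def write(self, msg):
        sys.stderr.write(str(msg) + "\n")

# hypothetical input; all transcripts should share one chromosome and strand
transcripts = {tx.get_name(): tx
               for tx in BED_Reader(open("transcripts.bed"), return_type=Transcript)}
mask_hash = GenomeHash([])  # no regions excluded from analysis

gene_df, transcript_df, merged_genes = process_partial_group(
    transcripts, mask_hash, StderrPrinter())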
Code example #12
File: slidejuncs.py Project: zzygyx9119/plastid
def main(argv=sys.argv[1:]):
    """Command-line program
    
    Parameters
    ----------
    argv : list, optional
        A list of command-line arguments, which will be processed
        as if the script were called from the command line if
        :py:func:`main` is called directly.

        Default: `sys.argv[1:]` (the command-line arguments, if the
        script is invoked from the command line)
    """
    sp = SequenceParser()
    mp = MaskParser()
    bp = BaseParser()
    
    parser = argparse.ArgumentParser(description=format_module_docstring(__doc__),
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     parents=[bp.get_parser(),sp.get_parser(),mp.get_parser()],
                                     )
    parser.add_argument("--maxslide",type=int,default=10,
                        help="Maximum number of nt to search 5\' and 3\' of intron"+
                             " boundaries (Default: 10)")
    parser.add_argument("--ref",type=str,metavar="ref.bed",default=None,
                        help="Reference file describing known splice junctions")
    parser.add_argument("--slide_canonical",action="store_true",default=False,
                        help="Slide junctions to canonical junctions if present within equal support region")
    parser.add_argument("infile",type=str,metavar="input.bed",
                        help="BED file describing discovered junctions")
    parser.add_argument("outbase",type=str,
                        help="Basename for output files")
    args = parser.parse_args(argv)
    bp.get_base_ops_from_args(args)
    
    printer.write("Opening genome from %s..." % args.sequence_file)
    genome = sp.get_seqdict_from_args(args)
    
    # load crossmap    
    cross_hash = mp.get_genome_hash_from_args(args)

    # load ref junctions
    if args.ref is not None:
        printer.write("Loading reference junctions from %s" % args.ref)
        known_hash = GenomeHash(list(BED_Reader(open(args.ref))),do_copy=False)
    else:
        known_hash = GenomeHash()

    # set up variables    
    canonicals_plus = [("GT","AG"),
                       ("GC","AG")
                      ]
    
    canonicals_minus = [("CT","AC"),
                        ("CT","GC")
                       ]
    
    known_in_range     = 0
    canonical_in_range = 0
    repetitive         = 0
    untouched          = 0
    c = 0
    
    seen_already = set()

    outfiles = {
                 "repetitive" : "%s_repetitive.bed" % args.outbase,
                 "known"      : "%s_shifted_known.bed" % args.outbase,
                 "canonical"  : "%s_shifted_canonical.bed" % args.outbase,
                 "untouched"  : "%s_untouched.bed" % args.outbase,
                }
    outfiles = { K : argsopener(V,args,"w") for K,V in outfiles.items() }

    # process data
    printer.write("Opening junctions from %s..." % args.infile)
    for ivc in BED_Reader(CommentReader(opener(args.infile))):
        processed = False
        tup = None

        if c % 1000 == 0 and c > 0:
            printer.write("Processed: %s\tknown: %s\tshifted to canonical: %s\trepetitive: %s\tuntouched: %s" % \
                    (c, known_in_range, canonical_in_range, repetitive, untouched))
                   
        assert len(ivc) == 2
        strand = ivc.strand
        
        minus_range, plus_range = find_match_range(ivc,genome,args.maxslide)
        
        # see if either end of splice junction +- match_range lands in repetitive areas of genome
        if covered_by_repetitive(ivc,minus_range,plus_range,cross_hash):
            repetitive += 1
            outfiles["repetitive"].write(ivc.as_bed())
            processed = True

        # see if one or more known junctions in range
        if not processed and args.ref is not None:
            # find_known_in_range(query_ivc,minus_range,plus_range,knownjunctions)
            known_juncs = find_known_in_range(ivc,minus_range,plus_range,known_hash.get_nearby_features(ivc))
            if len(known_juncs) > 0:
                known_in_range += 1
                for my_known in known_juncs:
                    tup = get_junction_tuple(my_known)
                    if tup not in seen_already:
                        outfiles["known"].write(my_known.as_bed())
                        seen_already.add(tup)
                    
                processed = True
            
        # see if one or more canonical junctions in range
        if not processed and args.slide_canonical:
            canonicals = canonicals_plus if strand == "+" else canonicals_minus
            #find_canonicals_in_range(query_ivc,minus_range,plus_range,genome,canonicals)
            canonical_juncs = find_canonicals_in_range(ivc,minus_range,plus_range,genome,canonicals)
            if len(canonical_juncs) > 0:
                canonical_in_range += 1
                for can in canonical_juncs:
                    tup = get_junction_tuple(can)
                    if tup not in seen_already:
                        outfiles["canonical"].write(can.as_bed())
                        seen_already.add(tup)

                processed = True
                    
        if not processed:
            outfiles["untouched"].write(ivc.as_bed())
            untouched += 1
            
        c += 1

    # save output
    printer.write("Totals: %s\tknown: %s\tshifted to canonical: %s\trepetitive: %s\tuntouched: %s" % \
            (c, known_in_range, canonical_in_range, repetitive, untouched))    

    for v in outfiles.values():
        v.close()
    
    printer.write("Done.")