def main():
    """" Set up the CLI """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("input_files", nargs="+",
                        default=[],
                        metavar="INFILE",
                        help="List of hit tables to process")
    parser.add_argument("-o", "--outfile", dest="outfile",
                        metavar="OUTFILE",
                        help="Write count table to OUTFILE")
    parser.add_argument("-r", "--rank", dest="ranks", default=None,
                        metavar="RANK", action="append",
                        help=""" Rank(s) to collect counts on. Use flag
                        multiple
                        times to specify multiple ranks. If multiple values
                        given, one table produced for each with rank name
                        appended to file name. Defaults to all major ranks
                        between phylum and species. Corresponds to rank names
                        in nodes.dmp. To see list run:
                        'cut -f5 nodes.dmp | uniq | sort | uniq'
                        in ncbi tax dir. Will also accept 'organism' to mean
                        no rank (ie, just the organism name).""")
    parser.add_argument(
        "-s",
        "--collapseToDomain",
        default=False,
        action="store_true",
        help="Collapse all taxa below given rank down to "
             "superkingdom/domain. EG: in the genus output, anything "
             "assigned to Cyanobactia, will be lumped in with all "
             "other bacteria")
    parser.add_argument(
            "--proportional",
            dest="proportional",
            default=False,
            action="store_true",
            help="""When using tophit or toporg, redistribute proportionally
            instead of winner take all""")
    parser.add_argument(
        "-R",
        "--printRank",
        dest="printRanks",
        action="append",
        help="Include indeicated rank(s) in lineage of printed taxa. "
             "Will be ignored if beyond the rank of the taxa "
             "(IE We can't include species if the taxon being counted "
             "is genus)")

    # option for deconvoluting clusters or assemblies
    add_weight_arguments(parser, multiple=True)

    # cutoff options
    add_count_arguments(parser)

    # format, tax dir, and more
    add_taxon_arguments(
        parser,
        choices={
            'countMethod': (
                'LCA',
                'all',
                'first',
                'most',
                'tophit',
                'toporg',
                'consensus')})

    # log level and help
    add_universal_arguments(parser)
    arguments = parser.parse_args()
    setup_logging(arguments)

    if arguments.proportional and \
            arguments.countMethod not in ['tophit', 'toporg']:
        parser.error("--proportinal only has meaning "
                     "if using tophit or toporg")

    if len(arguments.input_files) == 0:
        parser.error("Must supply at least one m8 file to parse")

    # Handle the case where Galaxy tries to set None as a string
    arguments.ranks = checkNoneOption(arguments.ranks)
    arguments.printRanks = checkNoneOption(arguments.printRanks)

    logging.info("Printing out ranks: %r", arguments.ranks)

    # Set defaults and check for some conflicts
    if arguments.ranks is None and arguments.taxdir is None:
        # using hit names only
        arguments.ranks = [ORG_RANK]
        if arguments.printRanks is not None:
            parser.error("Display ranks are not used without taxonomic info")
    else:
        if arguments.taxdir is None:
            parser.error("Cannot select ranks without a taxonomy")
        if arguments.ranks is None:
            # set a default
            arguments.ranks = [
                'phylum',
                'class',
                'order',
                'family',
                'genus',
                'species']

        try:
            # Make sure the rank lists make sense
            arguments.ranks = cleanRanks(arguments.ranks)
            if arguments.printRanks is not None:
                arguments.printRanks = cleanRanks(arguments.printRanks)
        except Exception as e:
            parser.error(str(e))

    # load weights file
    sequenceWeights = loadSequenceWeights(arguments.weights)

    # only print to stdout if there is a single rank
    if len(arguments.ranks) > 1 and arguments.outfile is None:
        parser.error("STDOUT only works if a single rank is chosen!")

    # Because rank is used in parsing hits, we can only do multiple ranks for
    # certain kinds of count methods
    if len(arguments.ranks) > 1:
        rank = None
        if arguments.countMethod in ['consensus', 'most']:
            parser.error(
                "Using multiple ranks does not work with the 'consensus' "
                "or 'most' counting methods. LCA should give the same "
                "results as consensus. If you really want to do this, "
                "use a bash loop:'for rank in phylum order genus; do "
                "COMMAND -r ${rank}; done'")
    else:
        rank = arguments.ranks[0]

    # load necessary maps
    (taxonomy, hitStringMap) = readMaps(arguments)

    # parse input files
    fileCounts = {}
    totals = {}
    fileLabels = {}
    sortedLabels = []

    # Allow for file names to be preceded with TAG=
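    #  e.g. "sampleA=hits/sampleA.m8" gives the column label "sampleA",
    #  while a bare "hits/sampleA.m8" is used as its own label
    #  (file names here are hypothetical)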
    for filename in arguments.input_files:
        bits = filename.split("=", 1)
        if len(bits) > 1:
            (filetag, filename) = bits
        else:
            filetag = filename
        fileLabels[filename] = filetag
        # keep order so that column order matches arguments
        sortedLabels.append(filetag)
        fileCounts[filetag] = {}
        totals[filetag] = 0

    params = FilterParams.create_from_arguments(arguments)
    if arguments.countMethod in ('tophit', 'toporg'):
        # Process all files at once and use overall abundance to pick best hits
        from edl import redistribute
        multifile = redistribute.multipleFileWrapper(fileLabels.keys())

        if arguments.countMethod == 'tophit':
            # don't give any taxonomy, just map to accessions for
            # redistribution
            readHits = redistribute.pickBestHitByAbundance(
                multifile,
                filterParams=params,
                returnLines=False,
                winnerTakeAll=not arguments.proportional,
                parseStyle=arguments.parseStyle,
                sequenceWeights=sequenceWeights)
            # define a method to turn Hits into organisms
            hitTranslator = getHitTranslator(parseStyle=arguments.parseStyle,
                                             taxonomy=taxonomy,
                                             hitStringMap=hitStringMap)

            translateHit = lambda hit: hitTranslator.translateHit(hit=hit)[0]

        else:
            # translate to organism before finding most abundant
            readHits = redistribute.pickBestHitByAbundance(
                multifile,
                filterParams=params,
                returnLines=False,
                returnTranslations=True,
                winnerTakeAll=not arguments.proportional,
                taxonomy=taxonomy,
                hitStringMap=hitStringMap,
                parseStyle=ACCS)

            # Organisms will be returned, make translator trivial:
            translateHit = passThrough

        # use read->file mapping and hit translator to get file based counts
        #  from returned (read,Hit) pairs
        increment = 1
        for (read_name, hit) in readHits:
            file_name, read_name = read_name.split("/", 1)
            file_tag = fileLabels[unquote_plus(file_name)]
            taxon = translateHit(hit)
            taxcount = fileCounts[file_tag].setdefault(taxon, 0)
            if sequenceWeights is not None:
                increment = sequenceWeights.get(read_name, 1)
            fileCounts[file_tag][taxon] = taxcount + increment
            totals[file_tag] += increment
        logging.debug(str(totals))

    else:
        # Original way, just process each file separately
        for (filename, filetag) in fileLabels.items():
            infile = open(filename, 'r')

            hitIter = parseM8FileIter(infile,
                                      hitStringMap,
                                      params,
                                      arguments.parseStyle,
                                      arguments.countMethod,
                                      taxonomy=taxonomy,
                                      rank=rank)

            (total, counts, hitMap) = \
                countIterHits(hitIter,
                              allMethod=arguments.allMethod,
                              weights=sequenceWeights)
            fileCounts[filetag] = counts
            totals[filetag] = total

            logging.info(
                "parsed %d hits (%d unique) for %d reads from %s",
                total, len(counts), len(hitMap), filename)

            infile.close()

    printCountTablesByRank(fileCounts, totals, sortedLabels, arguments)
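
# Example invocation (illustrative; script and file names are hypothetical):
#   python count_taxa.py -r genus -r family -o counts.tsv \
#       sampleA=sampleA_vs_refseq.m8 sampleB=sampleB_vs_refseq.m8
# One table per rank is written, with the rank name appended to the OUTFILE
# name and one count column per input file.
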
def pickBestHitByAbundance(m8stream,
                           filterParams=None,
                           return_lines=True,
                           return_translations=False,
                           organismCounts=None,
                           winnerTakeAll=False,
                           sequenceWeights=None,
                           **kwargs):
    """
    Given a hit table with (potentially) multiple hits for each read.
    Select the best hit for each read. Hits are parsed from given hit
    table (m8stream) if given a FilterParams object, otherwise it is
    assumed that m8stream is an iterator over Hit objects. Remaining
    keyword arguments are used to translate hits to accessions, organisms,
    or anything else using a HitTranslator.

    Ambiguous hits (multiple 'best' hits to one read) are resolved as follows:
        given a set of reads that all hit the same list of translated hits:
            divvy up reads so that the abundance ratios change minimally

    Abundance is recorded for whatever the Hittranslator returns. If a hit
    map and taxonomy are given, this will be organisms, if only the parseStyle
    is given and it's set to ACC, then accessions will be the currency. The
    default is HITID.

    Yields (read,hit) tuples, (read, [translated hits]) tuples, or hit
    table lines.
    """
    if return_lines and return_translations:
        return_lines = False
        logger.warn("return_translations overrides return_lines!")

    # filtered hits
    if filterParams is None:
        hitIter = m8stream
    else:
        hitIter = filterM8Stream(m8stream, filterParams, return_lines=False)

    # custom function for pulling orgs from hits
    #  if no settings given, just use the hit ID as the 'organism'
    kwargs.setdefault("parseStyle", HITID)
    hitTranslator = getHitTranslator(**kwargs)

    # we need to keep track of lots of things
    orgCounts = {}
    totalReads = 0
    unambiguousReads = 0
    ambiguousReads = 0
    sameOrgCount = 0
    ambiguousHits = {}

    # Check to see if organism counts were given
    if organismCounts is not None:
        if isinstance(organismCounts, str):
            organismCounts = getOrganismCountsFromFile(organismCounts)

    # loop over hits and yield unambiguous ones
    # Save ambiguous hits and org abundances
    logger.debug(str(hitIter))
    for (read, hits) in hitIter:
        logger.debug("Read: %s" % (read))
        totalReads += 1
        hitByOrg = {}
        orgs = []
        count = 0
        for hit in hits:
            count += 1
            hitOrgs = hitTranslator.translateHit(hit)
            logger.debug("Hit: %s (%s), %s" % (hit.hit, hitOrgs, hit.score))
            orgs.extend(hitOrgs)
            for org in hitOrgs:
                if org in hitByOrg:
                    # This should be REALLY rare.
                    sameOrgCount += 1
                    sameOrgExample = (read, hit.hit, org)
                    logger.warning(
                        "Read (%s) has two best hits to same org (%s)!" %
                        (read, org))
                    # always keep the first alphabetically, for reproducibility
                    if hit.hit < hitByOrg[org].hit:
                        hitByOrg[org] = hit
                else:
                    hitByOrg[org] = hit
        orgs = tuple(sorted(set(orgs)))
        if count == 0:
            # This *should* never happen
            logger.error("No hits for %s!!!!!" % (read))
            raise Exception("Read (%s) has not hits. This shouldn't happen." %
                            (read))
        elif count == 1 or len(hitByOrg) == 1:
            logger.debug("Read is UNambiguous")
            unambiguousReads += 1
            for org in orgs:
                if sequenceWeights is not None:
                    increment = sequenceWeights.get(read, 1)
                else:
                    increment = 1
                orgCounts[org] = orgCounts.get(org, 0) + increment
            if return_lines:
                yield hit.line
            elif return_translations:
                yield (read, orgs)
            else:
                yield (read, hit)
        else:
            logger.debug("Read IS ambiguous")
            ambiguousReads += 1
            if organismCounts is None:
                # If we don't have count data to start, save these til the end
                ambiguousHits.setdefault(orgs, []).append(hitByOrg)
            else:
                # Use given counts to resolve
                for (hit, org) in assignHits(orgs, [hitByOrg],
                                             organismCounts, winnerTakeAll):
                    yield formatReturn(hit, org, return_lines,
                                       return_translations)

    logger.info("Processed %d reads:" % (totalReads))
    logger.info("Collected unambiguous counts for %d orgs from %d reads" %
                (len(orgCounts), unambiguousReads))

    # if we used given organism counts, then we are done
    if organismCounts is not None:
        return

    # otherwise, we have ambiguous reads to resolve
    logger.info("Need to resolve %d ambiguous reads hitting %d orgs" %
                (ambiguousReads, len(ambiguousHits)))

    if sameOrgCount > 0:
        elements = list(sameOrgExample)
        elements.insert(0, sameOrgCount)
        logger.warn("found %d cases where a read had an extra hit to the same "
                    "organism. For Example: %s (%s,%s)" % tuple(elements))

    # loop over ambiguous hits (grouped by possible orgs) and pick one for
    # each read
    ambiguousReads = 0
    # iterate over sorted keys for a reproducible output order
    for orgs in sorted(ambiguousHits.keys()):
        hits = ambiguousHits[orgs]
        for (hit, org) in assignHits(orgs, hits, orgCounts, winnerTakeAll):
            ambiguousReads += 1
            yield formatReturn(hit, org, return_lines, return_translations)

    logger.info("Selected top hit for %d ambiguous reads for a total of %d "
                "returned hit assignments" %
                (ambiguousReads, ambiguousReads + unambiguousReads))
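
# A minimal consumption sketch (illustrative, not from the source). It
# assumes the edl package is importable, that "hits_vs_nr.m8" is a
# hypothetical hit table, and that FilterParams() with no arguments builds
# usable default cutoffs:
#
#     from edl.blastm8 import FilterParams
#     from edl import redistribute
#
#     params = FilterParams()
#     with open("hits_vs_nr.m8") as hit_table:
#         counts = {}
#         for read, hit in redistribute.pickBestHitByAbundance(
#                 hit_table,
#                 filterParams=params,
#                 return_lines=False,  # yield (read, Hit) pairs
#                 winnerTakeAll=True):
#             counts[hit.hit] = counts.get(hit.hit, 0) + 1
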
def pickBestHitByAbundance(m8stream,
                           filterParams=None,
                           returnLines=True,
                           returnTranslations=False,
                           organismCounts=None,
                           winnerTakeAll=False,
                           sequenceWeights=None,
                           **kwargs):
    """
    Given a hit table with (potentially) multiple hits for each read.
    Select the best hit for each read. Hits are parsed from given hit
    table (m8stream) if given a FilterParams object, otherwise it is
    assumed that m8stream is an iterator over Hit objects. Remaining
    keyword arguments are used to translate hits to accessions, organisms,
    or anything else using a HitTranslator.

    Ambiguous hits (multiple 'best' hits to one read) are resolved as follows:
        given a set of reads that all hit the same list of translated hits:
            divvy up reads so that the abundance ratios change minimally

    Abundance is recorded for whatever the Hittranslator returns. If a hit
    map and taxonomy are given, this will be organisms, if only the parseStyle
    is given and it's set to ACC, then accessions will be the currency. The
    default is HITID.

    Yields (read,hit) tuples, (read, [translated hits]) tuples, or hit
    table lines.
    """
    if returnLines and returnTranslations:
        returnLines = False
        logger.warn("returnTranslations overrides returnLines!")

    # filtered hits
    if filterParams is None:
        hitIter = m8stream
    else:
        hitIter = blastm8.filterM8Stream(
            m8stream, filterParams, returnLines=False)

    # custom function for pulling orgs from hits
    #  if no settings given, just use the hit ID as the 'organism'
    kwargs.setdefault("parseStyle", HITID)
    hitTranslator = getHitTranslator(**kwargs)

    # we need to keep track of lots of things
    orgCounts = {}
    totalReads = 0
    unambiguousReads = 0
    ambiguousReads = 0
    sameOrgCount = 0
    ambiguousHits = {}

    # Check to see if organism counts were given
    if organismCounts is not None:
        if isinstance(organismCounts, str):
            organismCounts = getOrganismCountsFromFile(organismCounts)

    # loop over hits and yield unambiguous ones
    # Save ambiguous hits and org abundances
    logger.debug(str(hitIter))
    for (read, hits) in hitIter:
        logger.debug("Read: %s" % (read))
        totalReads += 1
        hitByOrg = {}
        orgs = []
        count = 0
        for hit in hits:
            count += 1
            hitOrgs = hitTranslator.translateHit(hit)
            logger.debug("Hit: %s (%s), %s" % (hit.hit, hitOrgs, hit.score))
            orgs.extend(hitOrgs)
            for org in hitOrgs:
                if org in hitByOrg:
                    # This should be REALLY rare.
                    sameOrgCount += 1
                    sameOrgExample = (read, hit.hit, org)
                    logger.warning(
                        "Read (%s) has two best hits to same org (%s)!" %
                        (read, org))
                    # always keep the first alphabetically, for reproducibility
                    if hit.hit < hitByOrg[org].hit:
                        hitByOrg[org] = hit
                else:
                    hitByOrg[org] = hit
        orgs = tuple(sorted(set(orgs)))
        if count == 0:
            # This *should* never happen
            logger.error("No hits for %s!!!!!" % (read))
            raise Exception(
                "Read (%s) has no hits. This shouldn't happen." %
                (read))
        elif count == 1 or len(hitByOrg) == 1:
            logger.debug("Read is UNambiguous")
            unambiguousReads += 1
            for org in orgs:
                if sequenceWeights is not None:
                    increment = sequenceWeights.get(read, 1)
                else:
                    increment = 1
                orgCounts[org] = orgCounts.get(org, 0) + increment
            if returnLines:
                yield hit.line
            elif returnTranslations:
                yield (read, orgs)
            else:
                yield (read, hit)
        else:
            logger.debug("Read IS ambiguous")
            ambiguousReads += 1
            if organismCounts is None:
                # If we don't have count data to start, save these til the end
                ambiguousHits.setdefault(orgs, []).append(hitByOrg)
            else:
                # Use given counts to resolve
                for (hit, org) in assignHits(orgs, [hitByOrg],
                                             organismCounts, winnerTakeAll):
                    yield formatReturn(hit, org, returnLines,
                                       returnTranslations)

    logger.info("Processed %d reads:" % (totalReads))
    logger.info(
        "Collected unambiguous counts for %d orgs from %d reads" %
        (len(orgCounts), unambiguousReads))

    # if we used given organism counts, then we are done
    if organismCounts is not None:
        return

    # otherwise, we have ambiguous reads to resolve
    logger.info(
        "Need to resolve %d ambiguous reads hitting %d orgs" %
        (ambiguousReads, len(ambiguousHits)))

    if sameOrgCount > 0:
        elements = list(sameOrgExample)
        elements.insert(0, sameOrgCount)
        logger.warning(
            "found %d cases where a read had an extra hit to the same "
            "organism. For example: %s (%s,%s)" %
            tuple(elements))

    # loop over ambiguous hits (grouped by possible orgs) and pick one for
    # each read
    ambiguousReads = 0
    # iterate over sorted keys for a reproducible output order
    for orgs in sorted(ambiguousHits.keys()):
        hits = ambiguousHits[orgs]
        for (hit, org) in assignHits(orgs, hits, orgCounts, winnerTakeAll):
            ambiguousReads += 1
            yield formatReturn(hit, org, returnLines, returnTranslations)

    logger.info(
        "Selected top hit for %d ambiguous reads for a total of %d "
        "returned hit assignments" %
        (ambiguousReads, ambiguousReads + unambiguousReads))
def main():
    description = __doc__
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("input_files", nargs="+",
                        default=[],
                        metavar="INFILE",
                        help="List of hit tables to process")
    parser.add_argument("-o", "--outfile", dest="output_file",
                        metavar="OUTFILE", help="Write count table to OUTFILE")
    parser.add_argument("-l", "--level", dest="levels", default=None,
                        metavar="LEVEL", action="append",
                        help=""" Level(s) to collect counts on. Use flag
                      multiple times to specify multiple levels. If multiple
                      values given, one table produced for each with rank
                      name appended to file name. Levels can be an integer
                      (1-3) for KEGG or SEED levels, any one of 'gene',
                      'role', 'family',
                      'ko', or 'ortholog' (which are all synonyms), or
                      anything not synonymous with 'gene' to
                      get CAZy groups. Defaults to ortholog/role and
                      levels 1, 2, and 3 for KEGG and SEED
                      and gene and group for CAZy and COG.""")

    # option for deconvoluting clusters or assemblies
    add_weight_arguments(parser, multiple=True)

    # cutoff options
    add_count_arguments(parser)

    # format, ortholog hierarchy, and more
    kegg.add_path_arguments(
        parser,
        defaults={'countMethod': 'tophit'},
        choices={'countMethod':
                 ('tophit',
                  'first',
                  'most',
                  'all',
                  'consensus')},
        helps={'countMethod':
               ("How to deal with counts from multiple hits. ('first': "
                "just use the first hit, 'most': "
                "can return multiple hits, 'all': return every hit, "
                "consensus: return None unless all the same). Do not "
                "use most or consensus with more than one level at a time. "
                "Default is 'tophit': This breaks any ties by choosing "
                "the most abundant hit based on other unambiguous "
                "assignments.")})

    # log level and help
    add_universal_arguments(parser)
    arguments = parser.parse_args()
    setup_logging(arguments)

    if len(arguments.input_files) == 0:
        parser.error("Must supply at least one m8 file to parse")

    # Set defaults and check for some conflicts
    if arguments.levels is None and arguments.heirarchyFile is None:
        # using hit names only
        arguments.levels = [None]
    else:
        if arguments.heirarchyFile is None \
                and arguments.heirarchyType != 'cazy':
            logging.warning("Type: %s", arguments.heirarchyType)
            parser.error("Cannot select levels without a heirarchy (ko) file")
        if arguments.levels is None:
            # set a default
            if arguments.heirarchyType is 'kegg':
                arguments.levels = ['ko', '1', '2', 'pathway']
            if arguments.heirarchyType is 'seed':
                arguments.levels = ['role', '1', '2', 'subsystem']
            else:
                arguments.levels = ['gene', 'group']

        try:
            # Make sure the level list makes sense
            arguments.levels = cleanLevels(arguments.levels)
        except Exception as e:
            parser.error(str(e))

    # load weights file
    sequenceWeights = loadSequenceWeights(arguments.weights)

    # only print to stdout if there is a single level
    if len(arguments.levels) > 1 and arguments.output_file is None:
        parser.error("STDOUT only works if a single level is chosen!")

    cutoff = arguments.cutoff

    # map reads to hits
    if arguments.mapFile is not None:
        if arguments.mapStyle == 'auto':
            with open(arguments.mapFile) as f:
                firstLine = next(f)
                while len(firstLine) == 0 or firstLine[0] == '#':
                    firstLine = next(f)
            if koMapRE.search(firstLine):
                arguments.mapStyle = 'kegg'
            elif seedMapRE.search(firstLine):
                arguments.mapStyle = 'seed'
            elif tabMapRE.search(firstLine):
                arguments.mapStyle = 'tab'
            # elif cogMapRE.search(firstLine):
            #    arguments.mapStyle='cog'
            else:
                raise Exception(
                    "Cannot figure out map type from first line:\n%s" %
                    (firstLine))

        logging.info("Map file seems to be: %s", arguments.mapStyle)
        if arguments.mapStyle == 'kegg':
            valueMap = kegg.parseLinkFile(arguments.mapFile)
        elif arguments.mapStyle == 'seed':
            valueMap = kegg.parseSeedMap(arguments.mapFile)
        # elif arguments.mapStyle=='cog':
        #    valueMap=kegg.parseCogMap(arguments.mapFile)
        else:
            if arguments.parseStyle == GIS:
                keyType = int
            else:
                keyType = None
            valueMap = parseMapFile(
                arguments.mapFile,
                valueType=None,
                valueDelim=arguments.tab_map_delim,
                keyType=keyType)
        if len(valueMap) > 0:
            logging.info("Read %d items into map. EG: %s",
                         len(valueMap), next(iter(valueMap.items())))
        else:
            logging.warn("Read 0 items into value map!")
    else:
        valueMap = None

    # parse input files
    fileCounts = {}
    totals = {}
    fileLabels = {}
    sortedLabels = []

    # Allow for file names to be preceded with TAG=
    for filename in arguments.input_files:
        bits = filename.split("=", 1)
        if len(bits) > 1:
            (filetag, filename) = bits
        else:
            filetag = filename
        fileLabels[filename] = filetag
        # keep order so that column order matches arguments
        sortedLabels.append(filetag)
        fileCounts[filetag] = {}
        totals[filetag] = 0

    params = FilterParams.create_from_arguments(arguments)
    # TODO: incorporate weights into tophit algorithm!
    if arguments.countMethod == 'tophit':
        # Process all files at once and use overall abundance to pick best hits
        from edl import redistribute
        multifile = redistribute.multipleFileWrapper(fileLabels.items())

        # don't give any hit translation, just use hit ids for redistribution
        readHits = redistribute.pickBestHitByAbundance(
            multifile,
            filterParams=params,
            returnLines=False,
            winnerTakeAll=True,
            parseStyle=arguments.parseStyle,
            sequenceWeights=sequenceWeights)
        # define method to turn Hits into Genes (kos, families)
        hitTranslator = getHitTranslator(parseStyle=arguments.parseStyle,
                                         hitStringMap=valueMap)
        # translateHit = lambda hit: hitTranslator.translateHit(hit)[0]

        # use read->file mapping and hit translator to get file based counts
        #  from returned (read,Hit) pairs
        increment = 1
        for (read_name, hit) in readHits:
            file_tag, read_name = read_name.split("/", 1)
            file_tag = unquote_plus(file_tag)
            gene = hitTranslator.translateHit(hit)[0]
            if gene is None:
                gene = "None"
            logging.debug(
                "READ: %s\t%s\t%s\t%s",
                file_tag, read_name, hit.hit, gene)
            genecount = fileCounts[file_tag].setdefault(gene, 0)
            if sequenceWeights is not None:
                increment = sequenceWeights.get(read_name, 1)
            fileCounts[file_tag][gene] = genecount + increment
            totals[file_tag] += increment
        logging.debug(str(totals))

    else:
        # Original way, just process each file separately
        for (filename, filetag) in fileLabels.items():
            infile = open(filename, 'r')

            hitIter = parseM8FileIter(infile,
                                      valueMap,
                                      params,
                                      arguments.parseStyle,
                                      arguments.countMethod,
                                      ignoreEmptyHits=arguments.mappedHitsOnly)

            (total, counts, hitMap) = \
                countIterHits(hitIter,
                              allMethod=arguments.allMethod,
                              weights=sequenceWeights)
            fileCounts[filetag] = counts
            totals[filetag] = total

            logging.info(
                "parsed %d hits (%d unique) for %d reads from %s",
                total, len(counts), len(hitMap), filename)

            infile.close()

    logging.debug(repr(fileCounts))
    printCountTablesByLevel(fileCounts, totals, sortedLabels, arguments)
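
# Example invocation (illustrative; script and file names are hypothetical):
#   python count_paths.py -l ko -l 2 -o counts.tsv \
#       sampleA=sampleA_vs_kegg.m8 sampleB=sampleB_vs_kegg.m8
# (plus the map/hierarchy options defined by kegg.add_path_arguments)
# One table per level is written, with the level appended to the OUTFILE
# name and one count column per input file.
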
def main():
    description = __doc__
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("input_files",
                        nargs="+",
                        default=[],
                        metavar="INFILE",
                        help="List of hit tables to process")
    parser.add_argument("-o",
                        "--outfile",
                        dest="output_file",
                        metavar="OUTFILE",
                        help="Write count table to OUTFILE")
    parser.add_argument("-l",
                        "--level",
                        dest="levels",
                        default=None,
                        metavar="LEVEL",
                        action="append",
                        help=""" Level(s) to collect counts on. Use flag
                      multiple times to specify multiple levels. If multiple
                      values given, one table produced for each with rank
                      name appended to file name. Levels can be an integer
                      (1-3) for KEGG or SEED levels, any one of 'gene',
                      'role', 'family',
                      'ko', or 'ortholog' (which are all synonyms), or
                      anything not synonymous with 'gene' to
                      get CAZy groups. Defaults to ortholog/role and
                      levels 1, 2, and 3 for KEGG and SEED
                      and gene and group for CAZy and COG.""")

    # option for deconvoluting clusters or assemblies
    add_weight_arguments(parser, multiple=True)

    # cutoff options
    add_count_arguments(parser)

    # format, ortholog hierarchy, and more
    kegg.add_path_arguments(
        parser,
        defaults={'countMethod': 'tophit'},
        choices={
            'countMethod': ('tophit', 'first', 'most', 'all', 'consensus')
        },
        helps={
            'countMethod':
            ("How to deal with counts from multiple hits. ('first': "
             "just use the first hit, 'most': "
             "can return multiple hits, 'all': return every hit, "
             "consensus: return None unless all the same). Do not "
             "use most or consensus with more than one level at a time. "
             "Default is 'tophit': This breaks any ties by choosing "
             "the most abundant hit based on other unambiguous "
             "assignments.")
        })

    # log level and help
    add_universal_arguments(parser)
    arguments = parser.parse_args()
    setup_logging(arguments)

    if len(arguments.input_files) == 0:
        parser.error("Must supply at least one m8 file to parse")

    # Set defaults and check for some conflicts
    if arguments.levels is None and arguments.heirarchyFile is None:
        # using hit names only
        arguments.levels = [None]
    else:
        if arguments.heirarchyFile is None \
                and arguments.heirarchyType != 'cazy':
            logging.warning("Type: %s", arguments.heirarchyType)
            parser.error("Cannot select levels without a heirarchy (ko) file")
        if arguments.levels is None:
            # set a default
            if arguments.heirarchyType is 'kegg':
                arguments.levels = ['ko', '1', '2', 'pathway']
            if arguments.heirarchyType is 'seed':
                arguments.levels = ['role', '1', '2', 'subsystem']
            else:
                arguments.levels = ['gene', 'group']

        try:
            # Make sure the level list makes sense
            arguments.levels = cleanLevels(arguments.levels)
        except Exception as e:
            parser.error(str(e))

    # load weights file
    sequenceWeights = loadSequenceWeights(arguments.weights)

    # only print to stdout if there is a single level
    if len(arguments.levels) > 1 and arguments.output_file is None:
        parser.error("STDOUT only works if a single level is chosen!")

    cutoff = arguments.cutoff

    # map reads to hits
    if arguments.mapFile is not None:
        if arguments.mapStyle == 'auto':
            with open(arguments.mapFile) as f:
                firstLine = next(f)
                while len(firstLine) == 0 or firstLine[0] == '#':
                    firstLine = next(f)
            if koMapRE.search(firstLine):
                arguments.mapStyle = 'kegg'
            elif seedMapRE.search(firstLine):
                arguments.mapStyle = 'seed'
            elif tabMapRE.search(firstLine):
                arguments.mapStyle = 'tab'
            # elif cogMapRE.search(firstLine):
            #    arguments.mapStyle='cog'
            else:
                raise Exception(
                    "Cannot figure out map type from first line:\n%s" %
                    (firstLine))

        logging.info("Map file seems to be: %s", arguments.mapStyle)
        if arguments.mapStyle == 'kegg':
            valueMap = kegg.parseLinkFile(arguments.mapFile)
        elif arguments.mapStyle == 'seed':
            valueMap = kegg.parseSeedMap(arguments.mapFile)
        # elif arguments.mapStyle=='cog':
        #    valueMap=kegg.parseCogMap(arguments.mapFile)
        else:
            if arguments.parseStyle == GIS:
                keyType = int
            else:
                keyType = None
            valueMap = parseMapFile(arguments.mapFile,
                                    valueType=None,
                                    valueDelim=arguments.tab_map_delim,
                                    keyType=keyType)
        if len(valueMap) > 0:
            logging.info("Read %d items into map. EG: %s", len(valueMap),
                         next(iter(valueMap.items())))
        else:
            logging.warn("Read 0 items into value map!")
    else:
        valueMap = None

    # parse input files
    fileCounts = {}
    totals = {}
    fileLabels = {}
    sortedLabels = []

    # Allow for file names to be preceded with TAG=
    for filename in arguments.input_files:
        bits = filename.split("=", 1)
        if len(bits) > 1:
            (filetag, filename) = bits
        else:
            filetag = filename
        fileLabels[filename] = filetag
        # keep order so that column order matches arguments
        sortedLabels.append(filetag)
        fileCounts[filetag] = {}
        totals[filetag] = 0

    # TODO: incorporate weights into tophit algorithm!
    if arguments.countMethod == 'tophit':
        # Process all files at once and use overall abundance to pick best hits
        from edl import redistribute
        params = FilterParams.create_from_arguments(arguments)
        multifile = redistribute.multipleFileWrapper(fileLabels.items())

        # don't give any hit translation, just use hit ids for redistribution
        readHits = redistribute.pickBestHitByAbundance(
            multifile,
            filterParams=params,
            returnLines=False,
            winnerTakeAll=True,
            parseStyle=arguments.parseStyle,
            sequenceWeights=sequenceWeights)
        # define method to turn Hits into Genes (kos, families)
        hitTranslator = getHitTranslator(parseStyle=arguments.parseStyle,
                                         hitStringMap=valueMap)
        # translateHit = lambda hit: hitTranslator.translateHit(hit)[0]

        # use read->file mapping and hit translator to get file based counts
        #  from returned (read,Hit) pairs
        increment = 1
        for (read_name, hit) in readHits:
            file_tag, read_name = read_name.split("/", 1)
            file_tag = unquote_plus(file_tag)
            gene = hitTranslator.translateHit(hit)[0]
            if gene is None:
                gene = "None"
            logging.debug("READ: %s\t%s\t%s\t%s", file_tag, read_name, hit.hit,
                          gene)
            genecount = fileCounts[file_tag].setdefault(gene, 0)
            if sequenceWeights is not None:
                increment = sequenceWeights.get(read_name, 1)
            fileCounts[file_tag][gene] = genecount + increment
            totals[file_tag] += increment
        logging.debug(str(totals))

    else:
        # Original way, just process each file separately
        for (filename, filetag) in fileLabels.items():
            infile = open(filename, 'r')

            hitIter = parseM8FileIter(infile,
                                      valueMap,
                                      arguments.hitTableFormat,
                                      arguments.filter_top_pct,
                                      arguments.parseStyle,
                                      arguments.countMethod,
                                      ignoreEmptyHits=arguments.mappedHitsOnly)

            (total, counts, hitMap) = \
                countIterHits(hitIter,
                              allMethod=arguments.allMethod,
                              weights=sequenceWeights)
            fileCounts[filetag] = counts
            totals[filetag] = total

            logging.info("parsed %d hits (%d unique) for %d reads from %s",
                         total, len(counts), len(hitMap), filename)

            infile.close()

    logging.debug(repr(fileCounts))
    printCountTablesByLevel(fileCounts, totals, sortedLabels, arguments)
def main():
    """" Set up the CLI """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("input_files",
                        nargs="+",
                        default=[],
                        metavar="INFILE",
                        help="List of hit tables to process")
    parser.add_argument("-o",
                        "--outfile",
                        dest="outfile",
                        metavar="OUTFILE",
                        help="Write count table to OUTFILE")
    parser.add_argument("-r",
                        "--rank",
                        dest="ranks",
                        default=None,
                        metavar="RANK",
                        action="append",
                        help=""" Rank(s) to collect counts on. Use flag
                        multiple
                        times to specify multiple ranks. If multiple values
                        given, one table produced for each with rank name
                        appended to file name. Defaults to all major ranks
                        between phylum and species. Corresponds to rank names
                        in nodes.dmp. To see list run:
                        'cut -f5 nodes.dmp | uniq | sort | uniq'
                        in ncbi tax dir. Will also accept 'organism' to mean
                        no rank (ie, just the organism name).""")
    parser.add_argument(
        "-s",
        "--collapseToDomain",
        default=False,
        action="store_true",
        help="Collapse all taxa below given rank down to "
        "superkingdom/domain. EG: in the genus output, anything "
        "assigned to Cyanobactia, will be lumped in with all "
        "other bacteria")
    parser.add_argument(
        "-R",
        "--printRank",
        dest="printRanks",
        action="append",
        help="Include indeicated rank(s) in lineage of printed taxa. "
        "Will be ignored if beyond the rank of the taxa "
        "(IE We can't include species if the taxon being counted "
        "is genus)")

    # option for deconvoluting clusters or assemblies
    add_weight_arguments(parser, multiple=True)

    # cutoff options
    add_count_arguments(parser)

    # format, tax dir, and more
    add_taxon_arguments(parser,
                        choices={
                            'countMethod': ('LCA', 'all', 'first', 'most',
                                            'tophit', 'toporg', 'consensus')
                        })

    # log level and help
    add_universal_arguments(parser)
    arguments = parser.parse_args()
    setup_logging(arguments)

    if len(arguments.input_files) == 0:
        parser.error("Must supply at least one m8 file to parse")

    # Handle the case where Galaxy tries to set None as a string
    arguments.ranks = checkNoneOption(arguments.ranks)
    arguments.printRanks = checkNoneOption(arguments.printRanks)

    logging.info("Printing out ranks: %r", arguments.ranks)

    # Set defaults and check for some conflicts
    if arguments.ranks is None and arguments.taxdir is None:
        # using hit names only
        arguments.ranks = [ORG_RANK]
        if arguments.printRanks is not None:
            parser.error("Display ranks are not used without taxonomic info")
    else:
        if arguments.taxdir is None:
            parser.error("Cannot select ranks without a taxonomy")
        if arguments.ranks is None:
            # set a default
            arguments.ranks = [
                'phylum', 'class', 'order', 'family', 'genus', 'species'
            ]

        try:
            # Make sure the rank lists make sense
            arguments.ranks = cleanRanks(arguments.ranks)
            if arguments.printRanks is not None:
                arguments.printRanks = cleanRanks(arguments.printRanks)
        except Exception as e:
            parser.error(str(e))

    # load weights file
    sequenceWeights = loadSequenceWeights(arguments.weights)

    # only print to stdout if there is a single rank
    if len(arguments.ranks) > 1 and arguments.outfile is None:
        parser.error("STDOUT only works if a single rank is chosen!")

    # Because rank is used in parsing hits, we can only do multiple ranks for
    # certain kinds of count methods
    if len(arguments.ranks) > 1:
        rank = None
        if arguments.countMethod in ['consensus', 'most']:
            parser.error(
                "Using multiple ranks does not work with the 'consensus' "
                "or 'most' counting methods. LCA should give the same "
                "results as consensus. If you really want to do this, "
                "use a bash loop:'for rank in phylum order genus; do "
                "COMMAND -r ${rank}; done'")
    else:
        rank = arguments.ranks[0]

    # load necessary maps
    (taxonomy, hitStringMap) = readMaps(arguments)

    # parse input files
    fileCounts = {}
    totals = {}
    fileLabels = {}
    sortedLabels = []

    # Allow for file names to be preceded with TAG=
    for filename in arguments.input_files:
        bits = filename.split("=", 1)
        if len(bits) > 1:
            (filetag, filename) = bits
        else:
            filetag = filename
        fileLabels[filename] = filetag
        # keep order so that column order matches arguments
        sortedLabels.append(filetag)
        fileCounts[filetag] = {}
        totals[filetag] = 0

    if arguments.countMethod in ('tophit', 'toporg'):
        # Process all files at once and use overall abundance to pick best hits
        from edl import redistribute
        params = FilterParams.create_from_arguments(arguments)
        multifile = redistribute.multipleFileWrapper(fileLabels.keys())

        if arguments.countMethod == 'tophit':
            # don't give any taxonomy, just map to accessions for
            # redistribution
            readHits = redistribute.pickBestHitByAbundance(
                multifile,
                filterParams=params,
                returnLines=False,
                winnerTakeAll=True,
                parseStyle=arguments.parseStyle,
                sequenceWeights=sequenceWeights)
            # define a method to turn Hits into organisms
            hitTranslator = getHitTranslator(parseStyle=arguments.parseStyle,
                                             taxonomy=taxonomy,
                                             hitStringMap=hitStringMap)

            translateHit = lambda hit: hitTranslator.translateHit(hit=hit)[0]

        else:
            # translate to organism before finding most abundant
            readHits = redistribute.pickBestHitByAbundance(
                multifile,
                filterParams=params,
                returnLines=False,
                returnTranslations=True,
                winnerTakeAll=True,
                taxonomy=taxonomy,
                hitStringMap=hitStringMap,
                parseStyle=ACCS)

            # Organisms will be returned, make translator trivial:
            translateHit = passThrough

        # use read->file mapping and hit translator to get file based counts
        #  from returned (read,Hit) pairs
        increment = 1
        for (read_name, hit) in readHits:
            file_name, read_name = read_name.split("/", 1)
            file_tag = fileLabels[unquote_plus(file_name)]
            taxon = translateHit(hit)
            taxcount = fileCounts[file_tag].setdefault(taxon, 0)
            if sequenceWeights is not None:
                increment = sequenceWeights.get(read_name, 1)
            fileCounts[file_tag][taxon] = taxcount + increment
            totals[file_tag] += increment
        logging.debug(str(totals))

    else:
        # Original way, just process each file separately
        for (filename, filetag) in fileLabels.items():
            infile = open(filename, 'r')

            hitIter = parseM8FileIter(infile,
                                      hitStringMap,
                                      arguments.hitTableFormat,
                                      arguments.filter_top_pct,
                                      arguments.parseStyle,
                                      arguments.countMethod,
                                      taxonomy=taxonomy,
                                      rank=rank)

            (total, counts, hitMap) = \
                countIterHits(hitIter,
                              allMethod=arguments.allMethod,
                              weights=sequenceWeights)
            fileCounts[filetag] = counts
            totals[filetag] = total

            logging.info("parsed %d hits (%d unique) for %d reads from %s",
                         total, len(counts), len(hitMap), filename)

            infile.close()

    printCountTablesByRank(fileCounts, totals, sortedLabels, arguments)
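
# Example invocation (illustrative; script and file names are hypothetical):
#   python count_taxa.py -r genus -R phylum -s -o counts.tsv \
#       sampleA=sampleA_vs_refseq.m8
# -R phylum includes the phylum in each printed lineage; -s enables
# --collapseToDomain (see its help text above).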