Example 1
    def setup_class(self):
        self.references = parse_references_from_fasta('tests/data/ref1.fasta')
        self.variant_collection = NTVariantCollection(self.references)

        self.variant_collection.variants['ref1']['3']['t'] = NTVariant(
            chrom='ref1',
            pos=3,
            ref='c',
            alt='t',
            qual=30,
            info={
                'DP': 400,
                'AC': 12,
                'AF': 0.03
            })
        self.variant_collection.variants['ref1']['10']['a'] = NTVariant(
            chrom='ref1',
            pos=10,
            ref='a',
            alt='t',
            qual=23,
            info={
                'DP': 200,
                'AC': 7,
                'AF': 0.035
            })
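
A note on the nested assignment above: variants accepts chrom → position → alt assignment without pre-creating the intermediate keys, which suggests an auto-vivifying nested mapping. A minimal stand-in built with collections.defaultdict (an assumption about the container, not the real NTVariantCollection class):

from collections import defaultdict

# Nested chrom -> pos -> alt mapping that creates missing levels on first use.
variants = defaultdict(lambda: defaultdict(dict))
variants['ref1']['3']['t'] = {'ref': 'c', 'alt': 't', 'qual': 30}
print(variants['ref1']['3']['t']['qual'])  # 30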
Example 2
def ntvar(bam, reference, error_rate, output):
    rs = parse_references_from_fasta(reference)

    mapped_read_collection_arr = []
    for r in rs:
        # create MappedReadCollection object
        mapped_read_collection_arr.append(parse_mapped_reads_from_bam(r, bam))

    variants = NTVariantCollection.from_mapped_read_collections(
        error_rate, rs, *mapped_read_collection_arr)

    variants.filter('q30', 'QUAL<30', True)
    variants.filter('ac5', 'AC<5', True)
    variants.filter('dp100', 'DP<100', True)

    if output:
        output.write(variants.to_vcf_file())
        output.close()
    else:
        click.echo(variants.to_vcf_file())
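
The filter(name, expression, result) calls above tag variants that match a threshold expression (for example QUAL<30) with the filter name instead of PASS; the real semantics live in NTVariantCollection.filter. A minimal sketch of that idea, with a hypothetical apply_filter helper and plain dicts standing in for the real variant objects:

def apply_filter(variants, name, expression, result):
    # Mark variants whose value satisfies `expression` (e.g. 'QUAL<30') with `name`.
    field, threshold = expression.split('<')
    for variant in variants:
        value = variant['qual'] if field == 'QUAL' else variant['info'][field]
        if (float(value) < float(threshold)) == result:
            variant['filter'] = name
        elif variant['filter'] is None:
            variant['filter'] = 'PASS'

records = [
    {'qual': 30, 'info': {'DP': 400, 'AC': 12}, 'filter': None},
    {'qual': 23, 'info': {'DP': 200, 'AC': 7}, 'filter': None},
]
apply_filter(records, 'q30', 'QUAL<30', True)
print([r['filter'] for r in records])  # ['PASS', 'q30']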
Example 3
def parse_nt_variants_from_vcf(variant_file, references):
    """Build variants object from a vcf file"""

    obj = NTVariantCollection(references)

    # Read in and parse the variants file
    # The file uses 1 as the first position but 0 is the first position in
    # mapped reads.
    with open(variant_file, "r") as input_file:
        for line in input_file:
            if line[0] != "#":
                chrom, pos, var_id, ref, alt, qual, var_filter, info = \
                    line.rstrip().split("\t")

                dp, ac, af = info.rstrip().split(';')

                dp = dp.rstrip().split('=')[1]
                ac = ac.rstrip().split('=')[1]
                af = af.rstrip().split('=')[1]

                variant_obj = NTVariant(chrom=chrom,
                                        id=var_id,
                                        pos=int(pos),
                                        ref=ref,
                                        alt=alt,
                                        qual=qual,
                                        filter=var_filter,
                                        info={
                                            'DP': dp,
                                            'AC': ac,
                                            'AF': af
                                        })

                obj.variants[chrom][int(pos)][alt] = variant_obj

    return obj
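
The INFO handling in this parser assumes exactly three key=value pairs in the fixed order DP;AC;AF, as written by to_vcf_file in the other examples. The splitting logic can be checked on its own with plain Python:

info = "DP=400;AC=12;AF=0.0300"
dp, ac, af = info.split(';')
dp = dp.split('=')[1]
ac = ac.split('=')[1]
af = af.split('=')[1]
print(dp, ac, af)  # 400 12 0.0300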
Example 4
    def test_from_aacensus(self):
        bam = TEST_PATH + "/data/align.bam"
        BED4_file = TEST_PATH + "/data/hxb2_pol.bed"
        mapped_read_collection_arr = []
        error_rate = 0.0038

        # Create a MappedReadCollection object
        for r in self.references:
            mapped_read_collection_arr.append(
                parse_mapped_reads_from_bam(r, bam))

        # Build and filter the variant collection once all reads are loaded
        variants = NTVariantCollection.from_mapped_read_collections(
            error_rate, self.references, *mapped_read_collection_arr)
        variants.filter('q30', 'QUAL<30', True)
        variants.filter('ac5', 'AC<5', True)
        variants.filter('dp100', 'DP<100', True)

        # Mask the unconfident differences
        for mrc in mapped_read_collection_arr:
            mrc.mask_unconfident_differences(variants)

        # Parse the genes from the gene file
        genes = parse_BED4_file(BED4_file, self.references[0].name)

        # Determine which frames our genes are in
        frames = set()

        for gene in genes:
            frames.add(genes[gene]['frame'])

        aa_census = AACensus(self.reference, mapped_read_collection_arr, genes,
                             frames)

        test_variants = CodonVariantCollection.from_aacensus(aa_census)
        ref_seq = self.references[0].seq

        for gene in test_variants.variants:
            assert gene in genes
            for pos in test_variants.variants[gene]:
                for frame in frames:
                    nt_pos = pos / 3 - frame
                    assert nt_pos >= genes[gene]['start'] or nt_pos <= genes[
                        gene]['end']
                for codon in test_variants.variants[gene][pos]:
                    ref_codon = ref_seq[pos:pos + 3].lower()
                    assert codon != ref_codon
Example 5
def codonvar(bam, reference, offset, bed4_file, variants, error_rate, output):
    rs = parse_references_from_fasta(reference)
    mapped_read_collection_arr = []

    # Create a MappedReadCollection object
    for r in rs:
        mapped_read_collection_arr.append(parse_mapped_reads_from_bam(r, bam))

    if variants:
        variants_obj = parse_nt_variants_from_vcf(variants, rs)
    else:
        variants = NTVariantCollection.from_mapped_read_collections(
            error_rate, rs, *mapped_read_collection_arr)
        variants.filter('q30', 'QUAL<30', True)
        variants.filter('ac5', 'AC<5', True)
        variants.filter('dp100', 'DP<100', True)
        variants_obj = variants

    # Mask the unconfident differences
    for mrc in mapped_read_collection_arr:
        mrc.mask_unconfident_differences(variants_obj)

    # Parse the genes from the gene file
    genes = parse_BED4_file(bed4_file, rs[0].name)

    # Determine which frames our genes are in
    frames = set()

    for gene in genes:
        frames.add(genes[gene]['frame'])

    aa_census = AACensus(reference, mapped_read_collection_arr, genes, frames)

    codon_variants = CodonVariantCollection.from_aacensus(aa_census)

    if output:
        output.write(codon_variants.to_csv_file(offset))
        output.close()
    else:
        click.echo(codon_variants.to_csv_file(offset))
Example 6
    def setup(self):
        bam = TEST_PATH + "/data/align.bam"
        reference = TEST_PATH + "/data/hxb2_pol.fas"
        genes_file = TEST_PATH + "/data/hxb2_pol.bed"
        error_rate = 0.0038

        rs = parse_references_from_fasta(reference)
        mapped_read_collection_arr = []

        # Create a MappedReadCollection object
        for r in rs:
            mapped_read_collection_arr.append(
                parse_mapped_reads_from_bam(r, bam))

        variants = NTVariantCollection.from_mapped_read_collections(
            error_rate, rs, *mapped_read_collection_arr)
        variants.filter('q30', 'QUAL<30', True)
        variants.filter('ac5', 'AC<5', True)
        variants.filter('dp100', 'DP<100', True)

        # Mask the unconfident differences
        for mrc in mapped_read_collection_arr:
            mrc.mask_unconfident_differences(variants)

        # Parse the genes from the gene file
        genes = parse_genes_file(genes_file, rs[0].name)

        # Determine which frames our genes are in
        frames = set()

        for gene in genes:
            frames.add(genes[gene]['frame'])

        aa_census = AACensus(reference, mapped_read_collection_arr, genes,
                             frames)

        self.codon_variants = CodonVariantCollection.from_aacensus(aa_census)
Example 7
def aavar(bam, reference, bed4_file, variants, mutation_db, min_freq,
          error_rate, output):
    rs = parse_references_from_fasta(reference)

    mapped_read_collection_arr = []
    for r in rs:
        # Create a MappedReadCollection object
        mapped_read_collection_arr.append(parse_mapped_reads_from_bam(r, bam))

    if variants:
        variants_obj = parse_nt_variants_from_vcf(variants, rs)
    else:
        variants = NTVariantCollection.from_mapped_read_collections(
            error_rate, rs, *mapped_read_collection_arr)
        variants.filter('q30', 'QUAL<30', True)
        variants.filter('ac5', 'AC<5', True)
        variants.filter('dp100', 'DP<100', True)
        variants_obj = variants

    # Mask the unconfident differences
    for mrc in mapped_read_collection_arr:
        mrc.mask_unconfident_differences(variants_obj)

    # Parse the genes from the gene file
    genes = parse_BED4_file(bed4_file, rs[0].name)

    # Determine which frames our genes are in
    frames = set()

    for gene in genes:
        frames.add(genes[gene]['frame'])

    # Create an AACensus object
    aa_census = AACensus(reference, mapped_read_collection_arr, genes, frames)

    # Create AAVar collection and print the aavf file
    aa_vars = AAVariantCollection.from_aacensus(aa_census)

    # Filter for mutant frequency
    aa_vars.filter('mf0.01', 'freq<0.01', True)

    # Build the mutation database and update collection
    if mutation_db is not None:
        mutation_db = MutationDB(mutation_db, genes)
        aa_vars.apply_mutation_db(mutation_db)

    aavf_obj = aa_vars.to_aavf_obj("aavar", os.path.basename(reference),
                                   CONFIDENT)
    records = list(aavf_obj)

    if output:
        writer = parser.Writer(output, aavf_obj)
    else:
        writer = parser.Writer(sys.stdout, aavf_obj)

    for record in records:
        writer.write_record(record)

    if output:
        output.close()

    writer.close()
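
The tail of this command writes AAVF records through parser.Writer to either the supplied output handle or sys.stdout. The choose-a-handle pattern itself is plain Python and can be sketched independently of the aavf parser (the write_lines helper below is illustrative only):

import sys

def write_lines(lines, output=None):
    # Write to the given file handle if provided, otherwise to stdout.
    handle = output if output is not None else sys.stdout
    for line in lines:
        handle.write(line + "\n")
    if output is not None:
        output.close()

write_lines(["record one", "record two"])  # prints to stdout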
Example 8
    def analyze_reads(self, fasta_id, variant_filters, reporting_threshold,
                      generate_consensus):

        # Map reads against reference using bowtietwo
        if not self.quiet:
            print("# Mapping reads...")

        try:
            bam = self.generate_bam(fasta_id)
        except Exception:
            raise

        if not self.quiet:
            print("# Loading read mappings...")

        # cmd_consensus
        if generate_consensus:
            cons_seq_file = open("%s/consensus.fasta" % self.output_dir, "w+")

        mapped_read_collection_arr = []
        for r in self.references:
            mrc = parse_mapped_reads_from_bam(r, bam)
            mapped_read_collection_arr.append(mrc)
            consensus_seq = mrc.to_consensus(self.consensus_pct)
            if generate_consensus and len(consensus_seq) > 0:
                cons_seq_file.write('>{0}_{1}_{2}\n{3}'.format(
                    fasta_id, reporting_threshold, r.name, consensus_seq))

        if generate_consensus:
            cons_seq_file.close()

        # cmd_callntvar
        if not self.quiet:
            print("# Identifying variants...")

        variants = NTVariantCollection.from_mapped_read_collections(
            variant_filters[ERROR_RATE], self.references,
            *mapped_read_collection_arr)

        variants.filter('q%s' % variant_filters[MIN_VARIANT_QUAL],
                        'QUAL<%s' % variant_filters[MIN_VARIANT_QUAL], True)
        variants.filter('ac%s' % variant_filters[MIN_AC],
                        'AC<%s' % variant_filters[MIN_AC], True)
        variants.filter('dp%s' % variant_filters[MIN_DP],
                        'DP<%s' % variant_filters[MIN_DP], True)

        vcf_file = open("%s/hydra.vcf" % self.output_dir, "w+")
        vcf_file.write(variants.to_vcf_file())
        vcf_file.close()

        # cmd_aa_census
        if not self.quiet:
            print("# Masking filtered variants...")

        for mrc in mapped_read_collection_arr:
            mrc.mask_unconfident_differences(variants)

        if not self.quiet:
            print("# Building amino acid census...")

        # Determine which frames our genes are in
        frames = set()

        for gene in self.genes:
            frames.add(self.genes[gene]['frame'])

        aa_census = AACensus(self.reference, mapped_read_collection_arr,
                             self.genes, frames)

        coverage_file = open("%s/coverage_file.csv" % self.output_dir, "w+")
        coverage_file.write(aa_census.coverage(frames))
        coverage_file.close()

        # cmd_aavariants
        if not self.quiet:
            print("# Finding amino acid mutations...")

        # Create AAVar collection and print the aavf file
        aa_vars = AAVariantCollection.from_aacensus(aa_census)

        # Filter for mutant frequency
        aa_vars.filter('mf%s' % variant_filters[MIN_FREQ],
                       'freq<%s' % variant_filters[MIN_FREQ], True)

        # Build the mutation database and update collection
        if self.mutation_db is not None:
            mutation_db = MutationDB(self.mutation_db, self.genes)
            aa_vars.apply_mutation_db(mutation_db)

        aavf_obj = aa_vars.to_aavf_obj("hydra",
                                       os.path.basename(self.reference),
                                       CONFIDENT)
        records = list(aavf_obj)

        mut_report = open("%s/mutation_report.aavf" % self.output_dir, "w+")

        writer = parser.Writer(mut_report, aavf_obj)

        for record in records:
            writer.write_record(record)

        mut_report.close()
        writer.close()

        # cmd_drmutations
        if not self.quiet:
            print("# Writing drug resistant mutation report...")

        # Note: mutation_db is only bound in the branch above, so this step
        # assumes a mutation database was supplied.
        dr_report = open("%s/dr_report.csv" % self.output_dir, "w+")
        dr_report.write(
            aa_vars.report_dr_mutations(mutation_db, reporting_threshold))
        dr_report.close()

        self.output_stats(mapped_read_collection_arr)
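
Each report above is written with an explicit open/write/close triple. An equivalent using a context manager (plain Python, same behavior, with a hypothetical write_report helper) closes the handle even if the write raises:

def write_report(path, text):
    # Equivalent to: fh = open(path, "w+"); fh.write(text); fh.close()
    with open(path, "w+") as handle:
        handle.write(text)

write_report("/tmp/example_report.csv", "col_a,col_b\n1,2\n")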
Example 9
    def analyze_reads(self, fasta_id, filters, reporting_threshold,
                      generate_consensus):
        # Map reads against reference using bowtietwo
        if not self.quiet:
            print("# Mapping reads...")

        bam = self.generate_bam(fasta_id)

        if not self.quiet:
            print("# Loading read mappings...")

        # cmd_consensus
        if generate_consensus:
            cons_seq_file = open("%s/consensus.fasta" % self.output_dir, "w+")

        mapped_read_collection_arr = []
        for r in self.references:
            mrc = parse_mapped_reads_from_bam(r, bam)
            mapped_read_collection_arr.append(mrc)
            if generate_consensus:
                cons_seq_file.write('>{0}_{1}_{2}\n{3}'.format(
                    fasta_id, reporting_threshold, r.name,
                    mrc.to_consensus(self.consensus_pct)))

        if generate_consensus:
            cons_seq_file.close()

        # cmd_callntvar
        if not self.quiet:
            print("# Identifying variants...")

        variants = NTVariantCollection.from_mapped_read_collections(
            filters["error_rate"], self.references,
            *mapped_read_collection_arr)

        variants.filter('q%s' % filters["min_qual"],
                        'QUAL<%s' % filters["min_qual"], True)
        variants.filter('ac%s' % filters["min_ac"],
                        'AC<%s' % filters["min_ac"], True)
        variants.filter('dp%s' % filters["min_dp"],
                        'DP<%s' % filters["min_dp"], True)

        vcf_file = open("%s/hydra.vcf" % self.output_dir, "w+")
        vcf_file.write(variants.to_vcf_file())
        vcf_file.close()

        # cmd_aa_census
        if not self.quiet:
            print("# Masking filtered variants...")

        for mrc in mapped_read_collection_arr:
            mrc.mask_unconfident_differences(variants)

        if not self.quiet:
            print("# Building amino acid census...")

        # Determine which frames our genes are in
        frames = set()

        for gene in self.genes:
            frames.add(self.genes[gene]['frame'])

        aa_census = AACensus(self.reference, mapped_read_collection_arr,
                             self.genes, frames)

        coverage_file = open("%s/coverage_file.csv" % self.output_dir, "w+")
        coverage_file.write(aa_census.coverage(frames))
        coverage_file.close()

        # cmd_aavariants
        if not self.quiet:
            print("# Finding amino acid mutations...")

        # Create AAVar collection and print the hmcf file
        aa_vars = AAVariantCollection.from_aacensus(aa_census)

        # Filter for mutant frequency
        aa_vars.filter('mf%s' % filters['min_freq'],
                       'freq<%s' % filters['min_freq'], True)

        # Build the mutation database and update collection
        if self.mutation_db is not None:
            mutation_db = MutationDB(self.mutation_db, self.genes)
            aa_vars.apply_mutation_db(mutation_db)

        mut_report = open("%s/mutation_report.hmcf" % self.output_dir, "w+")
        mut_report.write(aa_vars.to_hmcf_file(CONFIDENT))
        mut_report.close()

        # cmd_drmutations
        if not self.quiet:
            print("# Writing drug resistant mutation report...")

        dr_report = open("%s/dr_report.csv" % self.output_dir, "w+")
        dr_report.write(aa_vars.report_dr_mutations(mutation_db,
                                                    reporting_threshold))
        dr_report.close()

        self.output_stats(mapped_read_collection_arr)
Example 10
class TestNTVariantCollection:
    @classmethod
    def setup_class(self):
        self.references = parse_references_from_fasta('tests/data/ref1.fasta')
        self.variant_collection = NTVariantCollection(self.references)

        self.variant_collection.variants['ref1']['3']['t'] = NTVariant(
            chrom='ref1',
            pos=3,
            ref='c',
            alt='t',
            qual=30,
            info={
                'DP': 400,
                'AC': 12,
                'AF': 0.03
            })
        self.variant_collection.variants['ref1']['10']['a'] = NTVariant(
            chrom='ref1',
            pos=10,
            ref='a',
            alt='t',
            qual=23,
            info={
                'DP': 200,
                'AC': 7,
                'AF': 0.035
            })

    def test_from_mapped_read_collections(self):
        # TODO: add actual test for Variants.from_mapped_reads method
        assert True

    def test_to_vcf_file(self):
        vcf_file_string = self.variant_collection.to_vcf_file()

        assert "##fileformat=VCFv4.2" in vcf_file_string
        assert "##fileDate=" in vcf_file_string
        assert "##source=quasitools" in vcf_file_string
        assert "##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Total Depth\">" in vcf_file_string
        assert "##INFO=<ID=AC,Number=A,Type=Integer,Description=\"Allele Count\">" in vcf_file_string
        assert "##INFO=<ID=AF,Number=A,Type=Float,Description=\"Allele Frequency\">" in vcf_file_string
        assert "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO" in vcf_file_string
        assert "ref1\t3\t.\tc\tt\t30\t.\tDP=400;AC=12;AF=0.0300" in vcf_file_string
        assert "ref1\t10\t.\ta\tt\t23\t.\tDP=200;AC=7;AF=0.0350" in vcf_file_string

    def test_calculate_variant_qual(self):
        qual1 = self.variant_collection._NTVariantCollection__calculate_variant_qual(
            0.01, 12, 400)
        qual2 = self.variant_collection._NTVariantCollection__calculate_variant_qual(
            0.01, 7, 200)

        assert qual1 == 30
        assert qual2 == 23

    def test_filter(self):
        variant_collection_copy = copy.deepcopy(self.variant_collection)

        variant_collection_copy.filter('q30', 'QUAL<30', True)
        assert variant_collection_copy.variants['ref1']['3'][
            't'].filter == 'PASS'
        assert variant_collection_copy.variants['ref1']['10'][
            'a'].filter == 'q30'

        variant_collection_copy.filter('ac5', 'AC<5', True)
        assert variant_collection_copy.variants['ref1']['3'][
            't'].filter == 'PASS'
        assert variant_collection_copy.variants['ref1']['10'][
            'a'].filter == 'q30'

        variant_collection_copy.filter('dp100', 'DP<100', True)
        assert variant_collection_copy.variants['ref1']['3'][
            't'].filter == 'PASS'
        assert variant_collection_copy.variants['ref1']['10'][
            'a'].filter == 'q30'
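
test_calculate_variant_qual reaches a double-underscore method through Python's name mangling: an attribute named __calculate_variant_qual defined inside NTVariantCollection is stored as _NTVariantCollection__calculate_variant_qual, which is exactly the name the test uses. A minimal illustration of the mechanism (toy class, not quasitools code):

class Example:
    def __hidden(self):
        return 42

e = Example()
# e.__hidden() would raise AttributeError here; the mangled name works,
# just as _NTVariantCollection__calculate_variant_qual does in the test.
print(e._Example__hidden())  # 42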
Example 11
    def test_valid_vcf_file(self):
        """Tests to ensure that valid vcf files are parsed properly."""

        reference = TEST_PATH + "/data/hxb2_pol.fas"
        bam = TEST_PATH + "/data/align.bam"

        rs = parse_references_from_fasta(reference)

        mapped_read_collection_arr = []
        for r in rs:
            # Create a MappedReadCollection object
            mapped_read_collection_arr.append(
                parse_mapped_reads_from_bam(r, bam))

        variants_obj = NTVariantCollection(rs)

        for i in range(0, 20):
            variant = NTVariant(chrom="hxb2_pol",
                                pos=i,
                                id=".",
                                ref='a',
                                alt='t',
                                qual="50",
                                filter="PASS",
                                info={
                                    "DP": "300",
                                    "AC": "1",
                                    "AF": "0.0025"
                                })

            variants_obj.variants["hxb2_pol"][i]['t'] = variant

        # Create a valid VCF file
        valid_vcf_file = TEST_PATH + "/data/valid_vcf_file.vcf"

        with open(valid_vcf_file, "w+") as f:
            f.write(
                "##fileformat=VCFv4.2\n"
                "##fileDate=20171005\n"
                "##source=quasitools\n"
                "##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Total Depth\">\n"
                "##INFO=<ID=AC,Number=A,Type=Integer,Description=\"Allele Count\">\n"
                "##INFO=<ID=AF,Number=A,Type=Float,Description=\"Allele Frequency\">\n"
                "##FILTER=<ID=q30,Description=\"Quality below 30\">\n"
                "##FILTER=<ID=dp100,Description=\"Read depth below 100\">\n"
                "##FILTER=<ID=ac5,Description=\"Allele count below 5\">\n"
                "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO")

            for rid in variants_obj.variants:
                for pos in variants_obj.variants[rid]:
                    for alt in variants_obj.variants[rid][pos]:
                        variant = variants_obj.variants[rid][pos][alt]
                        f.write("\n%s\t%i\t%s\t%s\t%s\t%s\t%s" %
                                (variant.chrom, int(
                                    variant.pos), variant.id, variant.ref,
                                 variant.alt, variant.qual, variant.filter))
                        f.write(
                            "\tDP=%i;AC=%i;AF=%0.4f" %
                            (int(variant.info["DP"]), int(variant.info["AC"]),
                             float(variant.info["AF"])))

        parsed_nt_var = parse_nt_variants_from_vcf(valid_vcf_file, rs)

        # Check equality of parsed NTVariantCollection vs. the valid NTVariantCollection
        for rid in parsed_nt_var.variants:
            for pos in parsed_nt_var.variants[rid]:
                for alt in parsed_nt_var.variants[rid][pos]:
                    parsed_variant = parsed_nt_var.variants[rid][pos][alt]
                    variant = variants_obj.variants[rid][pos][alt]

                    assert parsed_variant.chrom == variant.chrom
                    assert parsed_variant.pos == variant.pos
                    assert parsed_variant.id == variant.id
                    assert parsed_variant.ref == variant.ref
                    assert parsed_variant.alt == variant.alt
                    assert parsed_variant.qual == variant.qual
                    assert parsed_variant.filter == variant.filter
                    assert parsed_variant.info["DP"] == variant.info["DP"]
                    assert parsed_variant.info["AC"] == variant.info["AC"]
                    assert parsed_variant.info["AF"] == variant.info["AF"]

        os.remove(valid_vcf_file)
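
This test writes valid_vcf_file into the repository's test data directory and removes it with os.remove at the end. One alternative (a style suggestion, not how quasitools structures its tests) is to build the file with tempfile so cleanup happens even when an assertion fails:

import os
import tempfile

# Write the VCF content to a temporary file that is removed in `finally`.
with tempfile.NamedTemporaryFile("w", suffix=".vcf", delete=False) as handle:
    handle.write("##fileformat=VCFv4.2\n")
    tmp_path = handle.name

try:
    pass  # e.g. parse_nt_variants_from_vcf(tmp_path, rs), as in the test above
finally:
    os.remove(tmp_path)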