Esempio n. 1
0
def mark_dupes(bc, gene_id, reads, args, dupe_type, dupe_func, reporter,
               corrected_dupe_keys=None, out_bam=None):
    """Flag PCR-duplicate reads within one (barcode, gene) read group.

    For each duplicate candidate the first read seen for a
    (dupe_key, umi) pair is kept; subsequent reads are flagged as
    duplicates. UMIs may be replaced by a corrected form (taken from
    corrected_dupe_keys) before the duplicate test.

    Args:
        bc: barcode of this group (not used in the body; kept for interface).
        gene_id: gene identifier reported for each dupe-key group.
        reads: iterable of alignment records to process.
        args: stage args; args.align supplies the high-confidence MAPQ.
        dupe_type: duplicate category; CDNA_PCR_DUPE_TYPE enables marking.
        dupe_func: callable mapping a read to its duplicate key.
        reporter: metrics reporter receiving per-read / per-group callbacks.
        corrected_dupe_keys: optional {dupe_key: {raw_umi: corrected_umi}};
            required when marking is enabled.
        out_bam: output BAM; required when marking is enabled.

    Returns:
        dict mapping dupe_key -> {umi: read count}.
    """
    do_mark = (dupe_type == cr_constants.CDNA_PCR_DUPE_TYPE)
    # Marking mode needs both the UMI-correction map and an output BAM.
    assert (not do_mark) or (corrected_dupe_keys is not None and out_bam is not None)

    umi_counts_by_key = collections.defaultdict(dict)

    for rec in reads:
        rec.is_duplicate = False

        if cr_utils.is_read_dupe_candidate(rec, cr_utils.get_high_conf_mapq(args.align)):
            key = dupe_func(rec)
            counts_for_key = umi_counts_by_key[key]
            umi = cr_utils.get_read_umi(rec)

            # Substitute the corrected UMI (tagging the read) when one exists.
            if corrected_dupe_keys and key in corrected_dupe_keys and umi in corrected_dupe_keys[key]:
                new_umi = corrected_dupe_keys[key][umi]
                cr_utils.set_tag(rec, cr_constants.PROCESSED_UMI_TAG, umi, new_umi)
                umi = new_umi

            # First read per (key, umi) is the original; the rest are dupes.
            if umi not in counts_for_key:
                counts_for_key[umi] = 1
            else:
                counts_for_key[umi] += 1
                rec.is_duplicate = True

        if do_mark:
            reporter.mark_dupes_corrected_cb(rec)
            out_bam.write(rec)

        reporter.mark_dupes_bam_cb(rec, dupe_type)

    for _, umis in umi_counts_by_key.iteritems():
        reporter.mark_dupes_group_cb(gene_id, umis, dupe_type)

    return umi_counts_by_key
Esempio n. 2
0
def main(args, outs):
    """Write one representative read per (SI-PCR dupe key, UMI) for cell barcodes.

    Streams a position-sorted input BAM, keeps only reads whose barcode is
    cell-associated, and within each (tid, pos) group emits the first read
    seen for each (si_pcr_dupe_func(read), umi) pair.

    Fixes: the input and output BAM handles were never closed, so buffered
    output records could be left unflushed and the output BAM unfinalized.
    Also hoists the loop-invariant high-confidence MAPQ lookup.
    """
    outs.coerce_strings()

    in_bam = tk_bam.create_bam_infile(args.input_bam)
    out_bam, _ = tk_bam.create_bam_outfile(outs.output,
                                           None,
                                           None,
                                           template=in_bam)
    cell_bcs = set(cr_utils.load_barcode_tsv(args.cell_barcodes))

    # Invariant across all reads; compute once instead of per read.
    high_conf_mapq = cr_utils.get_high_conf_mapq(args.align)

    for (tid, pos), reads_iter in itertools.groupby(in_bam,
                                                    key=cr_utils.pos_sort_key):
        # Dedup only within a position group; groupby relies on the BAM
        # being position-sorted.
        dupe_keys = set()
        for read in reads_iter:
            if cr_utils.get_read_barcode(read) not in cell_bcs:
                continue

            if cr_utils.is_read_dupe_candidate(read, high_conf_mapq):
                dupe_key = (cr_utils.si_pcr_dupe_func(read),
                            cr_utils.get_read_umi(read))
                if dupe_key in dupe_keys:
                    continue

                dupe_keys.add(dupe_key)
                out_bam.write(read)

    # Close to flush buffered records and finalize the output BAM.
    in_bam.close()
    out_bam.close()
Esempio n. 3
0
def main(args, outs):
    """Filter a possorted BAM chunk to unique molecules from cluster barcodes.

    Reads the [chunk_start, chunk_end) slice of the position-sorted BAM,
    keeps reads whose barcode belongs to the cluster, and within each
    (tid, pos) group writes the first read for each
    (si_pcr_dupe_func(read), umi) pair with its duplicate flag cleared.

    Fixes: the input and output BAM handles were never closed, so buffered
    output records could be left unflushed and the output BAM unfinalized.
    Also hoists the loop-invariant high-confidence MAPQ lookup out of the
    per-read loop.
    """
    outs.coerce_strings()

    in_bam = tk_bam.create_bam_infile(args.possorted_bam)
    in_bam_chunk = tk_bam.read_bam_chunk(in_bam, (args.chunk_start, args.chunk_end))
    out_bam, _ = tk_bam.create_bam_outfile(outs.filtered_bam, None, None, template=in_bam)
    cluster_bcs = set(args.cluster_bcs)

    # NOTE(review): MAPQ threshold is hard-coded here rather than taken from
    # args.align as sibling stages do — preserved as-is, but worth confirming.
    high_conf_mapq = cr_utils.get_high_conf_mapq({"high_conf_mapq": 60})

    for (tid, pos), reads_iter in itertools.groupby(in_bam_chunk, key=cr_utils.pos_sort_key):
        # Dedup only within a position group; groupby relies on pos-sorted input.
        dupe_keys = set()
        for read in reads_iter:
            if cr_utils.get_read_barcode(read) not in cluster_bcs:
                continue

            if cr_utils.is_read_dupe_candidate(read, high_conf_mapq):
                dupe_key = (cr_utils.si_pcr_dupe_func(read), cr_utils.get_read_umi(read))
                if dupe_key in dupe_keys:
                    continue

                dupe_keys.add(dupe_key)
                read.is_duplicate = False
                out_bam.write(read)

    # Close to flush buffered records and finalize the output BAM.
    in_bam.close()
    out_bam.close()
Esempio n. 4
0
def main(args, outs):
    """Build a per-chunk molecule info file from a barcode-sorted BAM.

    Groups reads by (gem_group, barcode), counts reads per
    (umi, library, feature) molecule, and appends one row per molecule to
    a MoleculeCounter file, accumulating a usable-read metric per library
    for cell-associated barcodes.
    """
    outs.coerce_strings()

    # Load whitelist
    whitelist = cr_utils.load_barcode_whitelist(args.barcode_whitelist)
    barcode_to_idx = OrderedDict((k, i) for i, k in enumerate(whitelist))

    # Load feature reference
    feature_ref = rna_feature_ref.from_transcriptome_and_csv(
        args.reference_path, args.feature_reference)

    # Load library info from BAM
    in_bam = tk_bam.create_bam_infile(args.chunk_input)
    library_info = rna_library.get_bam_library_info(in_bam)

    # Get cell-associated barcodes by genome
    filtered_bcs_by_genome = cr_utils.load_barcode_csv(args.filtered_barcodes)
    filtered_bc_union = cr_utils.get_cell_associated_barcode_set(
        args.filtered_barcodes)

    # Create the barcode info
    barcode_info = MoleculeCounter.build_barcode_info(filtered_bcs_by_genome,
                                                      library_info, whitelist)

    # Create the molecule info file
    mc = MoleculeCounter.open(outs.output,
                              mode='w',
                              feature_ref=feature_ref,
                              barcodes=whitelist,
                              library_info=library_info,
                              barcode_info=barcode_info)

    # Initialize per-library metrics
    lib_metrics = {}
    for lib_idx in xrange(len(library_info)):
        lib_metrics[str(lib_idx)] = {}
        lib_metrics[str(lib_idx)][cr_mol_counter.USABLE_READS_METRIC] = 0

    # Record read-counts per molecule. Note that UMIs are not contiguous
    # in the input because no sorting was done after UMI correction.

    # Track the previous group so the sort-order assertions below can
    # verify the BAM really is (gem_group, barcode)-sorted.
    prev_gem_group = None
    prev_barcode_idx = None

    for (gem_group, barcode_seq), reads_iter in \
        itertools.groupby(in_bam, key=cr_utils.barcode_sort_key_no_umi):
        # Skip reads with no (corrected) barcode.
        if barcode_seq is None:
            continue

        barcode_idx = barcode_to_idx[barcode_seq]

        # Assert expected sort order of input BAM
        assert gem_group >= prev_gem_group
        if gem_group == prev_gem_group:
            assert barcode_idx >= prev_barcode_idx

        is_cell_barcode = cr_utils.format_barcode_seq(
            barcode_seq, gem_group) in filtered_bc_union

        # (umi_int, library_idx, feature_int) -> read count for this barcode.
        counts = defaultdict(int)

        for read in reads_iter:
            # ignore read2 to avoid double-counting. the mapping + annotation should be equivalent.
            if read.is_secondary or \
               read.is_read2 or \
               cr_utils.is_read_low_support_umi(read) or \
               not cr_utils.is_read_conf_mapped_to_feature(read):
                continue

            umi_seq = cr_utils.get_read_umi(read)
            if umi_seq is None:
                continue

            # Pack the UMI sequence into an integer sized to the 'umi' column.
            umi_int = MoleculeCounter.compress_umi_seq(
                umi_seq,
                MoleculeCounter.get_column_dtype('umi').itemsize * 8)

            feature_ids = cr_utils.get_read_gene_ids(read)
            # Confidently-mapped reads are expected to hit exactly one feature.
            assert len(feature_ids) == 1
            feature_int = feature_ref.id_map[feature_ids[0]].index

            library_idx = cr_utils.get_read_library_index(read)

            counts[(umi_int, library_idx, feature_int)] += 1

            if is_cell_barcode:
                lib_metrics[str(library_idx)][
                    cr_mol_counter.USABLE_READS_METRIC] += 1

            prev_gem_group = gem_group
            prev_barcode_idx = barcode_idx

        # Record data for this barcode
        gg_int = MoleculeCounter.get_column_dtype('gem_group').type(gem_group)
        mc.append_column('gem_group', np.repeat(gg_int, len(counts)))
        bc_int = MoleculeCounter.get_column_dtype('barcode_idx').type(
            barcode_idx)
        mc.append_column('barcode_idx', np.repeat(bc_int, len(counts)))

        feature_ints = np.fromiter(
            (k[2] for k in counts.iterkeys()),
            dtype=MoleculeCounter.get_column_dtype('feature_idx'),
            count=len(counts))
        # Sort by feature for fast matrix construction
        order = np.argsort(feature_ints)
        feature_ints = feature_ints[order]
        mc.append_column('feature_idx', feature_ints)
        del feature_ints

        # NOTE: the repeated iterkeys()/itervalues() passes below rely on the
        # dict's iteration order being stable while it is not mutated, so the
        # same 'order' permutation aligns all columns.
        li_ints = np.fromiter(
            (k[1] for k in counts.iterkeys()),
            dtype=MoleculeCounter.get_column_dtype('library_idx'),
            count=len(counts))[order]
        mc.append_column('library_idx', li_ints)
        del li_ints

        umi_ints = np.fromiter((k[0] for k in counts.iterkeys()),
                               dtype=MoleculeCounter.get_column_dtype('umi'),
                               count=len(counts))[order]
        mc.append_column('umi', umi_ints)
        del umi_ints

        count_ints = np.fromiter(
            counts.itervalues(),
            dtype=MoleculeCounter.get_column_dtype('count'),
            count=len(counts))[order]
        mc.append_column('count', count_ints)
        del count_ints

    in_bam.close()

    mc.set_metric(cr_mol_counter.LIBRARIES_METRIC, dict(lib_metrics))

    mc.save()
Esempio n. 5
0
def main(args, outs):
    """Genotype SNPs per cell barcode from read pileups over candidate variants.

    For each variant passing the call-quality filter: tallies per-(barcode,
    UMI) ref/alt base observations at the SNP position, collapses each UMI
    to its majority base (dropping low-quality consensus calls), filters
    SNPs on barcode/observation support, and records per barcode:
      - raw ref/alt observation counts in raw_allele_bc_matrices, and
      - normalized genotype log-likelihoods (hom-ref / het / hom-alt) in
        likelihood_allele_bc_matrices.
    Surviving VCF records are written to outs.filtered_variants.

    Fixes: the cell-barcode set was rebuilt (set(bcs)) inside the per-read
    assert — O(n) per pileup read — it is now built once; the inner
    defaultdict no longer shadows the loaded SNP list; the input BAM is
    closed when done.
    """
    in_bam = tk_bam.create_bam_infile(args.reads)

    out_vcf = tk_io.VariantFileWriter(open(outs.filtered_variants, 'w'),
                                      template_file=open(args.chunk_variants))

    snps = load_snps(args.snps)
    bcs = cr_utils.load_barcode_tsv(args.cell_barcodes)
    # Build the membership set once, not once per pileup read.
    bc_set = set(bcs)

    raw_matrix_types = snp_constants.SNP_BASE_TYPES
    raw_matrix_snps = [snps for _ in snp_constants.SNP_BASE_TYPES]
    raw_allele_bc_matrices = cr_matrix.GeneBCMatrices(raw_matrix_types,
                                                      raw_matrix_snps, bcs)

    likelihood_matrix_types = snp_constants.ALLELES
    likelihood_matrix_snps = [snps for _ in snp_constants.ALLELES]
    likelihood_allele_bc_matrices = cr_matrix.GeneBCMatrices(
        likelihood_matrix_types, likelihood_matrix_snps, bcs, dtype=np.float64)

    # Configurable SNP filter parameters (fall back to defaults when unset)
    min_snp_call_qual = args.min_snp_call_qual if args.min_snp_call_qual is not None else snp_constants.DEFAULT_MIN_SNP_CALL_QUAL
    min_bcs_per_snp = args.min_bcs_per_snp if args.min_bcs_per_snp is not None else snp_constants.DEFAULT_MIN_BCS_PER_SNP
    min_snp_obs = args.min_snp_obs if args.min_snp_obs is not None else snp_constants.DEFAULT_MIN_SNP_OBS
    base_error_rate = args.base_error_rate if args.base_error_rate is not None else snp_constants.DEFAULT_BASE_ERROR_RATE
    min_snp_base_qual = args.min_snp_base_qual if args.min_snp_base_qual is not None else snp_constants.DEFAULT_MIN_SNP_BASE_QUAL

    for record in vcf_record_iter(args.chunk_variants, min_snp_call_qual):
        ref_base = str(record.REF)
        alt_base = str(record.ALT[0])

        pos = record.POS - 1  # VCF is 1-based; the pileup API is 0-based
        # (bc, umi) -> 2x2 array of [ref, alt] x [count, max base qual].
        # Renamed from 'snps' to stop shadowing the SNP list loaded above.
        umi_base_obs = collections.defaultdict(lambda: np.zeros((2, 2)))
        for col in in_bam.pileup(record.CHROM, pos, pos + 1):
            if col.pos != pos:
                continue

            for read in col.pileups:
                bc = cr_utils.get_read_barcode(read.alignment)
                umi = cr_utils.get_read_umi(read.alignment)
                assert bc in bc_set and umi is not None

                # Overlaps an exon junction
                qpos = get_read_qpos(read)
                if qpos is None:
                    continue

                base = str(read.alignment.query[qpos - read.alignment.qstart])
                base_qual = ord(read.alignment.qual[
                    qpos -
                    read.alignment.qstart]) - tk_constants.ILLUMINA_QUAL_OFFSET

                if base == ref_base:
                    base_index = 0
                elif base == alt_base:
                    base_index = 1
                else:
                    # Ignore bases matching neither allele.
                    continue

                dupe_key = (bc, umi)
                umi_base_obs[dupe_key][base_index, 0] += 1
                umi_base_obs[dupe_key][base_index, 1] = max(
                    base_qual, umi_base_obs[dupe_key][base_index, 1])

        # Collapse each (bc, umi) to its majority base; drop low-qual calls.
        bcs_bases = collections.defaultdict(collections.Counter)
        for (bc, umi), bases in umi_base_obs.iteritems():
            base_index = np.argmax(bases[:, 0])
            base = ref_base if base_index == 0 else alt_base
            base_qual = bases[base_index, 1]
            if base_qual < min_snp_base_qual:
                continue
            bcs_bases[bc][base] += 1

        # Filter if not enough unique barcodes
        if len(bcs_bases) < min_bcs_per_snp:
            continue

        # Filter if not enough observed bases
        snp_obs = 0
        for b in bcs_bases.itervalues():
            snp_obs += sum(b.itervalues())
        if snp_obs < min_snp_obs:
            continue

        for bc, bases in bcs_bases.iteritems():
            ref_obs = bases[ref_base]
            alt_obs = bases[alt_base]
            total_obs = ref_obs + alt_obs
            obs = np.array([
                ref_obs,
                alt_obs,
            ])

            # Binomial likelihood of the observed counts under each genotype.
            log_p_hom_ref = sp_stats.binom.logpmf(ref_obs, total_obs,
                                                  1 - base_error_rate)
            log_p_hom_alt = sp_stats.binom.logpmf(alt_obs, total_obs,
                                                  1 - base_error_rate)
            log_p_het = sp_stats.binom.logpmf(ref_obs, total_obs, 0.5)

            log_p = np.array([
                log_p_hom_ref,
                log_p_het,
                log_p_hom_alt,
            ])
            # Normalize to log posteriors over the three genotypes.
            log_p -= sp_misc.logsumexp(log_p)

            matrix = raw_allele_bc_matrices.matrices.values()[0]
            snp_index = matrix.gene_id_to_int(format_record(record))
            bc_index = matrix.bc_to_int(bc)

            for i, base_type in enumerate(snp_constants.SNP_BASE_TYPES):
                raw_allele_bc_matrices.get_matrix(base_type).m[
                    snp_index, bc_index] = obs[i]

            for i, allele in enumerate(snp_constants.ALLELES):
                likelihood_allele_bc_matrices.get_matrix(allele).m[
                    snp_index, bc_index] = log_p[i]

        out_vcf.write_record(record)

    in_bam.close()

    raw_allele_bc_matrices.save_h5(outs.raw_allele_bc_matrices_h5)
    likelihood_allele_bc_matrices.save_h5(
        outs.likelihood_allele_bc_matrices_h5)
Esempio n. 6
0
def main(args, outs):
    """Build a molecule counter from a barcode-sorted BAM chunk.

    Groups reads by (gem_group, barcode, gene_ids), accumulates per-molecule
    counts keyed by (umi, gene), records per-gem-group confidently-mapped
    read metrics for cell barcodes, and writes one counter row per molecule.
    """
    outs.coerce_strings()

    in_bam = tk_bam.create_bam_infile(args.chunk_input)

    counter = cr_mol_counter.MoleculeCounter.open(outs.output, mode='w')

    # Map each data column name to its index in the per-molecule array.
    mol_data_keys = cr_mol_counter.MoleculeCounter.get_data_columns()
    mol_data_columns = {key: idx for idx, key in enumerate(mol_data_keys)}

    gene_index = cr_reference.GeneIndex.load_pickle(
        cr_utils.get_reference_genes_index(args.reference_path))
    genomes = cr_utils.get_reference_genomes(args.reference_path)
    genome_index = cr_reference.get_genome_index(genomes)
    # Sentinel gene id (one past the last real gene) for unmapped /
    # non-confidently-mapped molecules.
    none_gene_id = len(gene_index.get_genes())

    # store reference index columns
    # NOTE - these must be cast to str first, as unicode is not supported
    counter.set_ref_column('genome_ids', [str(genome) for genome in genomes])
    counter.set_ref_column('gene_ids',
                           [str(gene.id) for gene in gene_index.genes])
    counter.set_ref_column('gene_names',
                           [str(gene.name) for gene in gene_index.genes])

    # Union of cell-associated barcodes across all genomes.
    filtered_bcs_per_genome = cr_utils.load_barcode_csv(args.filtered_barcodes)
    filtered_bcs = set()
    for _, bcs in filtered_bcs_per_genome.iteritems():
        filtered_bcs |= set(bcs)

    gg_metrics = collections.defaultdict(
        lambda: {cr_mol_counter.GG_CONF_MAPPED_FILTERED_BC_READS_METRIC: 0})

    for (gem_group, barcode, gene_ids), reads_iter in itertools.groupby(
            in_bam, key=cr_utils.barcode_sort_key):
        # Skip reads with no barcode / gem group.
        if barcode is None or gem_group is None:
            continue
        is_cell_barcode = cr_utils.format_barcode_seq(
            barcode, gem_group) in filtered_bcs
        # (umi, gene_int) -> per-column uint64 counts for this molecule.
        molecules = collections.defaultdict(
            lambda: np.zeros(len(mol_data_columns), dtype=np.uint64))

        compressed_barcode = cr_mol_counter.MoleculeCounter.compress_barcode_seq(
            barcode)
        gem_group = cr_mol_counter.MoleculeCounter.compress_gem_group(
            gem_group)

        # Distinct (tid, pos) seen per molecule, for the uniq-read-pos count.
        read_positions = collections.defaultdict(set)
        for read in reads_iter:
            umi = cr_utils.get_read_umi(read)
            # ignore read2 to avoid double-counting. the mapping + annotation should be equivalent.
            if read.is_secondary or umi is None or read.is_read2:
                continue

            raw_umi = cr_utils.get_read_raw_umi(read)
            raw_bc, raw_gg = cr_utils.split_barcode_seq(
                cr_utils.get_read_raw_barcode(read))
            proc_bc, proc_gg = cr_utils.split_barcode_seq(
                cr_utils.get_read_barcode(read))

            if cr_utils.is_read_conf_mapped_to_transcriptome(
                    read, cr_utils.get_high_conf_mapq(args.align)):
                # Confidently-mapped reads carry exactly one gene.
                assert len(gene_ids) == 1

                mol_key, map_type = (umi, gene_index.gene_id_to_int(
                    gene_ids[0])), 'reads'

                read_pos = (read.tid, read.pos)
                uniq_read_pos = read_pos not in read_positions[mol_key]
                read_positions[mol_key].add(read_pos)

                if is_cell_barcode:
                    gg_metrics[int(gem_group)][
                        cr_mol_counter.
                        GG_CONF_MAPPED_FILTERED_BC_READS_METRIC] += 1

            elif read.is_unmapped:
                mol_key, map_type, uniq_read_pos = (
                    umi, none_gene_id), 'unmapped_reads', False
            else:
                mol_key, map_type, uniq_read_pos = (
                    umi, none_gene_id), 'nonconf_mapped_reads', False
            # Accumulate this read into its molecule's column counters.
            molecules[mol_key][mol_data_columns[map_type]] += 1
            molecules[mol_key][mol_data_columns['umi_corrected_reads']] += int(
                not raw_umi == umi)
            molecules[mol_key][mol_data_columns[
                'barcode_corrected_reads']] += int(not raw_bc == proc_bc)
            molecules[mol_key][mol_data_columns[
                'conf_mapped_uniq_read_pos']] += int(uniq_read_pos)

        # Emit one counter row per molecule, sorted for deterministic output.
        for mol_key, molecule in sorted(molecules.items()):
            umi, gene_id = mol_key
            genome = cr_utils.get_genome_from_str(
                gene_index.int_to_gene_id(gene_id), genomes)
            genome_id = cr_reference.get_genome_id(genome, genome_index)
            counter.add(
                barcode=compressed_barcode,
                gem_group=gem_group,
                umi=cr_mol_counter.MoleculeCounter.compress_umi_seq(umi),
                gene=gene_id,
                genome=genome_id,
                **{
                    key: molecule[col_idx]
                    for key, col_idx in mol_data_columns.iteritems()
                })

    in_bam.close()

    counter.set_metric(cr_mol_counter.GEM_GROUPS_METRIC, dict(gg_metrics))

    counter.save()