# Example 1
# 0
def make_ngs_error_examples(ref_path, vcf_path, bam_path):
  """Yields tf.Examples for training an ML model on NGS read errors.

  Each yielded tf.Example carries the relevant features about one NGS read.

  Args:
    ref_path: str. A path to an indexed fasta file.
    vcf_path: str. A path to an indexed VCF file.
    bam_path: str. A path to an SAM/BAM file.

  Yields:
    A tuple (example, ngs_read_length, has_error), where example is a
    tf.Example, ngs_read_length is the length of the read generated by the
    sequencer, and has_error is a boolean specifying whether the example
    contains a read error.
  """
  # Open the three inputs. Passing an empty ReadRequirements proto enables
  # the SamReader's standard filtering with its default settings, and
  # explicitly allows reading an unindexed BAM (only iterate() is usable).
  fasta_in = fasta.IndexedFastaReader(ref_path)
  vcf_in = vcf.VcfReader(vcf_path)
  sam_in = sam.SamReader(
      bam_path, read_requirements=reads_pb2.ReadRequirements())

  # Every reader is a context manager, so one `with` opens them all and
  # guarantees they are closed once we finish looping over the reads.
  with fasta_in, vcf_in, sam_in:
    for read in sam_in.iterate():
      cigar_units = read.alignment.cigar
      assert len(cigar_units) > 0
      # When the alignment starts with a soft clip, the sequenced bases
      # begin operation_length positions before the alignment position.
      seq_start = read.alignment.position.position
      leading = cigar_units[0]
      if leading.operation == cigar_pb2.CigarUnit.CLIP_SOFT:
        seq_start -= leading.operation_length
      # Range proto covering the chrom/start/stop spanned by this read.
      span = ranges.make_range(
          read.alignment.position.reference_name, seq_start,
          seq_start + len(read.aligned_sequence))

      # Variants overlapping the read, and the reference bases it spans.
      overlapping_variants = list(vcf_in.query(span))
      reference_bases = fasta_in.query(span)

      # Only emit reads that qualify as training examples.
      if is_usable_training_example(read, overlapping_variants,
                                    reference_bases):
        has_error = read.aligned_sequence != reference_bases
        yield (make_example(read, reference_bases),
               len(read.aligned_sequence), has_error)
def make_ngs_examples(hparams):
    """Generator function that yields training, evaluation and test examples.

    Args:
      hparams: Hyperparameter object. Reads `hparams.ref_path` (indexed
        fasta), `hparams.vcf_path` (indexed VCF) and `hparams.bam_path`
        (SAM/BAM), and is also forwarded to get_pileup_range/make_example.

    Yields:
      The tf.Example produced by make_example for each usable, previously
      unseen pileup range.
    """
    ref_reader = fasta.IndexedFastaReader(input_path=hparams.ref_path)
    vcf_reader = vcf.VcfReader(input_path=hparams.vcf_path)
    read_requirements = reads_pb2.ReadRequirements()
    sam_reader = sam.SamReader(input_path=hparams.bam_path,
                               read_requirements=read_requirements)

    # Use a separate SAM reader to query for reads falling in the pileup range.
    sam_query_reader = sam.SamReader(input_path=hparams.bam_path,
                                     read_requirements=read_requirements)
    used_pileup_ranges = set()
    with ref_reader, vcf_reader, sam_reader, sam_query_reader:
        for read in sam_reader:

            # Check that read has cigar string present and allowed alignment.
            if not read.alignment.cigar:
                # Use logging rather than print() so library diagnostics go
                # through the logging system like the rest of this module.
                logging.info('Skipping read, no cigar alignment found')
                continue
            if not has_allowed_alignment(read):
                continue

            # Obtain window that will be used to construct an example.
            read_range = utils.read_range(read)
            ref = ref_reader.query(region=read_range)
            pileup_range = get_pileup_range(hparams, read, read_range, ref)

            # Do not construct multiple examples with the same pileup range.
            # The serialized proto is used as a hashable set key.
            pileup_range_serialized = pileup_range.SerializeToString()
            if pileup_range_serialized in used_pileup_ranges:
                continue
            used_pileup_ranges.add(pileup_range_serialized)

            # Get reference sequence, reads, and truth variants for the pileup
            # range.
            pileup_reads = list(sam_query_reader.query(region=pileup_range))
            pileup_ref = ref_reader.query(region=pileup_range)
            pileup_variants = list(vcf_reader.query(region=pileup_range))
            if is_usable_example(pileup_reads, pileup_variants, pileup_ref):
                yield make_example(hparams, pileup_reads, pileup_ref,
                                   pileup_range)
# Example 3
# 0
def make_ngs_error_examples(ref_path,
                            vcf_path,
                            bam_path,
                            examples_out_path,
                            max_reads=None):
    """Driver program for ngs_errors.

    See module description for details.

    Args:
      ref_path: str. A path to an indexed fasta file.
      vcf_path: str. A path to an indexed VCF file.
      bam_path: str. A path to an SAM/BAM file.
      examples_out_path: str. A path where we will write out examples.
      max_reads: int or None. If not None, we will emit at most max_reads
        examples to examples_out_path.
    """
    # Readers for the reference genome, truth variants, and aligned reads.
    # An empty ReadRequirements proto turns on the SamReader's standard
    # filtering with default values and allows access to an unindexed BAM,
    # in which case only iterate() is available.
    ref_reader = fasta.IndexedFastaReader(ref_path)
    vcf_reader = vcf.VcfReader(vcf_path)
    requirements = reads_pb2.ReadRequirements()
    sam_reader = sam.SamReader(bam_path, read_requirements=requirements)

    # TFRecordWriter that receives the generated tf.Examples.
    examples_out = genomics_writer.TFRecordWriter(examples_out_path)

    n_examples = 0
    # Readers and the writer are context managers; `with` closes them all
    # when we are done looping over the reads.
    with ref_reader, vcf_reader, sam_reader, examples_out:
        for total_reads, read in enumerate(sam_reader.iterate(), start=1):
            # Range proto for the chrom/start/stop spanned by this read.
            span = utils.read_range(read)

            # Overlapping truth variants and the reference bases of the span.
            variants = list(vcf_reader.query(span))
            ref_bases = ref_reader.query(span)

            # Skip reads that cannot be turned into a training example.
            if not is_usable_training_example(read, variants, ref_bases):
                continue

            n_examples += 1

            # Convert to a tf.Example and write it to the TFRecord output.
            examples_out.write(make_example(read, ref_bases))

            # Convenient per-read logging; very verbose for large inputs.
            logging.info(
                'Added an example for read %s (span=%s) with cigar %s '
                '[%d added of %d total reads]', read.fragment_name,
                ranges.to_literal(span),
                cigar.format_cigar_units(read.alignment.cigar), n_examples,
                total_reads)

            if max_reads is not None and n_examples >= max_reads:
                return