def log_time_and_memory_usage(timers, show_phase_vcfs):
    total_time = timers.total()
    logger.info("\n== SUMMARY ==")
    log_memory_usage()
    # fmt: off
    logger.info("Time spent reading BAM/CRAM: %6.1f s", timers.elapsed("read_bam"))
    logger.info("Time spent parsing VCF: %6.1f s", timers.elapsed("parse_vcf"))
    if show_phase_vcfs:
        logger.info("Time spent parsing input phasings from VCFs: %6.1f s", timers.elapsed("parse_phasing_vcfs"))
    logger.info("Time spent selecting reads: %6.1f s", timers.elapsed("select"))
    logger.info("Time spent phasing: %6.1f s", timers.elapsed("phase"))
    logger.info("Time spent writing VCF: %6.1f s", timers.elapsed("write_vcf"))
    logger.info("Time spent finding components: %6.1f s", timers.elapsed("components"))
    logger.info("Time spent on rest: %6.1f s", total_time - timers.sum())
    logger.info("Total elapsed time: %6.1f s", total_time)
    # fmt: on
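

# Illustrative sketch only (not called anywhere in this module): how a caller might
# populate the stage names that log_time_and_memory_usage() reports. StageTimer is
# used both via start()/stop() and as a context manager elsewhere in this file; the
# stage names below mirror the ones in the summary above.
#
#     timers = StageTimer()
#     timers.start("parse_vcf")
#     ...  # iterate over the input VCF
#     timers.stop("parse_vcf")
#     with timers("select"):
#         ...  # read selection
#     with timers("phase"):
#         ...  # core phasing
#     log_time_and_memory_usage(timers, show_phase_vcfs=False)

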
def run_polyphase(
    phase_input_files,
    variant_file,
    ploidy,
    reference=None,
    output=sys.stdout,
    samples=None,
    chromosomes=None,
    verify_genotypes=False,
    ignore_read_groups=False,
    indels=True,
    mapping_quality=20,
    tag="PS",
    include_haploid_sets=False,
    write_command_line_header=True,
    read_list_filename=None,
    ce_bundle_edges=False,
    min_overlap=2,
    plot_clusters=False,
    plot_threading=False,
    ce_refinements=5,
    block_cut_sensitivity=4,
):
    """
    Run Polyploid Phasing.

    phase_input_files -- list of paths to BAM/CRAM/VCF files
    variant_file -- path to input VCF
    reference -- path to reference FASTA
    output -- path to output VCF or a file-like object
    samples -- names of samples to phase. An empty list means: phase all samples
    chromosomes -- names of chromosomes to phase. An empty list means: phase all chromosomes
    ignore_read_groups
    mapping_quality -- discard reads below this mapping quality
    tag -- how to store phasing info in the VCF, can be 'PS' or 'HP'
    write_command_line_header -- whether to add a ##commandline header to the output VCF
    """
    timers = StageTimer()
    logger.info(
        "This is WhatsHap (polyploid) %s running under Python %s",
        __version__,
        platform.python_version(),
    )
    numeric_sample_ids = NumericSampleIds()
    with ExitStack() as stack:
        assert phase_input_files
        phased_input_reader = stack.enter_context(
            PhasedInputReader(
                phase_input_files,
                reference,
                numeric_sample_ids,
                ignore_read_groups,
                indels=indels,
                mapq_threshold=mapping_quality,
            )
        )
        assert not phased_input_reader.has_vcfs

        if write_command_line_header:
            command_line = "(whatshap {}) {}".format(__version__, " ".join(sys.argv[1:]))
        else:
            command_line = None
        try:
            vcf_writer = stack.enter_context(
                PhasedVcfWriter(
                    command_line=command_line,
                    in_path=variant_file,
                    out_file=output,
                    tag=tag,
                    ploidy=ploidy,
                    include_haploid_sets=include_haploid_sets,
                )
            )
        except OSError as e:
            raise CommandLineError(e)

        vcf_reader = stack.enter_context(
            VcfReader(
                variant_file,
                indels=indels,
                phases=True,
                genotype_likelihoods=False,
                ploidy=ploidy,
            )
        )

        if ignore_read_groups and not samples and len(vcf_reader.samples) > 1:
            raise CommandLineError(
                "When using --ignore-read-groups on a VCF with "
                "multiple samples, --sample must also be used."
            )
        if not samples:
            samples = vcf_reader.samples

        vcf_sample_set = set(vcf_reader.samples)
        for sample in samples:
            if sample not in vcf_sample_set:
                raise CommandLineError(
                    "Sample {!r} requested on command-line not found in VCF".format(sample)
                )

        if block_cut_sensitivity < 0:
            logger.warning(
                "Block cut sensitivity was set to a negative value. Lowest valid value (0) is assumed instead."
            )
            block_cut_sensitivity = 0
        elif block_cut_sensitivity > 5:
            logger.warning(
                "Block cut sensitivity level too large. Assuming highest valid value (5) instead."
            )
            block_cut_sensitivity = 5

        samples = frozenset(samples)

        read_list_file = None
        if read_list_filename:
            raise NotImplementedError("create_read_list_file not implemented")
            # read_list_file = create_read_list_file(read_list_filename)

        # Store phasing parameters in a tuple to keep function signatures cleaner
        phasing_param = PhasingParameter(
            ploidy=ploidy,
            verify_genotypes=verify_genotypes,
            ce_bundle_edges=ce_bundle_edges,
            min_overlap=min_overlap,
            ce_refinements=ce_refinements,
            block_cut_sensitivity=block_cut_sensitivity,
            plot_clusters=plot_clusters,
            plot_threading=plot_threading,
        )

        timers.start("parse_vcf")
        try:
            for variant_table in vcf_reader:
                chromosome = variant_table.chromosome
                timers.stop("parse_vcf")
                if (not chromosomes) or (chromosome in chromosomes):
                    logger.info("======== Working on chromosome %r", chromosome)
                else:
                    logger.info(
                        "Leaving chromosome %r unchanged (present in VCF but not requested by option --chromosome)",
                        chromosome,
                    )
                    with timers("write_vcf"):
                        superreads, components = dict(), dict()
                        vcf_writer.write(chromosome, superreads, components)
                    continue

                # These two variables hold the phasing results for all samples
                superreads, components, haploid_components = dict(), dict(), dict()

                # Iterate over all samples to process
                for sample in samples:
                    logger.info("---- Processing individual %s", sample)

                    # Process inputs for this sample
                    missing_genotypes = set()
                    heterozygous = set()
                    genotypes = variant_table.genotypes_of(sample)
                    for index, gt in enumerate(genotypes):
                        if gt.is_none():
                            missing_genotypes.add(index)
                        elif not gt.is_homozygous():
                            heterozygous.add(index)
                        else:
                            assert gt.is_homozygous()
                    to_discard = set(range(len(variant_table))).difference(heterozygous)
                    phasable_variant_table = deepcopy(variant_table)

                    # Remove calls to be discarded from the variant table
                    phasable_variant_table.remove_rows_by_index(to_discard)

                    logger.info(
                        "Number of variants skipped due to missing genotypes: %d",
                        len(missing_genotypes),
                    )
                    logger.info(
                        "Number of remaining heterozygous variants: %d",
                        len(phasable_variant_table),
                    )

                    # Get the reads belonging to this sample
                    timers.start("read_bam")
                    readset, vcf_source_ids = phased_input_reader.read(
                        chromosome, phasable_variant_table.variants, sample
                    )
                    readset.sort()
                    timers.stop("read_bam")

                    # Verify genotypes
                    if verify_genotypes:
                        timers.start("verify_genotypes")
                        logger.info("Verify genotyping of %s", sample)
                        positions = [v.position for v in phasable_variant_table.variants]
                        computed_genotypes = [
                            Genotype(gt)
                            for gt in compute_polyploid_genotypes(readset, ploidy, positions)
                        ]
                        # Skip all positions at which genotypes do not match
                        given_genotypes = phasable_variant_table.genotypes_of(sample)
                        matching_genotypes = []
                        missing_genotypes = set()
                        logger.debug("Computed genotypes: %s (%d)", computed_genotypes, len(computed_genotypes))
                        logger.debug("Given genotypes: %s (%d)", given_genotypes, len(given_genotypes))
                        logger.debug("Number of positions: %d", len(positions))
                        for i, g in enumerate(given_genotypes):
                            c_g = computed_genotypes[i]
                            if (g == c_g) or (c_g is None):
                                matching_genotypes.append(g)
                            else:
                                matching_genotypes.append(Genotype([]))
                                missing_genotypes.add(i)
                        phasable_variant_table.set_genotypes_of(sample, matching_genotypes)

                        # Remove variants with deleted genotype
                        phasable_variant_table.remove_rows_by_index(missing_genotypes)
                        logger.info(
                            "Number of variants removed due to inconsistent genotypes: %d",
                            len(missing_genotypes),
                        )
                        logger.info(
                            "Number of remaining heterozygous variants: %d",
                            len(phasable_variant_table),
                        )

                        # Re-read the readset to remove discarded variants
                        readset, vcf_source_ids = phased_input_reader.read(
                            chromosome, phasable_variant_table.variants, sample
                        )
                        readset.sort()
                        timers.stop("verify_genotypes")

                    # Remove reads with insufficient variants
                    readset = readset.subset(
                        [i for i, read in enumerate(readset) if len(read) >= max(2, min_overlap)]
                    )
                    logger.info("Kept %d reads that cover at least two variants each", len(readset))

                    # Adapt the variant table to the subset of reads
                    phasable_variant_table.subset_rows_by_position(readset.get_positions())

                    # Run the actual phasing
                    (
                        sample_components,
                        sample_haploid_components,
                        sample_superreads,
                    ) = phase_single_individual(
                        readset, phasable_variant_table, sample, phasing_param, output, timers
                    )

                    # Collect results
                    components[sample] = sample_components
                    haploid_components[sample] = sample_haploid_components
                    superreads[sample] = sample_superreads

                with timers("write_vcf"):
                    logger.info("======== Writing VCF")
                    vcf_writer.write(
                        chromosome,
                        superreads,
                        components,
                        haploid_components if include_haploid_sets else None,
                    )
                    # TODO: Use genotype information to polish results
                    # assert len(changed_genotypes) == 0
                    logger.info("Done writing VCF")
                logger.debug("Chromosome %r finished", chromosome)
                timers.start("parse_vcf")
            timers.stop("parse_vcf")
        except PloidyError as e:
            raise CommandLineError(e)

        if read_list_file:
            read_list_file.close()

    logger.info("\n== SUMMARY ==")
    log_memory_usage()
    logger.info("Time spent reading BAM/CRAM: %6.1f s", timers.elapsed("read_bam"))
    logger.info("Time spent parsing VCF: %6.1f s", timers.elapsed("parse_vcf"))
    if verify_genotypes:
        logger.info("Time spent verifying genotypes: %6.1f s", timers.elapsed("verify_genotypes"))
    logger.info("Time spent detecting blocks: %6.1f s", timers.elapsed("detecting_blocks"))
    logger.info("Time spent scoring reads: %6.1f s", timers.elapsed("read_scoring"))
    logger.info("Time spent solving cluster editing: %6.1f s", timers.elapsed("solve_clusterediting"))
    logger.info("Time spent threading haplotypes: %6.1f s", timers.elapsed("threading"))
    if plot_clusters or plot_threading:
        logger.info("Time spent creating plots: %6.1f s", timers.elapsed("create_plots"))
    logger.info("Time spent writing VCF: %6.1f s", timers.elapsed("write_vcf"))
    logger.info("Time spent on rest: %6.1f s", timers.total() - timers.sum())
    logger.info("Total elapsed time: %6.1f s", timers.total())
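

# Hedged usage sketch (the file names below are placeholders, not part of this
# module): phase a single tetraploid sample from one BAM plus a VCF, writing the
# phased VCF to a file instead of the default stdout.
#
#     with open("phased.vcf", "w") as out:
#         run_polyphase(
#             phase_input_files=["sample.bam"],
#             variant_file="variants.vcf.gz",
#             ploidy=4,
#             reference="reference.fasta",
#             output=out,
#             samples=["SAMPLE1"],
#         )

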
def run_genotype(
    phase_input_files,
    variant_file,
    reference=None,
    output=sys.stdout,
    samples=None,
    chromosomes=None,
    ignore_read_groups=False,
    indels=True,
    mapping_quality=20,
    max_coverage=15,
    nopriors=False,
    ped=None,
    recombrate=1.26,
    genmap=None,
    gt_qual_threshold=0,
    prioroutput=None,
    constant=0.0,
    overhang=10,
    affine_gap=False,
    gap_start=10,
    gap_extend=7,
    mismatch=15,
    write_command_line_header=True,
    use_ped_samples=False,
):
    """
    For now, this function only runs the genotyping algorithm: genotype likelihoods
    for all variants are computed using the forward-backward algorithm.
    """
    timers = StageTimer()
    logger.info(
        "This is WhatsHap (genotyping) %s running under Python %s",
        __version__,
        platform.python_version(),
    )
    if write_command_line_header:
        command_line = "(whatshap {}) {}".format(__version__, " ".join(sys.argv[1:]))
    else:
        command_line = None

    with ExitStack() as stack:
        # read the given input files (BAMs, VCFs, ref...)
        numeric_sample_ids = NumericSampleIds()
        phased_input_reader = stack.enter_context(
            PhasedInputReader(
                phase_input_files,
                reference,
                numeric_sample_ids,
                ignore_read_groups,
                indels=indels,
                mapq_threshold=mapping_quality,
                overhang=overhang,
                affine=affine_gap,
                gap_start=gap_start,
                gap_extend=gap_extend,
                default_mismatch=mismatch,
            )
        )
        show_phase_vcfs = phased_input_reader.has_vcfs

        # VCF writer for the final genotype likelihoods
        vcf_writer = stack.enter_context(
            GenotypeVcfWriter(command_line=command_line, in_path=variant_file, out_file=output)
        )
        # VCF writer for only the prior likelihoods (if such output is desired)
        prior_vcf_writer = None
        if prioroutput is not None:
            prior_vcf_writer = stack.enter_context(
                GenotypeVcfWriter(
                    command_line=command_line,
                    in_path=variant_file,
                    out_file=stack.enter_context(open(prioroutput, "w")),
                )
            )

        # Parse the VCF with the input variants and
        # remove all likelihoods that may already be present
        vcf_reader = stack.enter_context(
            VcfReader(
                variant_file,
                indels=indels,
                genotype_likelihoods=False,
                ignore_genotypes=True,
            )
        )

        if ignore_read_groups and not samples and len(vcf_reader.samples) > 1:
            raise CommandLineError(
                "When using --ignore-read-groups on a VCF with "
                "multiple samples, --sample must also be used."
            )
        if not samples:
            samples = vcf_reader.samples

        # If --use-ped-samples is set, use only samples from the PED file
        if ped and use_ped_samples:
            samples = set()
            for trio in PedReader(ped):
                if trio.child is None or trio.mother is None or trio.father is None:
                    continue
                samples.add(trio.mother)
                samples.add(trio.father)
                samples.add(trio.child)

        vcf_sample_set = set(vcf_reader.samples)
        for sample in samples:
            if sample not in vcf_sample_set:
                raise CommandLineError(
                    "Sample {!r} requested on command-line not found in VCF".format(sample)
                )

        if ped and genmap:
            logger.info(
                "Using region-specific recombination rates from genetic map %s.",
                genmap,
            )
            recombination_cost_computer = GeneticMapRecombinationCostComputer(genmap)
        else:
            if ped:
                logger.info("Using uniform recombination rate of %g cM/Mb.", recombrate)
            recombination_cost_computer = UniformRecombinationCostComputer(recombrate)

        samples = frozenset(samples)
        families, family_trios = setup_families(samples, ped, numeric_sample_ids, max_coverage)

        # Read phase information from VCF files, if provided
with timers("parse_phasing_vcfs"): phased_input_reader.read_vcfs() # compute genotype likelihood threshold gt_prob = 1.0 - (10**(-gt_qual_threshold / 10.0)) for variant_table in timers.iterate("parse_vcf", vcf_reader): # create a mapping of genome positions to indices var_to_pos = dict() for i in range(len(variant_table.variants)): var_to_pos[variant_table.variants[i].position] = i chromosome = variant_table.chromosome if (not chromosomes) or (chromosome in chromosomes): logger.info("======== Working on chromosome %r", chromosome) else: logger.info( "Leaving chromosome %r unchanged (present in VCF but not requested by option --chromosome)", chromosome, ) vcf_writer.write_genotypes(chromosome, variant_table, indels, leave_unchanged=True) if prioroutput is not None: prior_vcf_writer.write_genotypes(chromosome, variant_table, indels, leave_unchanged=True) continue positions = [v.position for v in variant_table.variants] if not nopriors: # compute prior genotype likelihoods based on all reads for sample in samples: logger.info("---- Initial genotyping of %s", sample) with timers("read_bam"): readset, vcf_source_ids = phased_input_reader.read( chromosome, variant_table.variants, sample, read_vcf=False, ) readset.sort() genotypes, genotype_likelihoods = compute_genotypes( readset, positions) # recompute genotypes based on given threshold reg_genotype_likelihoods = [] for gl in range(len(genotype_likelihoods)): norm_sum = (genotype_likelihoods[gl][0] + genotype_likelihoods[gl][1] + genotype_likelihoods[gl][2] + 3 * constant) regularized = PhredGenotypeLikelihoods([ (genotype_likelihoods[gl][0] + constant) / norm_sum, (genotype_likelihoods[gl][1] + constant) / norm_sum, (genotype_likelihoods[gl][2] + constant) / norm_sum, ]) genotypes[gl] = determine_genotype( regularized, gt_prob) assert isinstance(genotypes[gl], Genotype) reg_genotype_likelihoods.append(regularized) variant_table.set_genotype_likelihoods_of( sample, [ PhredGenotypeLikelihoods(list(gl)) for gl in reg_genotype_likelihoods ], ) variant_table.set_genotypes_of(sample, genotypes) else: # use uniform genotype likelihoods for all individuals for sample in samples: variant_table.set_genotype_likelihoods_of( sample, [PhredGenotypeLikelihoods([1 / 3, 1 / 3, 1 / 3])] * len(positions), ) # if desired, output the priors in separate vcf if prioroutput is not None: prior_vcf_writer.write_genotypes(chromosome, variant_table, indels) # Iterate over all families to process, i.e. a separate DP table is created # for each family. 
            # Iterate over all families to process, i.e. a separate DP table is created
            # for each family
            for representative_sample, family in sorted(families.items()):
                if len(family) == 1:
                    logger.info("---- Processing individual %s", representative_sample)
                else:
                    logger.info("---- Processing family with individuals: %s", ",".join(family))
                max_coverage_per_sample = max(1, max_coverage // len(family))
                logger.info("Using maximum coverage per sample of %dX", max_coverage_per_sample)
                trios = family_trios[representative_sample]
                assert (len(family) == 1) or (len(trios) > 0)

                # Get the reads belonging to each sample
                readsets = dict()
                for sample in family:
                    with timers("read_bam"):
                        readset, vcf_source_ids = phased_input_reader.read(
                            chromosome,
                            variant_table.variants,
                            sample,
                        )
                    with timers("select"):
                        readset = readset.subset(
                            [i for i, read in enumerate(readset) if len(read) >= 2]
                        )
                        logger.info(
                            "Kept %d reads that cover at least two variants each",
                            len(readset),
                        )
                        selected_reads = select_reads(
                            readset,
                            max_coverage_per_sample,
                            preferred_source_ids=vcf_source_ids,
                        )
                    readsets[sample] = selected_reads

                # Merge reads into one ReadSet (note that each Read object
                # knows the sample it originated from).
                all_reads = ReadSet()
                for sample, readset in readsets.items():
                    for read in readset:
                        assert read.is_sorted(), "Add a read.sort() here"
                        all_reads.add(read)
                all_reads.sort()

                # Determine which variants can (in principle) be phased
                accessible_positions = sorted(all_reads.get_positions())
                logger.info(
                    "Variants covered by at least one phase-informative "
                    "read in at least one individual after read selection: %d",
                    len(accessible_positions),
                )

                # Create Pedigree
                pedigree = Pedigree(numeric_sample_ids)
                for sample in family:
                    # genotypes are assumed to be unknown, so ignore information that
                    # might already be present in the input VCF
                    all_genotype_likelihoods = variant_table.genotype_likelihoods_of(sample)
                    genotype_l = [
                        all_genotype_likelihoods[var_to_pos[a_p]] for a_p in accessible_positions
                    ]
                    pedigree.add_individual(
                        sample,
                        [Genotype([]) for i in range(len(accessible_positions))],
                        genotype_l,
                    )
                for trio in trios:
                    pedigree.add_relationship(
                        father_id=trio.father,
                        mother_id=trio.mother,
                        child_id=trio.child,
                    )

                recombination_costs = recombination_cost_computer.compute(accessible_positions)

                # Finally, run genotyping algorithm
                with timers("genotyping"):
                    problem_name = "genotyping"
                    logger.info(
                        "Genotype %d sample%s by solving the %s problem ...",
                        len(family),
                        "s" if len(family) > 1 else "",
                        problem_name,
                    )
                    forward_backward_table = GenotypeDPTable(
                        numeric_sample_ids,
                        all_reads,
                        recombination_costs,
                        pedigree,
                        accessible_positions,
                    )

                    # store results
                    for s in family:
                        likelihood_list = variant_table.genotype_likelihoods_of(s)
                        genotypes_list = variant_table.genotypes_of(s)
                        for pos in range(len(accessible_positions)):
                            likelihoods = forward_backward_table.get_genotype_likelihoods(s, pos)
                            # compute genotypes from likelihoods and store information
                            geno = determine_genotype(likelihoods, gt_prob)
                            assert isinstance(geno, Genotype)
                            genotypes_list[var_to_pos[accessible_positions[pos]]] = geno
                            likelihood_list[var_to_pos[accessible_positions[pos]]] = likelihoods
                        variant_table.set_genotypes_of(s, genotypes_list)
                        variant_table.set_genotype_likelihoods_of(s, likelihood_list)

            with timers("write_vcf"):
                logger.info("======== Writing VCF")
                vcf_writer.write_genotypes(chromosome, variant_table, indels)
                logger.info("Done writing VCF")

            logger.debug("Chromosome %r finished", chromosome)

    logger.info("\n== SUMMARY ==")
    total_time = timers.total()
    log_memory_usage()
    logger.info("Time spent reading BAM: %6.1f s", timers.elapsed("read_bam"))
    logger.info("Time spent parsing VCF: %6.1f s", timers.elapsed("parse_vcf"))
    if show_phase_vcfs:
        logger.info(
            "Time spent parsing input phasings from VCFs: %6.1f s",
            timers.elapsed("parse_phasing_vcfs"),
        )
    logger.info("Time spent selecting reads: %6.1f s", timers.elapsed("select"))
    logger.info("Time spent genotyping: %6.1f s", timers.elapsed("genotyping"))
    logger.info("Time spent writing VCF: %6.1f s", timers.elapsed("write_vcf"))
    logger.info("Time spent on rest: %6.1f s", total_time - timers.sum())
    logger.info("Total elapsed time: %6.1f s", total_time)
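

# Hedged usage sketch (file names are placeholders, not part of this module):
# re-genotype a single sample; with gt_qual_threshold=30 the Phred-scaled cutoff
# above becomes gt_prob = 1 - 10**(-30 / 10) = 0.999, which is passed to
# determine_genotype().
#
#     with open("genotyped.vcf", "w") as out:
#         run_genotype(
#             phase_input_files=["sample.bam"],
#             variant_file="variants.vcf.gz",
#             reference="reference.fasta",
#             output=out,
#             samples=["SAMPLE1"],
#             gt_qual_threshold=30,
#         )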