def main(args):
    """Annotate a VCF with snpEff and export the result as a TSV file.

    Builds a two-stage pypeliner workflow: the 'snpeff' subworkflow writes
    annotations to a temporary HDF5 store, and 'convert_to_tsv' converts
    that store's 'snpeff' table into the final compressed TSV.
    """
    mgd = pypeliner.managed

    pypeliner_config = cli.load_pypeliner_config(args)
    pipeline = pypeliner.app.Pypeline([], pypeliner_config)

    workflow = Workflow()

    # Stage 1: run snpEff annotation into a temporary HDF5 table.
    workflow.subworkflow(
        name='snpeff',
        func=snpeff.create_snpeff_annotation_workflow,
        args=(
            mgd.InputFile(args.target_vcf_file),
            mgd.TempOutputFile('snpeff.h5'),
        ),
        kwargs={
            'data_base': args.data_base,
            'split_size': args.split_size,
            'table_name': 'snpeff',
        },
    )

    # Stage 2: flatten the HDF5 table to the user-requested output file.
    workflow.transform(
        name='convert_to_tsv',
        func=convert_hdf5_to_tsv,
        ctx={'mem': 2},
        args=(
            mgd.TempInputFile('snpeff.h5'),
            'snpeff',
            mgd.OutputFile(args.out_file),
        ),
        kwargs={
            'compress': True,
            'index': False,
        },
    )

    pipeline.run(workflow)
def main(args):
    """Run the somatic call-and-annotate pipeline.

    Loads the YAML pipeline config (substituting the reference-database
    directory into ``{ref_db_dir}`` placeholders), optionally disables
    Strelka's depth thresholds for exome data, then builds and executes
    the workflow with pypeliner.
    """
    make_directory(args.out_dir)

    with open(args.config_file) as fh:
        # The config file is a template; fill in the {ref_db_dir} placeholder
        # with the desired reference-database path before parsing.
        config_str = fh.read()

    config_str = config_str.format(ref_db_dir=args.ref_db_dir)

    # safe_load: plain yaml.load without an explicit Loader is unsafe
    # (arbitrary object construction) and deprecated since PyYAML 5.1.
    config = yaml.safe_load(config_str)

    if args.exome:
        # NOTE(review): presumably the default depth thresholds are tuned
        # for whole-genome coverage — confirm with the Strelka config.
        config['strelka']['kwargs']['use_depth_thresholds'] = False

    tumour_bam_files = dict(zip(args.tumour_sample_ids, args.tumour_bam_files))

    raw_data_dir = os.path.join(args.out_dir, 'raw_data')
    results_dir = os.path.join(args.out_dir, 'results.h5')

    workflow = call_and_annotate_pipeline(
        config,
        args.normal_bam_file,
        tumour_bam_files,
        raw_data_dir,
        results_dir,
        chromosomes=args.chromosomes,
    )

    pyp_config = cli.load_pypeliner_config(args)
    pyp = pypeliner.app.Pypeline([], pyp_config)
    pyp.run(workflow)
def main(args):
    """Build and run the dollo tree-search workflow via pypeliner."""
    pipeline = pypeliner.app.Pypeline([], cli.load_pypeliner_config(args))

    workflow = dollo.get_tree_search_workflow(
        args.in_file,
        args.search_file,
        args.tree_file,
        grid_search=args.grid_search,
        grid_size=args.grid_size,
        max_probability_of_loss=args.max_probability_of_loss,
        min_probability_of_loss=args.min_probability_of_loss,
    )

    pipeline.run(workflow)
def main(args):
    """Build and run the MuTect somatic-calling workflow via pypeliner."""
    pipeline = pypeliner.app.Pypeline([], cli.load_pypeliner_config(args))

    workflow = mutect.create_mutect_workflow(
        args.normal_bam_file,
        args.tumour_bam_file,
        args.ref_genome_fasta_file,
        args.cosmic_file,
        args.dbsnp_file,
        args.out_file,
        chromosomes=args.chromosomes,
        split_size=args.split_size,
    )

    pipeline.run(workflow)
def main(args):
    """Build and run the delly structural-variant workflow via pypeliner.

    Creates the raw-data directory up front so the pipeline has somewhere
    to write intermediate output.
    """
    utils.make_directory(args.raw_data_dir)

    pipeline = pypeliner.app.Pypeline(config=cli.load_pypeliner_config(args))

    workflow = delly.delly_pipeline(
        args.normal_bam_file,
        cli.get_tumour_bam_file_dict(args),
        args.ref_genome_fasta_file,
        args.delly_excl_chrom,
        args.out_file,
        args.raw_data_dir,
    )

    pipeline.run(workflow)
def main(args):
    """Build and run the reference-database setup workflow.

    Loads the YAML config (substituting the reference-database directory
    into ``{ref_db_dir}`` placeholders) and executes the download/setup
    workflow for the databases it lists.
    """
    with open(args.config_file) as fh:
        # The config file is a template; fill in the {ref_db_dir} placeholder
        # with the desired reference-database path before parsing.
        config_str = fh.read()

    config_str = config_str.format(ref_db_dir=args.ref_db_dir)

    # safe_load: plain yaml.load without an explicit Loader is unsafe
    # (arbitrary object construction) and deprecated since PyYAML 5.1.
    config = yaml.safe_load(config_str)

    workflow = create_setup_reference_dbs_workflow(config['databases'])

    pyp_config = cli.load_pypeliner_config(args)
    pyp = pypeliner.app.Pypeline([], pyp_config)
    pyp.run(workflow)
def main(args):
    """Build and run the NuSeq somatic-classification workflow via pypeliner."""
    pipeline = pypeliner.app.Pypeline([], cli.load_pypeliner_config(args))

    workflow = nuseq.create_nuseq_classify_workflow(
        args.normal_bam_file,
        args.tumour_bam_files,
        args.ref_genome_fasta_file,
        args.out_file,
        chromosomes=args.chromosomes,
        indel_threshold=args.indel_threshold,
        chunk_size=args.chunk_size,
        min_normal_depth=args.min_normal_depth,
        min_tumour_depth=args.min_tumour_depth,
        min_somatic_probability=args.min_somatic_probability,
        split_size=args.split_size,
    )

    pipeline.run(workflow)
def main(args):
    """Build and run the Strelka somatic-calling workflow via pypeliner.

    Output indel and SNV VCFs are derived from ``args.out_prefix``.
    """
    pipeline = pypeliner.app.Pypeline([], cli.load_pypeliner_config(args))

    # Derive the two output paths from the shared prefix.
    indel_vcf_file = args.out_prefix + '.indel.vcf.gz'
    snv_vcf_file = args.out_prefix + '.snv.vcf.gz'

    workflow = strelka.create_strelka_workflow(
        args.normal_bam_file,
        args.tumour_bam_file,
        args.ref_genome_fasta_file,
        indel_vcf_file,
        snv_vcf_file,
        chromosomes=args.chromosomes,
        split_size=args.split_size,
        use_depth_thresholds=not args.no_depth_thresholds,
    )

    pipeline.run(workflow)
def main(args):
    """Build and run the destruct structural-variant workflow via pypeliner.

    Creates the raw-data directory, optionally overlays a user-supplied
    destruct YAML config, then executes the pipeline.
    """
    utils.make_directory(args.raw_data_dir)

    destruct_config = {}
    if args.destruct_config is not None:
        with open(args.destruct_config) as fh:
            # safe_load: plain yaml.load without an explicit Loader is unsafe
            # (arbitrary object construction) and deprecated since PyYAML 5.1.
            destruct_config = yaml.safe_load(fh)

    pypeliner_config = cli.load_pypeliner_config(args)
    pyp = pypeliner.app.Pypeline(config=pypeliner_config)

    workflow = destruct.destruct_pipeline(
        args.normal_bam_file,
        cli.get_tumour_bam_file_dict(args),
        destruct_config,
        args.ref_data_dir,
        args.out_file,
        args.raw_data_dir,
    )

    pyp.run(workflow)