def variant_counting_workflow(args):
    config = helpers.load_config(args)

    meta_yaml = os.path.join(args['out_dir'], 'info.yaml')

    bam_files, bai_files = helpers.get_bams(args['input_yaml'])
    vcfs = args['input_vcfs']
    results_file = os.path.join(args['out_dir'], 'results', 'variant_counting',
                                'counts.h5')

    return create_variant_counting_workflow(vcfs, bam_files, results_file,
                                            meta_yaml, config)
def variant_calling_workflow(args):

    config = helpers.load_config(args)

    ctx = {'num_retry': 3, 'mem_retry_increment': 2, 'ncpus': 1}
    docker_ctx = helpers.get_container_ctx(config['containers'],
                                           'single_cell_pipeline')
    ctx.update(docker_ctx)

    meta_yaml = os.path.join(args['out_dir'], 'info.yaml')

    bam_files, bai_files = helpers.get_bams(args['input_yaml'])

    cellids = helpers.get_samples(args['input_yaml'])

    varcalls_dir = os.path.join(args['out_dir'], 'results', 'variant_calling')

    museq_vcf = os.path.join(varcalls_dir, 'museq_snv.vcf.gz')
    strelka_snv_vcf = os.path.join(varcalls_dir, 'strelka_snv.vcf.gz')
    strelka_indel_vcf = os.path.join(varcalls_dir, 'strelka_indel.vcf.gz')
    snv_h5 = os.path.join(varcalls_dir, 'snv_annotations.h5')
    raw_data_dir = os.path.join(varcalls_dir, 'raw')

    wgs_bam_template = args["tumour_template"]
    normal_bam_template = args["normal_template"]

    regions = refgenome.get_split_regions(config["split_size"])

    tumour_region_bams = {
        r: wgs_bam_template.format(region=r)
        for r in regions
    }
    normal_region_bams = {
        r: normal_bam_template.format(region=r)
        for r in regions
    }

    return create_variant_calling_workflow(
        bam_files,
        tumour_region_bams,
        normal_region_bams,
        museq_vcf,
        strelka_snv_vcf,
        strelka_indel_vcf,
        snv_h5,
        config,
        raw_data_dir,
    )
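
# The two dict comprehensions above are plain format-string expansion; a
# minimal, self-contained restatement of that pattern (the example template
# and region string in the comment are invented):
def _expand_region_template(template, regions):
    # e.g. _expand_region_template('wgs_{region}.bam', ['1-1-3000000'])
    #      -> {'1-1-3000000': 'wgs_1-1-3000000.bam'}
    return {r: template.format(region=r) for r in regions}
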
def germline_calling_workflow(workflow, args):

    config = helpers.load_config(args)

    ctx = {
        'mem_retry_increment': 2,
        'ncpus': 1,
        'mem': config["memory"]['low'],
        'pool_id': config['pools']['standard'],
    }
    docker_ctx = helpers.get_container_ctx(config['containers'],
                                           'single_cell_pipeline')
    ctx.update(docker_ctx)

    bam_files, bai_files = helpers.get_bams(args['input_yaml'])

    sampleids = helpers.get_samples(args['input_yaml'])

    normal_bam_template = args["input_template"]
    normal_bai_template = args["input_template"] + ".bai"

    if "{reads}" in normal_bam_template:
        raise ValueError(
            "input template for germline calling only supports region-based splits"
        )

    varcalls_dir = os.path.join(args['out_dir'], 'results', 'germline_calling')

    samtools_germline_vcf = os.path.join(varcalls_dir, 'raw',
                                         'samtools_germline.vcf.gz')
    snpeff_vcf_filename = os.path.join(varcalls_dir, 'snpeff.vcf')
    normal_genotype_filename = os.path.join(varcalls_dir, 'raw',
                                            'normal_genotype.h5')
    mappability_filename = os.path.join(varcalls_dir, 'raw', 'mappability.h5')
    counts_template = os.path.join(varcalls_dir, 'counts', 'raw', 'counts.h5')
    germline_h5_filename = os.path.join(varcalls_dir, 'germline.h5')

    workflow.setobj(
        obj=mgd.OutputChunks('cell_id'),
        value=list(bam_files.keys()),
    )

    workflow.transform(
        name="get_regions",
        ctx=ctx,
        func="single_cell.utils.pysamutils.get_regions_from_reference",
        ret=pypeliner.managed.OutputChunks('region'),
        args=(
            config["ref_genome"],
            config["split_size"],
            config["chromosomes"],
        ))

    workflow.subworkflow(name='samtools_germline',
                         func=germline.create_samtools_germline_workflow,
                         args=(
                             mgd.InputFile("normal.split.bam",
                                           "region",
                                           template=normal_bam_template),
                             mgd.InputFile("normal.split.bam.bai",
                                           "region",
                                           template=normal_bai_template),
                             config['ref_genome'],
                             mgd.OutputFile(samtools_germline_vcf,
                                            extensions=['.tbi']),
                             config,
                         ),
                         kwargs={
                             'chromosomes': config["chromosomes"],
                             'base_docker': helpers.get_container_ctx(
                                 config['containers'], 'single_cell_pipeline'),
                             'vcftools_docker': helpers.get_container_ctx(
                                 config['containers'], 'vcftools'),
                             'samtools_docker': helpers.get_container_ctx(
                                 config['containers'], 'samtools'),
                         })

    workflow.subworkflow(
        name='annotate_mappability',
        func="biowrappers.components.variant_calling.mappability.create_vcf_mappability_annotation_workflow",
        args=(
            config['databases']['mappability']['local_path'],
            mgd.InputFile(samtools_germline_vcf, extensions=['.tbi']),
            mgd.OutputFile(mappability_filename),
        ),
        kwargs={
            'base_docker': helpers.get_container_ctx(
                config['containers'], 'single_cell_pipeline'),
        })

    workflow.transform(
        name='annotate_genotype',
        func="single_cell.workflows.germline.tasks.annotate_normal_genotype",
        ctx=ctx,
        args=(
            mgd.InputFile(samtools_germline_vcf, extensions=['.tbi']),
            mgd.OutputFile(normal_genotype_filename),
            config["chromosomes"],
        ),
    )

    workflow.subworkflow(
        name='snpeff',
        func="biowrappers.components.variant_calling.snpeff.create_snpeff_annotation_workflow",
        args=(
            config['databases']['snpeff']['db'],
            mgd.InputFile(samtools_germline_vcf, extensions=['.tbi']),
            mgd.OutputFile(snpeff_vcf_filename),
        ),
        kwargs={
            'hdf5_output': False,
            'base_docker': helpers.get_container_ctx(
                config['containers'], 'single_cell_pipeline'),
            'vcftools_docker': helpers.get_container_ctx(
                config['containers'], 'vcftools'),
            'snpeff_docker': helpers.get_container_ctx(
                config['containers'], 'snpeff'),
        })

    workflow.subworkflow(
        name='read_counts',
        func="single_cell.variant_calling.create_snv_allele_counts_for_vcf_targets_workflow",
        args=(
            config,
            mgd.InputFile('tumour.bam', 'cell_id', fnames=bam_files),
            mgd.InputFile('tumour.bam.bai', 'cell_id', fnames=bai_files),
            mgd.InputFile(samtools_germline_vcf, extensions=['.tbi']),
            mgd.OutputFile(counts_template),
        ),
        kwargs={
            'table_name': '/germline_allele_counts',
            'docker_config': helpers.get_container_ctx(
                config['containers'], 'single_cell_pipeline'),
        },
    )

    workflow.transform(
        name='build_results_file',
        func="biowrappers.components.io.hdf5.tasks.concatenate_tables",
        ctx=ctx,
        args=(
            [
                mgd.InputFile(counts_template),
                mgd.InputFile(mappability_filename),
                mgd.InputFile(normal_genotype_filename),
            ],
            pypeliner.managed.OutputFile(germline_h5_filename),
        ),
        kwargs={
            'drop_duplicates': True,
        })

    info_file = os.path.join(args["out_dir"], 'results', 'germline_calling',
                             "info.yaml")

    results = {
        'germline_data': helpers.format_file_yaml(germline_h5_filename),
    }

    input_datasets = {
        k: helpers.format_file_yaml(v)
        for k, v in bam_files.items()
    }

    metadata = {
        'germline_calling': {
            'version': single_cell.__version__,
            'results': results,
            'containers': config['containers'],
            'input_datasets': input_datasets,
            'output_datasets': None
        }
    }

    workflow.transform(name='generate_meta_yaml',
                       ctx=dict(mem=config['memory']['med'],
                                pool_id=config['pools']['standard'],
                                mem_retry_increment=2,
                                ncpus=1),
                       func="single_cell.utils.helpers.write_to_yaml",
                       args=(mgd.OutputFile(info_file), metadata))

    return workflow
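
# Hedged sketch of what a metadata writer task such as
# "single_cell.utils.helpers.write_to_yaml" plausibly reduces to; the real
# helper may differ (this body is an assumption, not the pipeline's code):
def _write_to_yaml_sketch(filename, metadata):
    import yaml
    with open(filename, 'w') as handle:
        yaml.safe_dump(metadata, handle, default_flow_style=False)
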
def aneufinder_workflow(workflow, args):

    config = helpers.load_config(args)
    cellids = helpers.get_samples(args['input_yaml'])
    bam_files, _ = helpers.get_bams(args['input_yaml'])

    workflow.setobj(
        obj=mgd.OutputChunks('cell_id'),
        value=cellids,
    )

    info_file = os.path.join(args["out_dir"],'results', 'aneufinder', "info.yaml")

    output = os.path.join(args['out_dir'], 'results', "aneufinder")

    aneufinder_pdf_file = os.path.join(
        output, 'plots', '{}_reads.pdf'.format(args['library_id']))

    helpers.makedirs(output)

    results_filename = os.path.join(output, '{}_results.h5'.format(args['library_id']))
    workflow.subworkflow(
        name='aneufinder_workflow',
        func=aneufinder.create_aneufinder_workflow,
        args=(
            mgd.InputFile('bam_markdups', 'cell_id', fnames=bam_files),
            cellids,
            config,
            output,
            mgd.OutputFile(results_filename),
            mgd.OutputFile(aneufinder_pdf_file),
            args['library_id'],
        ),
    )

    results = {
        'aneufinder_plot': helpers.format_file_yaml(aneufinder_pdf_file),
        'aneufinder_data': helpers.format_file_yaml(results_filename),
    }

    input_datasets = {k: helpers.format_file_yaml(v) for k, v in bam_files.items()}

    metadata = {
        'aneufinder': {
            'reads_table': '/aneufinder/reads',
            'segments_table': '/aneufinder/segments/',
            'chromosomes': config['chromosomes'],
            'ref_genome': config['ref_genome'],
            'version': single_cell.__version__,
            'results': results,
            'containers': config['containers'],
            'input_datasets': input_datasets,
            'output_datasets': None
        }
    }

    workflow.transform(
        name='generate_meta_yaml',
        ctx=dict(mem=config['memory']['med'],
                 pool_id=config['pools']['standard'],
                 mem_retry_increment=2, ncpus=1),
        func="single_cell.utils.helpers.write_to_yaml",
        args=(
            mgd.OutputFile(info_file),
            metadata
        )
    )

    return workflow
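
# Hedged sketch of an idempotent directory helper like helpers.makedirs used
# above (assumption: the real helper simply tolerates pre-existing paths):
def _makedirs_sketch(directory):
    import errno
    try:
        os.makedirs(directory)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
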
def align_workflow(workflow, args):

    config = helpers.load_config(args)

    sampleinfo = helpers.get_sample_info(args['input_yaml'])

    cellids = helpers.get_samples(args['input_yaml'])
    bam_files, bai_files = helpers.get_bams(args['input_yaml'])

    lib = args["library_id"]

    outdir = os.path.join(args["out_dir"], "results", "alignment")

    info_file = os.path.join(outdir, "info.yaml")

    alignment_metrics_h5 = os.path.join(outdir,
                                        '{}_alignment_metrics.h5'.format(lib))

    plots_dir = os.path.join(outdir, 'plots')
    plot_metrics_output = os.path.join(plots_dir,
                                       '{}_plot_metrics.pdf'.format(lib))

    ctx = {'mem_retry_increment': 2, 'ncpus': 1}
    ctx.update(
        helpers.get_container_ctx(config['containers'],
                                  'single_cell_pipeline'))

    if not args["metrics_only"]:
        fastq1_files, fastq2_files = helpers.get_fastqs(args['input_yaml'])
        instrumentinfo = helpers.get_instrument_info(args['input_yaml'])
        centerinfo = helpers.get_center_info(args['input_yaml'])

        workflow.setobj(
            obj=mgd.OutputChunks('cell_id', 'lane'),
            value=list(fastq1_files.keys()),
        )

        workflow.subworkflow(
            name='alignment_workflow',
            func=align.create_alignment_workflow,
            args=(
                mgd.InputFile('fastq_1',
                              'cell_id',
                              'lane',
                              fnames=fastq1_files,
                              axes_origin=[]),
                mgd.InputFile('fastq_2',
                              'cell_id',
                              'lane',
                              fnames=fastq2_files,
                              axes_origin=[]),
                mgd.OutputFile('bam_markdups',
                               'cell_id',
                               fnames=bam_files,
                               axes_origin=[]),
                mgd.OutputFile('bai_markdups',
                               'cell_id',
                               fnames=bai_files,
                               axes_origin=[]),
                config['ref_genome'],
                config,
                args,
                instrumentinfo,
                centerinfo,
                sampleinfo,
                cellids,
            ),
        )
    else:
        workflow.setobj(
            obj=mgd.OutputChunks('cell_id'),
            value=cellids,
        )

    workflow.subworkflow(
        name='metrics_workflow',
        func=alignment_metrics.create_alignment_metrics_workflow,
        args=(
            mgd.InputFile('bam_markdups',
                          'cell_id',
                          fnames=bam_files,
                          axes_origin=[]),
            mgd.InputFile('bai_markdups',
                          'cell_id',
                          fnames=bai_files,
                          axes_origin=[]),
            mgd.OutputFile(alignment_metrics_h5),
            mgd.OutputFile(plot_metrics_output),
            config['ref_genome'],
            config,
            args,
            sampleinfo,
            cellids,
        ),
    )

    inputs = helpers.get_fastq_files(args["input_yaml"])
    outputs = {
        k: helpers.format_file_yaml(v)
        for k, v in bam_files.items()
    }

    metadata = {
        'alignment': {
            'name': 'alignment',
            'cell_batch_realign': args["realign"],
            'metrics_table': '/alignment/metrics',
            'gc_metrics_table': '/alignment/gc_metrics',
            'aligner': config["aligner"],
            'adapter': config["adapter"],
            'adapter2': config["adapter2"],
            'picardtools_wgsmetrics_params': config['picard_wgs_params'],
            'ref_genome': config["ref_genome"],
            'version': single_cell.__version__,
            'containers': config['containers'],
            'output_datasets': outputs,
            'input_datasets': inputs,
            'results': {
                'alignment_metrics': helpers.format_file_yaml(alignment_metrics_h5),
                'alignment_plots': helpers.format_file_yaml(plot_metrics_output),
            },
        }
    }

    workflow.transform(name='generate_meta_yaml',
                       ctx=dict(mem=config['memory']['med'],
                                pool_id=config['pools']['standard'],
                                **ctx),
                       func="single_cell.utils.helpers.write_to_yaml",
                       args=(mgd.OutputFile(info_file), metadata))

    return workflow
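
# The ('cell_id', 'lane') chunks fed to setobj above come from two-level dict
# keys; a hedged illustration of the expected fastq mapping shape (the cell
# ids, lanes, and paths here are invented):
def _fastq_axes_example():
    fastq1_files = {
        ('cell_0001', 'lane_1'): 'cell_0001_lane_1_R1.fastq.gz',
        ('cell_0001', 'lane_2'): 'cell_0001_lane_2_R1.fastq.gz',
    }
    # each key tuple becomes one (cell_id, lane) chunk on the workflow axes
    return list(fastq1_files.keys())
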
def qc_workflow(args):
    config = helpers.load_config(args)

    sampleinfo = helpers.get_sample_info(args['input_yaml'])
    cellids = helpers.get_samples(args['input_yaml'])
    bam_files, _ = helpers.get_bams(args['input_yaml'])

    lib = args["library_id"]

    workflow = pypeliner.workflow.Workflow()

    annotation_only = args['annotation_only']

    alignment_dir = args["alignment_output"]
    hmmcopy_dir = args["hmmcopy_output"]
    annotation_dir = args["annotation_output"]

    if alignment_dir and not annotation_only:
        alignment_files = get_output_files(alignment_dir, 'alignment', lib)

        fastq1_files, fastq2_files = helpers.get_fastqs(args['input_yaml'])
        triminfo = helpers.get_trim_info(args['input_yaml'])
        centerinfo = helpers.get_center_info(args['input_yaml'])

        workflow.setobj(
            obj=mgd.OutputChunks('cell_id', 'lane'),
            value=list(fastq1_files.keys()),
        )

        workflow.subworkflow(
            name='alignment_workflow',
            ctx={
                'docker_image': config['alignment']['docker']['single_cell_pipeline'],
            },
            func=align.create_alignment_workflow,
            args=(
                mgd.InputFile('fastq_1',
                              'cell_id',
                              'lane',
                              fnames=fastq1_files,
                              axes_origin=[]),
                mgd.InputFile('fastq_2',
                              'cell_id',
                              'lane',
                              fnames=fastq2_files,
                              axes_origin=[]),
                mgd.OutputFile('bam_markdups',
                               'cell_id',
                               fnames=bam_files,
                               axes_origin=[],
                               extensions=['.bai']),
                mgd.OutputFile(alignment_files['alignment_metrics_csv']),
                mgd.OutputFile(alignment_files['gc_metrics_csv']),
                mgd.OutputFile(alignment_files['fastqc_metrics_csv']),
                mgd.OutputFile(alignment_files['plot_metrics_output']),
                config['alignment']['ref_genome'],
                config['alignment'],
                triminfo,
                centerinfo,
                sampleinfo,
                cellids,
                mgd.OutputFile(alignment_files['alignment_metrics_tar']),
                lib,
            ),
            kwargs={'realign': args['realign']})

    if hmmcopy_dir and not annotation_only:
        hmmcopy_files = get_output_files(hmmcopy_dir, 'hmmcopy', lib)

        if not alignment_dir:
            workflow.setobj(
                obj=mgd.OutputChunks('cell_id'),
                value=list(bam_files.keys()),
            )

        workflow.subworkflow(
            name='hmmcopy_workflow',
            ctx={
                'docker_image': config['hmmcopy']['docker']['single_cell_pipeline'],
            },
            func=hmmcopy.create_hmmcopy_workflow,
            args=(mgd.InputFile('bam_markdups',
                                'cell_id',
                                fnames=bam_files,
                                extensions=['.bai']),
                  mgd.OutputFile(hmmcopy_files['reads_csvs']),
                  mgd.OutputFile(hmmcopy_files['segs_csvs']),
                  mgd.OutputFile(hmmcopy_files['metrics_csvs']),
                  mgd.OutputFile(hmmcopy_files['params_csvs']),
                  mgd.OutputFile(hmmcopy_files['igv_csvs']),
                  mgd.OutputFile(hmmcopy_files['segs_pdf']),
                  mgd.OutputFile(hmmcopy_files['bias_pdf']),
                  mgd.OutputFile(hmmcopy_files['heatmap_pdf']),
                  mgd.OutputFile(hmmcopy_files['metrics_pdf']),
                  mgd.OutputFile(hmmcopy_files['kernel_density_pdf']),
                  mgd.OutputFile(hmmcopy_files['hmmcopy_data_tar']), cellids,
                  config['hmmcopy'], sampleinfo),
        )

    if annotation_dir:
        annotation_files = get_output_files(annotation_dir, 'annotation', lib)
        if not hmmcopy_dir or not alignment_dir:
            raise Exception(
                '--hmmcopy_output and --alignment_output are required to run annotation'
            )

        alignment_files = get_output_files(alignment_dir, 'alignment', lib)
        hmmcopy_files = get_output_files(hmmcopy_dir, 'hmmcopy', lib)

        workflow.subworkflow(
            name='annotation_workflow',
            ctx={
                'docker_image': config['annotation']['docker']['single_cell_pipeline'],
            },
            func=qc_annotation.create_qc_annotation_workflow,
            args=(
                mgd.InputFile(hmmcopy_files['metrics_csvs']),
                mgd.InputFile(hmmcopy_files['reads_csvs']),
                mgd.InputFile(alignment_files['alignment_metrics_csv']),
                mgd.InputFile(alignment_files['gc_metrics_csv']),
                mgd.InputFile(hmmcopy_files['segs_pdf']),
                mgd.OutputFile(annotation_files['merged_metrics_csvs']),
                mgd.OutputFile(annotation_files['qc_report']),
                mgd.OutputFile(annotation_files['corrupt_tree_newick']),
                mgd.OutputFile(annotation_files['consensus_tree_newick']),
                mgd.OutputFile(annotation_files['phylo_csv']),
                mgd.OutputFile(annotation_files['loci_rank_trees']),
                mgd.OutputFile(annotation_files['filtered_data']),
                mgd.OutputFile(annotation_files['corrupt_tree_pdf']),
                mgd.OutputFile(annotation_files['segs_pass']),
                mgd.OutputFile(annotation_files['segs_fail']),
                mgd.OutputFile(annotation_files['corrupt_heatmap_pdf']),
                mgd.OutputFile(annotation_files['heatmap_filt_pdf']),
                config['annotation'],
                lib,
            ),
            kwargs={'no_corrupt_tree': args['no_corrupt_tree']})

    return workflow
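
# get_output_files is called above but not shown; a hedged sketch of the
# mapping shape it must return for the 'alignment' stage (the key names are
# taken from the calls above, the path layout is an assumption):
def _get_output_files_sketch(outdir, stage, lib):
    if stage != 'alignment':
        raise NotImplementedError('sketch covers the alignment stage only')
    return {
        'alignment_metrics_csv': os.path.join(outdir, '{}_alignment_metrics.csv.gz'.format(lib)),
        'gc_metrics_csv': os.path.join(outdir, '{}_gc_metrics.csv.gz'.format(lib)),
        'fastqc_metrics_csv': os.path.join(outdir, '{}_fastqc_metrics.csv.gz'.format(lib)),
        'plot_metrics_output': os.path.join(outdir, '{}_plot_metrics.pdf'.format(lib)),
        'alignment_metrics_tar': os.path.join(outdir, '{}_alignment_metrics.tar.gz'.format(lib)),
    }
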
def breakpoint_calling_workflow(workflow, args):

    config = helpers.load_config(args)

    normal_bam_file = args['matched_normal']
    bam_files, bai_files = helpers.get_bams(args['input_yaml'])

    varcalls_dir = os.path.join(args['out_dir'], 'results',
                                'breakpoint_calling')
    raw_data_directory = os.path.join(varcalls_dir, 'raw')
    breakpoints_filename = os.path.join(varcalls_dir, 'breakpoints.h5')
    ref_data_directory = '/refdata'

    workflow.setobj(
        obj=mgd.OutputChunks('cell_id'),
        value=list(bam_files.keys()),
    )

    workflow.subworkflow(
        name='destruct',
        func="biowrappers.components.breakpoint_calling.destruct.destruct_pipeline",
        args=(
            mgd.InputFile(normal_bam_file),
            mgd.InputFile('tumour.bam', 'cell_id', fnames=bam_files),
            config.get('destruct', {}),
            ref_data_directory,
            mgd.OutputFile(breakpoints_filename),
            raw_data_directory,
        ),
    )

    info_file = os.path.join(args["out_dir"], 'results', 'breakpoint_calling',
                             "info.yaml")

    results = {
        'destruct_data': helpers.format_file_yaml(breakpoints_filename),
    }

    input_datasets = {
        k: helpers.format_file_yaml(v)
        for k, v in bam_files.items()
    }
    input_datasets = {'normal': normal_bam_file, 'tumour': input_datasets}

    metadata = {
        'breakpoint_calling': {
            'ref_data': ref_data_directory,
            'version': single_cell.__version__,
            'results': results,
            'containers': config['containers'],
            'input_datasets': input_datasets,
            'output_datasets': None
        }
    }

    workflow.transform(name='generate_meta_yaml',
                       ctx=dict(mem=config['memory']['med'],
                                pool_id=config['pools']['standard'],
                                mem_retry_increment=2,
                                ncpus=1),
                       func="single_cell.utils.helpers.write_to_yaml",
                       args=(mgd.OutputFile(info_file), metadata))

    return workflow
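
# helpers.format_file_yaml is used throughout these metadata blocks; a hedged
# guess at its contract (the real helper may record more fields, e.g. a
# checksum or file type; this return shape is an assumption):
def _format_file_yaml_sketch(filename):
    return {'filename': os.path.abspath(filename)}
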
def copy_number_calling_workflow(workflow, args):

    config = helpers.load_config(args)

    ctx = {'mem_retry_increment': 2, 'ncpus': 1,
           'mem': config["memory"]['low'],
           'pool_id': config['pools']['standard']}
    docker_ctx = helpers.get_container_ctx(config['containers'], 'single_cell_pipeline')
    ctx.update(docker_ctx)

    tumour_bam_files, tumour_bai_files = helpers.get_bams(args['tumour_yaml'])

    normal_bam_files, normal_bai_files = helpers.get_bams(args['normal_yaml'])

    tumour_cellids = helpers.get_samples(args['tumour_yaml'])

    normal_cellids = helpers.get_samples(args['normal_yaml'])

    if set(tumour_bam_files.keys()) != set(tumour_cellids):
        raise ValueError("tumour bam cell ids do not match the tumour_yaml sample ids")

    if set(normal_bam_files.keys()) != set(normal_cellids):
        raise ValueError("normal bam cell ids do not match the normal_yaml sample ids")

    copynumber_dir = os.path.join(args["out_dir"], "copynumber")

    out_file = os.path.join(copynumber_dir, "results", "results.h5")

    cloneid = args["clone_id"]

    remixt_config = config['titan_params'].get('extract_seqdata', {})

    workflow.setobj(
        obj=mgd.OutputChunks('tumour_cell_id'),
        value=tumour_cellids,
    )

    workflow.setobj(
        obj=mgd.OutputChunks('normal_cell_id'),
        value=normal_cellids,
    )

    workflow.transform(
        name="get_snp_positions_filename",
        ctx=ctx,
        func="remixt.config.get_filename",
        ret=mgd.TempOutputObj('snp_positions_filename'),
        args=(
              remixt_config,
              config['titan_params']['ref_data_dir'],
              'snp_positions'
        )
    )

    workflow.transform(
        name="get_bam_max_fragment_length",
        ctx=ctx,
        func="remixt.config.get_param",
        ret=mgd.TempOutputObj('bam_max_fragment_length'),
        args=(
              remixt_config,
              'bam_max_fragment_length'
        )
    )

    workflow.transform(
        name="get_bam_max_soft_clipped",
        ctx=ctx,
        func="remixt.config.get_param",
        ret=mgd.TempOutputObj('bam_max_soft_clipped'),
        args=(
              remixt_config,
              'bam_max_soft_clipped'
        )
    )

    workflow.transform(
        name="get_bam_check_proper_pair",
        ctx=ctx,
        func="remixt.config.get_param",
        ret=mgd.TempOutputObj('bam_check_proper_pair'),
        args=(
              remixt_config,
              'bam_check_proper_pair'
        )
    )


    workflow.subworkflow(
        name="extract_seqdata_tumour",
        axes=('tumour_cell_id',),
        func=extract_seqdata.create_extract_seqdata_workflow,
        args=(
            mgd.InputFile(
                'bam_markdups',
                'tumour_cell_id',
                fnames=tumour_bam_files),
            mgd.InputFile(
                'bam_markdups_index',
                'tumour_cell_id',
                fnames=tumour_bai_files),
            mgd.TempOutputFile("tumour.h5", "tumour_cell_id"),
            config,
            config['titan_params'].get('extract_seqdata', {}),
            config['titan_params']['ref_data_dir'],
            mgd.TempInputObj('snp_positions_filename'),
            mgd.TempInputObj('bam_max_fragment_length'),
            mgd.TempInputObj('bam_max_soft_clipped'),
            mgd.TempInputObj('bam_check_proper_pair'),
        )
    )

    workflow.subworkflow(
        name="extract_seqdata_normal",
        axes=('normal_cell_id',),
        func=extract_seqdata.create_extract_seqdata_workflow,
        args=(
            mgd.InputFile(
                'bam_markdups',
                'normal_cell_id',
                fnames=normal_bam_files),
            mgd.InputFile(
                'bam_markdups_index',
                'normal_cell_id',
                fnames=normal_bai_files),
            mgd.TempOutputFile("normal.h5", "normal_cell_id"),
            config,
            config['titan_params'].get('extract_seqdata', {}),
            config['titan_params']['ref_data_dir'],
            mgd.TempInputObj('snp_positions_filename'),
            mgd.TempInputObj('bam_max_fragment_length'),
            mgd.TempInputObj('bam_max_soft_clipped'),
            mgd.TempInputObj('bam_check_proper_pair'),
        )
    )

    workflow.subworkflow(
        name='titan_workflow',
        func=titan.create_titan_workflow,
        args=(
            mgd.TempInputFile("normal.h5", "normal_cell_id"),
            mgd.TempInputFile("tumour.h5", "tumour_cell_id"),
            config['ref_genome'],
            copynumber_dir,
            out_file,
            config,
            args,
            tumour_cellids,
            normal_cellids,
            cloneid
        ),
    )

    info_file = os.path.join(args["out_dir"],'results','copynumber_calling', "info.yaml")

    results = {
        'copynumber_data': helpers.format_file_yaml(out_file),
    }

    tumours = {k: helpers.format_file_yaml(v) for k, v in tumour_bam_files.items()}
    normals = {k: helpers.format_file_yaml(v) for k, v in normal_bam_files.items()}
    input_datasets = {'tumour': tumours, 'normal': normals}

    metadata = {
        'copynumber_calling': {
            'chromosomes': config['chromosomes'],
            'ref_genome': config['ref_genome'],
            'version': single_cell.__version__,
            'results': results,
            'containers': config['containers'],
            'input_datasets': input_datasets,
            'output_datasets': None
        }
    }

    workflow.transform(
        name='generate_meta_yaml',
        ctx=dict(mem=config['memory']['med'],
                 pool_id=config['pools']['standard'],
                 mem_retry_increment=2, ncpus=1),
        func="single_cell.utils.helpers.write_to_yaml",
        args=(
            mgd.OutputFile(info_file),
            metadata
        )
    )

    return workflow
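
# The four get_* transforms above just thread scalar remixt settings through
# managed temp objects; a hedged restatement of the direct calls they wrap
# (remixt.config.get_param is the same function named in the transforms):
def _remixt_params_example(remixt_config):
    import remixt.config
    return {
        name: remixt.config.get_param(remixt_config, name)
        for name in ('bam_max_fragment_length',
                     'bam_max_soft_clipped',
                     'bam_check_proper_pair')
    }
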
def merge_bams_workflow(workflow, args):

    input_yaml = args["input_yaml"]
    output_template = args["merged_bam_template"]

    info_file = os.path.join(args["out_dir"], 'results', 'merge_bams',
                             "info.yaml")
    config = helpers.load_config(args)
    bam_files, bai_files = helpers.get_bams(input_yaml)
    cellids = helpers.get_samples(input_yaml)

    wgs_bam_template = output_template
    wgs_bai_template = wgs_bam_template + ".bai"

    ctx = {'mem_retry_increment': 2, 'ncpus': 1}
    ctx.update(
        helpers.get_container_ctx(config['containers'],
                                  'single_cell_pipeline'))

    workflow.setobj(
        obj=mgd.OutputChunks('cell_id'),
        value=cellids,
    )

    workflow.transform(
        name="get_regions",
        ctx=dict(mem=2, pool_id=config['pools']['standard'], **ctx),
        func="single_cell.utils.pysamutils.get_regions_from_reference",
        ret=pypeliner.managed.TempOutputObj('region'),
        args=(
            config["ref_genome"],
            config["split_size"],
            config["chromosomes"],
        ))

    workflow.subworkflow(name="wgs_merge_workflow",
                         func=merge_bams.create_merge_bams_workflow,
                         args=(
                             mgd.InputFile('bam_markdups',
                                           'cell_id',
                                           fnames=bam_files,
                                           extensions=['.bai']),
                             mgd.OutputFile("merged_bam",
                                            "region",
                                            axes_origin=[],
                                            template=wgs_bam_template,
                                            extensions=['.bai']),
                             cellids,
                             config,
                             mgd.TempInputObj("region"),
                         ))

    workflow.transform(name="get_files",
                       ctx=dict(mem=2,
                                pool_id=config['pools']['standard'],
                                **ctx),
                       func='single_cell.utils.helpers.resolve_template',
                       ret=pypeliner.managed.TempOutputObj('outputs'),
                       args=(pypeliner.managed.TempInputObj('region'),
                             wgs_bam_template, 'region'))

    inputs = {k: helpers.format_file_yaml(v) for k, v in bam_files.items()}

    metadata = {
        'merge_bams': {
            'name': 'merge_bams',
            'ref_genome': config["ref_genome"],
            'version': single_cell.__version__,
            'containers': config['containers'],
            'output_datasets': pypeliner.managed.TempInputObj('outputs'),
            'input_datasets': inputs,
            'results': None
        }
    }

    workflow.transform(name='generate_meta_yaml',
                       ctx=dict(mem=2,
                                pool_id=config['pools']['standard'],
                                **ctx),
                       func="single_cell.utils.helpers.write_to_yaml",
                       args=(mgd.OutputFile(info_file), metadata))

    return workflow
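
# Hedged sketch of what a template resolver like
# 'single_cell.utils.helpers.resolve_template' could do, matching the
# (chunks, template, key) argument order used above (an assumption, not the
# helper's actual code):
def _resolve_template_sketch(chunks, template, key):
    return {chunk: template.format(**{key: chunk}) for chunk in chunks}
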
def infer_haps_workflow(workflow, args):

    config = helpers.load_config(args)
    remixt_config = config['titan_params'].get('extract_seqdata', {})

    singlecellimage = config['docker']['images']['single_cell_pipeline']
    ctx = {
        'mem_retry_increment': 2,
        'ncpus': 1,
        'image': singlecellimage['image'],
        'dockerize': config['docker']['dockerize'],
        'mounts': config['docker']['mounts'],
        'username': singlecellimage['username'],
        'password': singlecellimage['password'],
        'server': singlecellimage['server'],
    }

    haps_dir = os.path.join(args["out_dir"], "infer_haps")

    haplotypes_filename = os.path.join(haps_dir, "results", "haplotypes.tsv")
    allele_counts_filename = os.path.join(haps_dir, "results",
                                          "allele_counts.tsv")

    snp_positions_filename = remixt.config.get_filename(
        remixt_config, config['titan_params']['ref_data_dir'], 'snp_positions')
    bam_max_fragment_length = remixt.config.get_param(
        remixt_config, 'bam_max_fragment_length')
    bam_max_soft_clipped = remixt.config.get_param(
        remixt_config, 'bam_max_soft_clipped')
    bam_check_proper_pair = remixt.config.get_param(
        remixt_config, 'bam_check_proper_pair')

    workflow.setobj(obj=mgd.OutputChunks('chromosome'),
                    value=config['titan_params']['chromosomes'])

    if args['input_yaml']:
        bam_files, bai_files = helpers.get_bams(args['input_yaml'])
        cellids = helpers.get_samples(args['input_yaml'])

        workflow.setobj(
            obj=mgd.OutputChunks('cell_id'),
            value=cellids,
        )

        workflow.subworkflow(
            name="extract_seqdata",
            axes=('cell_id', ),
            func=extract_seqdata.create_extract_seqdata_workflow,
            args=(
                mgd.InputFile('bam_markdups', 'cell_id', fnames=bam_files),
                mgd.InputFile('bam_markdups_index',
                              'cell_id',
                              fnames=bai_files),
                mgd.TempOutputFile("tumour.h5", "cell_id"),
                config,
                config['titan_params'].get('extract_seqdata', {}),
                config['titan_params']['ref_data_dir'],
                snp_positions_filename,
                bam_max_fragment_length,
                bam_max_soft_clipped,
                bam_check_proper_pair,
            ))

        workflow.transform(
            name='merge_all_seqdata',
            ctx=dict(mem=config["memory"]['high'],
                     pool_id=config['pools']['highmem'],
                     **ctx),
            func="single_cell.workflows.titan.tasks.merge_overlapping_seqdata",
            args=(mgd.TempOutputFile("seqdata_normal_all_cells_merged.h5"),
                  mgd.TempInputFile("tumour.h5", "cell_id"),
                  config["titan_params"]["chromosomes"]),
        )
    else:
        workflow.subworkflow(
            name="extract_seqdata",
            func=extract_seqdata.create_extract_seqdata_workflow,
            args=(
                mgd.InputFile(args['input_bam']),
                mgd.InputFile(args['input_bam'] + '.bai'),
                mgd.TempOutputFile("seqdata_normal_all_cells_merged.h5"),
                config,
                config['titan_params'].get('extract_seqdata', {}),
                config['titan_params']['ref_data_dir'],
                snp_positions_filename,
                bam_max_fragment_length,
                bam_max_soft_clipped,
                bam_check_proper_pair,
            ),
            kwargs={'multiprocess': True})

    if args["normal"]:
        workflow.transform(
            name='infer_snp_genotype',
            axes=('chromosome', ),
            ctx={'mem': 16},
            func='remixt.analysis.haplotype.infer_snp_genotype_from_normal',
            args=(
                mgd.TempOutputFile('snp_genotype.tsv', 'chromosome'),
                mgd.TempInputFile("seqdata_normal_all_cells_merged.h5"),
                mgd.InputInstance('chromosome'),
                config,
            ),
        )
    else:
        workflow.transform(
            name='infer_snp_genotype',
            axes=('chromosome', ),
            ctx={'mem': 16},
            func='remixt.analysis.haplotype.infer_snp_genotype_from_tumour',
            args=(
                mgd.TempOutputFile('snp_genotype.tsv', 'chromosome'),
                {'sample': mgd.TempInputFile("seqdata_normal_all_cells_merged.h5")},
                mgd.InputInstance('chromosome'),
                config,
            ),
        )

    workflow.transform(name='infer_haps',
                       axes=('chromosome', ),
                       ctx={'mem': 16},
                       func='remixt.analysis.haplotype.infer_haps',
                       args=(
                           mgd.TempOutputFile('haps.tsv', 'chromosome'),
                           mgd.TempInputFile('snp_genotype.tsv', 'chromosome'),
                           mgd.InputInstance('chromosome'),
                           mgd.TempSpace('haplotyping', 'chromosome'),
                           config,
                           config['titan_params']['ref_data_dir'],
                       ))

    workflow.transform(name='merge_haps',
                       ctx={'mem': 16},
                       func='remixt.utils.merge_tables',
                       args=(
                           mgd.OutputFile(haplotypes_filename),
                           mgd.TempInputFile('haps.tsv', 'chromosome'),
                       ))

    workflow.transform(
        name='create_segments',
        func='remixt.analysis.segment.create_segments',
        args=(
            mgd.TempOutputFile('segments.tsv'),
            config,
            config['titan_params']['ref_data_dir'],
        ),
    )

    workflow.transform(
        name='haplotype_allele_readcount',
        ctx={'mem': 20},
        func='remixt.analysis.readcount.haplotype_allele_readcount',
        args=(
            mgd.OutputFile(allele_counts_filename),
            mgd.TempInputFile('segments.tsv'),
            mgd.TempInputFile('tumour.h5', 'cell_id'),
            mgd.InputFile(haplotypes_filename),
            config,
        ),
    )

    info_file = os.path.join(args["out_dir"], 'results', 'infer_haps',
                             "info.yaml")

    results = {
        'infer_haps_allele_counts': helpers.format_file_yaml(allele_counts_filename),
        'infer_haps_data': helpers.format_file_yaml(haplotypes_filename),
    }

    if args['input_yaml']:
        input_datasets = {
            k: helpers.format_file_yaml(v)
            for k, v in bam_files.items()
        }
    else:
        input_datasets = helpers.format_file_yaml(args['input_bam'])

    metadata = {
        'infer_haps': {
            'version': single_cell.__version__,
            'results': results,
            'containers': config['containers'],
            'input_datasets': input_datasets,
            'output_datasets': None
        }
    }

    workflow.transform(name='generate_meta_yaml',
                       ctx=dict(mem=config['memory']['med'],
                                pool_id=config['pools']['standard'],
                                mem_retry_increment=2,
                                ncpus=1),
                       func="single_cell.utils.helpers.write_to_yaml",
                       args=(mgd.OutputFile(info_file), metadata))

    return workflow
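

# Hedged sketch of the per-chromosome fan-out/merge used above: the
# infer_snp_genotype and infer_haps transforms run once per 'chromosome'
# chunk, then merge_haps concatenates the per-chromosome tables. A
# plain-pandas analogue of that merge step (the exact behaviour of
# remixt.utils.merge_tables is an assumption):
def _merge_chromosome_tables_sketch(output_filename, chromosome_tables):
    import pandas as pd
    merged = pd.concat(
        [pd.read_csv(filename, sep='\t') for filename in chromosome_tables.values()],
        ignore_index=True)
    merged.to_csv(output_filename, sep='\t', index=False)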