Example #1
def aneufinder_workflow(workflow, args):
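    """Attach the Aneufinder copy number calling workflow: run Aneufinder on
    the per-cell bams, collect the results into an h5 file and a reads pdf,
    and write an info.yaml describing the outputs."""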

    config = helpers.load_config(args)
    cellids = helpers.get_samples(args['input_yaml'])
    bam_files, _  = helpers.get_bams(args['input_yaml'])

    workflow.setobj(
        obj=mgd.OutputChunks('cell_id'),
        value=cellids,
    )

    info_file = os.path.join(args["out_dir"], 'results', 'aneufinder', "info.yaml")

    output = os.path.join(args['out_dir'], 'results', "aneufinder")

    aneufinder_pdf_file = os.path.join(
        output, 'plots', '{}_reads.pdf'.format(args['library_id']))

    helpers.makedirs(output)

    results_filename = os.path.join(output, '{}_results.h5'.format(args['library_id']))
    workflow.subworkflow(
        name='aneufinder_workflow',
        func=aneufinder.create_aneufinder_workflow,
        args=(
            mgd.InputFile('bam_markdups', 'cell_id', fnames=bam_files),
            cellids,
            config,
            output,
            mgd.OutputFile(results_filename),
            mgd.OutputFile(aneufinder_pdf_file),
            args['library_id'],
        ),
    )



    results = {
        'aneufinder_plot': helpers.format_file_yaml(aneufinder_pdf_file),
        'aneufinder_data': helpers.format_file_yaml(results_filename),
    }

    input_datasets = {k: helpers.format_file_yaml(v) for k, v in bam_files.items()}

    metadata = {
        'aneufinder':{
            'reads_table': '/aneufinder/reads',
            'segments_table': '/aneufinder/segments/',
            'chromosomes': config['chromosomes'],
            'ref_genome': config['ref_genome'],
            'version': single_cell.__version__,
            'results': results,
            'containers': config['containers'],
            'input_datasets': input_datasets,
            'output_datasets': None
        }
    }

    workflow.transform(
        name='generate_meta_yaml',
        ctx=dict(mem=config['memory']['med'],
                 pool_id=config['pools']['standard'],
                 mem_retry_increment=2, ncpus=1),
        func="single_cell.utils.helpers.write_to_yaml",
        args=(
            mgd.OutputFile(info_file),
            metadata
        )
    )

    return workflow
Example #2
def germline_calling_workflow(workflow, args):
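    """Attach the germline calling workflow: call germline variants with
    samtools on region-split normal bams, annotate mappability, normal
    genotype and snpEff effects, count alleles per cell, and concatenate
    all tables into germline.h5."""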

    config = helpers.load_config(args)

    ctx = {
        'mem_retry_increment': 2,
        'ncpus': 1,
        'mem': config["memory"]['low'],
        'pool_id': config['pools']['standard'],
    }
    docker_ctx = helpers.get_container_ctx(config['containers'],
                                           'single_cell_pipeline')
    ctx.update(docker_ctx)

    bam_files, bai_files = helpers.get_bams(args['input_yaml'])

    sampleids = helpers.get_samples(args['input_yaml'])

    normal_bam_template = args["input_template"]
    normal_bai_template = args["input_template"] + ".bai"

    if "{reads}" in normal_bam_template:
        raise ValueError(
            "input template for germline calling only support region based splits"
        )

    varcalls_dir = os.path.join(args['out_dir'], 'results', 'germline_calling')

    samtools_germline_vcf = os.path.join(varcalls_dir, 'raw',
                                         'samtools_germline.vcf.gz')
    snpeff_vcf_filename = os.path.join(varcalls_dir, 'snpeff.vcf')
    normal_genotype_filename = os.path.join(varcalls_dir, 'raw',
                                            'normal_genotype.h5')
    mappability_filename = os.path.join(varcalls_dir, 'raw', 'mappability.h5')
    counts_template = os.path.join(varcalls_dir, 'counts', 'raw', 'counts.h5')
    germline_h5_filename = os.path.join(varcalls_dir, 'germline.h5')

    workflow.setobj(
        obj=mgd.OutputChunks('cell_id'),
        value=bam_files.keys(),
    )

    workflow.transform(
        name="get_regions",
        ctx=ctx,
        func="single_cell.utils.pysamutils.get_regions_from_reference",
        ret=pypeliner.managed.OutputChunks('region'),
        args=(
            config["ref_genome"],
            config["split_size"],
            config["chromosomes"],
        ))

    workflow.subworkflow(name='samtools_germline',
                         func=germline.create_samtools_germline_workflow,
                         args=(
                             mgd.InputFile("normal.split.bam",
                                           "region",
                                           template=normal_bam_template),
                             mgd.InputFile("normal.split.bam.bai",
                                           "region",
                                           template=normal_bai_template),
                             config['ref_genome'],
                             mgd.OutputFile(samtools_germline_vcf,
                                            extensions=['.tbi']),
                             config,
                         ),
                         kwargs={
                             'chromosomes':
                             config["chromosomes"],
                             'base_docker':
                             helpers.get_container_ctx(config['containers'],
                                                       'single_cell_pipeline'),
                             'vcftools_docker':
                             helpers.get_container_ctx(config['containers'],
                                                       'vcftools'),
                             'samtools_docker':
                             helpers.get_container_ctx(config['containers'],
                                                       'samtools'),
                         })

    workflow.subworkflow(
        name='annotate_mappability',
        func=
        "biowrappers.components.variant_calling.mappability.create_vcf_mappability_annotation_workflow",
        args=(
            config['databases']['mappability']['local_path'],
            mgd.InputFile(samtools_germline_vcf, extensions=['.tbi']),
            mgd.OutputFile(mappability_filename),
        ),
        kwargs={
            'base_docker':
            helpers.get_container_ctx(config['containers'],
                                      'single_cell_pipeline')
        })

    workflow.transform(
        name='annotate_genotype',
        func="single_cell.workflows.germline.tasks.annotate_normal_genotype",
        ctx=ctx,
        args=(
            mgd.InputFile(samtools_germline_vcf, extensions=['.tbi']),
            mgd.OutputFile(normal_genotype_filename),
            config["chromosomes"],
        ),
    )

    workflow.subworkflow(
        name='snpeff',
        func=
        "biowrappers.components.variant_calling.snpeff.create_snpeff_annotation_workflow",
        args=(
            config['databases']['snpeff']['db'],
            mgd.InputFile(samtools_germline_vcf, extensions=['.tbi']),
            mgd.OutputFile(snpeff_vcf_filename),
        ),
        kwargs={
            'hdf5_output':
            False,
            'base_docker':
            helpers.get_container_ctx(config['containers'],
                                      'single_cell_pipeline'),
            'vcftools_docker':
            helpers.get_container_ctx(config['containers'], 'vcftools'),
            'snpeff_docker':
            helpers.get_container_ctx(config['containers'], 'snpeff'),
        })

    workflow.subworkflow(
        name='read_counts',
        func=
        "single_cell.variant_calling.create_snv_allele_counts_for_vcf_targets_workflow",
        args=(
            config,
            mgd.InputFile('tumour.bam', 'cell_id', fnames=bam_files),
            mgd.InputFile('tumour.bam.bai', 'cell_id', fnames=bai_files),
            mgd.InputFile(samtools_germline_vcf, extensions=['.tbi']),
            mgd.OutputFile(counts_template),
        ),
        kwargs={
            'table_name':
            '/germline_allele_counts',
            'docker_config':
            helpers.get_container_ctx(config['containers'],
                                      'single_cell_pipeline')
        },
    )

    workflow.transform(
        name='build_results_file',
        func="biowrappers.components.io.hdf5.tasks.concatenate_tables",
        ctx=ctx,
        args=(
            [
                mgd.InputFile(counts_template),
                mgd.InputFile(mappability_filename),
                mgd.InputFile(normal_genotype_filename),
            ],
            pypeliner.managed.OutputFile(germline_h5_filename),
        ),
        kwargs={
            'drop_duplicates': True,
        })

    info_file = os.path.join(args["out_dir"], 'results', 'germline_calling',
                             "info.yaml")

    results = {
        'germline_data': helpers.format_file_yaml(germline_h5_filename),
    }

    input_datasets = {
        k: helpers.format_file_yaml(v)
        for k, v in bam_files.items()
    }

    metadata = {
        'germline_calling': {
            'version': single_cell.__version__,
            'results': results,
            'containers': config['containers'],
            'input_datasets': input_datasets,
            'output_datasets': None
        }
    }

    workflow.transform(name='generate_meta_yaml',
                       ctx=dict(mem=config['memory']['med'],
                                pool_id=config['pools']['standard'],
                                mem_retry_increment=2,
                                ncpus=1),
                       func="single_cell.utils.helpers.write_to_yaml",
                       args=(mgd.OutputFile(info_file), metadata))

    return workflow
Example #3
def align_workflow(workflow, args):
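    """Attach the alignment workflow: align per-cell, per-lane fastqs into
    markdup bams (skipped when args["metrics_only"] is set), then compute
    alignment metrics and plots and write an info.yaml describing the
    outputs."""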

    config = helpers.load_config(args)

    sampleinfo = helpers.get_sample_info(args['input_yaml'])

    cellids = helpers.get_samples(args['input_yaml'])
    bam_files, bai_files = helpers.get_bams(args['input_yaml'])

    lib = args["library_id"]

    outdir = os.path.join(args["out_dir"], "results", "alignment")

    info_file = os.path.join(outdir, "info.yaml")

    alignment_metrics_h5 = os.path.join(outdir,
                                        '{}_alignment_metrics.h5'.format(lib))

    plots_dir = os.path.join(outdir, 'plots')
    plot_metrics_output = os.path.join(plots_dir,
                                       '{}_plot_metrics.pdf'.format(lib))

    ctx = {'mem_retry_increment': 2, 'ncpus': 1}
    ctx.update(
        helpers.get_container_ctx(config['containers'],
                                  'single_cell_pipeline'))

    if not args["metrics_only"]:
        fastq1_files, fastq2_files = helpers.get_fastqs(args['input_yaml'])
        instrumentinfo = helpers.get_instrument_info(args['input_yaml'])
        centerinfo = helpers.get_center_info(args['input_yaml'])

        workflow.setobj(
            obj=mgd.OutputChunks('cell_id', 'lane'),
            value=fastq1_files.keys(),
        )

        workflow.subworkflow(
            name='alignment_workflow',
            func=align.create_alignment_workflow,
            args=(
                mgd.InputFile('fastq_1',
                              'cell_id',
                              'lane',
                              fnames=fastq1_files,
                              axes_origin=[]),
                mgd.InputFile('fastq_2',
                              'cell_id',
                              'lane',
                              fnames=fastq2_files,
                              axes_origin=[]),
                mgd.OutputFile('bam_markdups',
                               'cell_id',
                               fnames=bam_files,
                               axes_origin=[]),
                mgd.OutputFile('bai_markdups',
                               'cell_id',
                               fnames=bai_files,
                               axes_origin=[]),
                config['ref_genome'],
                config,
                args,
                instrumentinfo,
                centerinfo,
                sampleinfo,
                cellids,
            ),
        )
    else:
        workflow.setobj(
            obj=mgd.OutputChunks('cell_id'),
            value=cellids,
        )

    workflow.subworkflow(
        name='metrics_workflow',
        func=alignment_metrics.create_alignment_metrics_workflow,
        args=(
            mgd.InputFile('bam_markdups',
                          'cell_id',
                          fnames=bam_files,
                          axes_origin=[]),
            mgd.InputFile('bai_markdups',
                          'cell_id',
                          fnames=bai_files,
                          axes_origin=[]),
            mgd.OutputFile(alignment_metrics_h5),
            mgd.OutputFile(plot_metrics_output),
            config['ref_genome'],
            config,
            args,
            sampleinfo,
            cellids,
        ),
    )

    inputs = helpers.get_fastq_files(args["input_yaml"])
    outputs = {
        k: helpers.format_file_yaml(v)
        for k, v in bam_files.items()
    }

    metadata = {
        'alignment': {
            'name': 'alignment',
            'cell_batch_realign': args["realign"],
            'metrics_table': '/alignment/metrics',
            'gc_metrics_table': '/alignment/gc_metrics',
            'aligner': config["aligner"],
            'adapter': config["adapter"],
            'adapter2': config["adapter2"],
            'picardtools_wgsmetrics_params': config['picard_wgs_params'],
            'ref_genome': config["ref_genome"],
            'version': single_cell.__version__,
            'containers': config['containers'],
            'output_datasets': outputs,
            'input_datasets': inputs,
            'results': {
                'alignment_metrics':
                helpers.format_file_yaml(alignment_metrics_h5),
                'alignment_plots':
                helpers.format_file_yaml(plot_metrics_output),
            },
        }
    }

    workflow.transform(name='generate_meta_yaml',
                       ctx=dict(mem=config['memory']['med'],
                                pool_id=config['pools']['standard'],
                                **ctx),
                       func="single_cell.utils.helpers.write_to_yaml",
                       args=(mgd.OutputFile(info_file), metadata))

    return workflow
Example #4
def create_variant_calling_workflow(
    tumour_cell_bams,
    tumour_region_bams,
    normal_region_bams,
    museq_vcf,
    strelka_snv_vcf,
    strelka_indel_vcf,
    snv_h5,
    config,
    args,
    raw_data_dir,
):
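    """Build the variant calling workflow: call SNVs with mutationseq and
    strelka on region-split tumour/normal bams, merge and annotate the
    calls, count SNV alleles per cell, and concatenate all tables into
    snv_h5."""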
    workflow = pypeliner.workflow.Workflow()

    ctx = {'mem_retry_increment': 2, 'ncpus': 1}
    ctx.update(
        helpers.get_container_ctx(config['containers'],
                                  'single_cell_pipeline'))

    workflow.set_filenames('normal_regions.bam',
                           'region',
                           fnames=normal_region_bams)
    workflow.set_filenames('tumour_cells.bam',
                           'cell_id',
                           fnames=tumour_cell_bams)
    workflow.set_filenames('tumour_regions.bam',
                           'region',
                           fnames=tumour_region_bams)

    workflow.setobj(
        obj=mgd.OutputChunks('cell_id'),
        value=tumour_cell_bams.keys(),
    )

    workflow.setobj(
        obj=mgd.OutputChunks('region'),
        value=tumour_region_bams.keys(),
    )

    workflow.subworkflow(
        name='museq',
        func=mutationseq.create_museq_workflow,
        args=(
            mgd.InputFile('normal_regions.bam', 'region', extensions=['.bai']),
            mgd.InputFile('tumour_regions.bam', 'region', extensions=['.bai']),
            config['ref_genome'],
            mgd.OutputFile(museq_vcf),
            config,
        ),
    )

    workflow.subworkflow(name='strelka',
                         func=strelka.create_strelka_workflow,
                         args=(
                             mgd.InputFile('normal_regions.bam',
                                           'region',
                                           extensions=['.bai']),
                             mgd.InputFile('tumour_regions.bam',
                                           'region',
                                           extensions=['.bai']),
                             config['ref_genome'],
                             mgd.OutputFile(strelka_indel_vcf),
                             mgd.OutputFile(strelka_snv_vcf),
                             config,
                         ),
                         kwargs={"chromosomes": config["chromosomes"]})

    workflow.transform(
        name='convert_museq_to_hdf5',
        func="biowrappers.components.io.vcf.tasks.convert_vcf_to_hdf5",
        ctx=dict(mem=2, pool_id=config['pools']['standard'], **ctx),
        args=(
            mgd.InputFile(museq_vcf),
            mgd.TempOutputFile('museq.h5'),
            '/museq/vcf/',
        ),
        kwargs={
            'score_callback': museq_callback,
        })

    workflow.transform(
        name='convert_strelka_to_hdf5',
        func="biowrappers.components.io.vcf.tasks.convert_vcf_to_hdf5",
        ctx=dict(mem=2, pool_id=config['pools']['standard'], **ctx),
        args=(
            mgd.InputFile(strelka_snv_vcf),
            mgd.TempOutputFile('strelka_snv.h5'),
            '/strelka/vcf/',
        ),
        kwargs={
            'score_callback': strelka_snv_callback,
        })

    workflow.transform(name='merge_snvs',
                       func='biowrappers.components.io.vcf.tasks.merge_vcfs',
                       ctx=dict(mem=2,
                                pool_id=config['pools']['standard'],
                                **ctx),
                       args=([
                           mgd.InputFile(museq_vcf),
                           mgd.InputFile(strelka_snv_vcf),
                       ], mgd.TempOutputFile('all.snv.vcf')),
                       kwargs={
                           'docker_config':
                           helpers.get_container_ctx(config['containers'],
                                                     'vcftools')
                       })

    workflow.transform(name='finalise_snvs',
                       func="biowrappers.components.io.vcf.tasks.finalise_vcf",
                       ctx=dict(mem=2,
                                pool_id=config['pools']['standard'],
                                **ctx),
                       args=(mgd.TempInputFile('all.snv.vcf'),
                             mgd.TempOutputFile('all.snv.vcf.gz',
                                                extensions=['.tbi'])),
                       kwargs={
                           'docker_config':
                           helpers.get_container_ctx(config['containers'],
                                                     'vcftools')
                       })

    workflow.subworkflow(
        name='annotate_snvs',
        axes=(),
        func=
        "biowrappers.pipelines.snv_call_and_annotate.create_annotation_workflow",
        args=(
            config,
            mgd.TempInputFile('all.snv.vcf.gz'),
            mgd.TempOutputFile('snv_annotations.h5'),
            os.path.join(raw_data_dir, 'snv'),
        ),
        kwargs={
            'variant_type':
            'snv',
            'docker_config':
            helpers.get_container_ctx(config['containers'],
                                      'single_cell_pipeline')
        })

    workflow.subworkflow(
        name='count_alleles',
        func=create_snv_allele_counts_for_vcf_targets_workflow,
        args=(
            config,
            mgd.InputFile('tumour_cells.bam', 'cell_id', extensions=['.bai']),
            mgd.TempInputFile('all.snv.vcf.gz'),
            mgd.TempOutputFile('snv_counts.h5'),
        ),
        kwargs={
            'chromosomes':
            config['chromosomes'],
            'docker_config':
            helpers.get_container_ctx(config['containers'],
                                      'single_cell_pipeline')
        })

    workflow.transform(
        name='build_results_file',
        ctx=dict(mem=config['memory']['high'],
                 pool_id=config['pools']['highmem'],
                 **ctx),
        func="biowrappers.components.io.hdf5.tasks.concatenate_tables",
        args=(
            [
                mgd.TempInputFile('snv_counts.h5'),
                mgd.TempInputFile('snv_annotations.h5'),
                mgd.TempInputFile('museq.h5'),
                mgd.TempInputFile('strelka_snv.h5'),
            ],
            pypeliner.managed.OutputFile(snv_h5),
        ),
        kwargs={
            'drop_duplicates': True,
            'in_memory': False,
        })

    info_file = os.path.join(args["out_dir"], 'results', 'variant_calling',
                             "info.yaml")
    normals = {
        k: helpers.format_file_yaml(v)
        for k, v in normal_region_bams.items()
    }
    tumours = {
        k: helpers.format_file_yaml(v)
        for k, v in tumour_region_bams.items()
    }
    cells = {
        k: helpers.format_file_yaml(v)
        for k, v in tumour_cell_bams.items()
    }
    inputs = {'normal': normals, 'tumour': tumours, 'cells': cells}

    metadata = {
        'variant_calling': {
            'name': 'variant_calling',
            'version': single_cell.__version__,
            'containers': config['containers'],
            'output_datasets': None,
            'input_datasets': inputs,
            'results': {
                'variant_calling_data': helpers.format_file_yaml(snv_h5)
            }
        }
    }

    workflow.transform(name='generate_meta_yaml',
                       ctx=dict(mem=config['memory']['med'],
                                pool_id=config['pools']['standard']),
                       func="single_cell.utils.helpers.write_to_yaml",
                       args=(mgd.OutputFile(info_file), metadata))

    return workflow
Example #5
def ltm_workflow(workflow, args):
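    """Attach the LTM workflow: infer a lineage tree from per-timepoint
    hmmcopy results, producing the copy number matrix, tree GML files and
    the csv/Rmd inputs needed for cellscape visualization."""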

    config = helpers.load_config(args)

    hmmcopy, timepoints = ltmutils.read_input_file(args['input_csv'])

    cn_matrix = os.path.join(args['out_dir'], 'cn_matrix.csv')
    output_gml = os.path.join(args['out_dir'], 'tree.gml')
    output_rooted_gml = os.path.join(args['out_dir'], 'rooted_tree.gml')

    # Outputs required for visualization with cellscape
    cnv_annots_csv = os.path.join(args['out_dir'], 'cnv_annots.csv')
    cnv_tree_edges_csv = os.path.join(args['out_dir'], 'cnv_tree_edges.csv')
    cnv_data_csv = os.path.join(args['out_dir'], 'cnv_data.csv')
    output_rmd = os.path.join(args['out_dir'], 'cellscape.Rmd')
    root_id_file = os.path.join(args['out_dir'], 'root_id.txt')

    workflow.setobj(
        obj=mgd.OutputChunks('timepoint'),
        value=timepoints,
    )

    workflow.subworkflow(
        name='ltm_scale',
        func=ltm.create_ltm_workflow,
        args=(
            mgd.InputFile('hmmcopy.h5', 'timepoint', fnames=hmmcopy),
            mgd.OutputFile(cn_matrix),
            mgd.OutputFile(output_gml),
            mgd.OutputFile(output_rooted_gml),
            mgd.OutputFile(cnv_annots_csv),
            mgd.OutputFile(cnv_tree_edges_csv),
            mgd.OutputFile(cnv_data_csv),
            mgd.OutputFile(output_rmd),
            config,
            args['root_id'],
            mgd.OutputFile(root_id_file),
            args['number_of_jobs'],
            args['ploidy'],
        ),
    )

    info_file = os.path.join(args["out_dir"], 'results', 'ltm', "info.yaml")

    results = {
        'ltm_cn_matrix': helpers.format_file_yaml(cn_matrix),
        'ltm_gml': helpers.format_file_yaml(output_gml),
        'ltm_rooted_gml': helpers.format_file_yaml(output_rooted_gml),
        'ltm_cnv_annots_csv': helpers.format_file_yaml(cnv_annots_csv),
        'ltm_cnv_tree_edges_csv': helpers.format_file_yaml(cnv_tree_edges_csv),
        'ltm_cnv_data_csv': helpers.format_file_yaml(cnv_data_csv),
        'ltm_output_rmd': helpers.format_file_yaml(output_rmd)
    }

    input_datasets = {
        k: helpers.format_file_yaml(v)
        for k, v in hmmcopy.items()
    }

    metadata = {
        'LTM': {
            'chromosomes': config['chromosomes'],
            'ref_genome': config['ref_genome'],
            'cell_filters': config["good_cells"],
            'version': single_cell.__version__,
            'results': results,
            'containers': config['containers'],
            'input_datasets': input_datasets,
            'output_datasets': None
        }
    }

    workflow.transform(name='generate_meta_yaml',
                       ctx=dict(
                           mem=config['memory']['med'],
                           pool_id=config['pools']['standard'],
                       ),
                       func="single_cell.utils.helpers.write_to_yaml",
                       args=(mgd.OutputFile(info_file), metadata))

    return workflow
Example #6
def breakpoint_calling_workflow(workflow, args):
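    """Attach the breakpoint calling workflow: run destruct on the matched
    normal bam and the per-cell tumour bams and write the calls to
    breakpoints.h5."""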

    config = helpers.load_config(args)

    normal_bam_file = args['matched_normal']
    bam_files, bai_files = helpers.get_bams(args['input_yaml'])

    varcalls_dir = os.path.join(args['out_dir'], 'results',
                                'breakpoint_calling')
    raw_data_directory = os.path.join(varcalls_dir, 'raw')
    breakpoints_filename = os.path.join(varcalls_dir, 'breakpoints.h5')
    ref_data_directory = '/refdata'

    workflow.setobj(
        obj=mgd.OutputChunks('cell_id'),
        value=bam_files.keys(),
    )

    workflow.subworkflow(
        name='destruct',
        func=
        "biowrappers.components.breakpoint_calling.destruct.destruct_pipeline",
        args=(
            mgd.InputFile(normal_bam_file),
            mgd.InputFile('tumour.bam', 'cell_id', fnames=bam_files),
            config.get('destruct', {}),
            ref_data_directory,
            mgd.OutputFile(breakpoints_filename),
            raw_data_directory,
        ),
    )

    info_file = os.path.join(args["out_dir"], 'results', 'breakpoint_calling',
                             "info.yaml")

    results = {
        'destruct_data': helpers.format_file_yaml(breakpoints_filename),
    }

    input_datasets = {
        k: helpers.format_file_yaml(v)
        for k, v in bam_files.items()
    }
    input_datasets = {'normal': normal_bam_file, 'tumour': input_datasets}

    metadata = {
        'breakpoint_calling': {
            'ref_data': ref_data_directory,
            'version': single_cell.__version__,
            'results': results,
            'containers': config['containers'],
            'input_datasets': input_datasets,
            'output_datasets': None
        }
    }

    workflow.transform(name='generate_meta_yaml',
                       ctx=dict(mem=config['memory']['med'],
                                pool_id=config['pools']['standard'],
                                mem_retry_increment=2,
                                ncpus=1),
                       func="single_cell.utils.helpers.write_to_yaml",
                       args=(mgd.OutputFile(info_file), metadata))

    return workflow
Example #7
def copy_number_calling_workflow(workflow, args):
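    """Attach the copy number calling workflow: extract remixt seqdata from
    the tumour and normal cells, then run TITAN with the given clone id and
    write results.h5."""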

    config = helpers.load_config(args)

    ctx = {'mem_retry_increment': 2, 'ncpus': 1,
           'mem': config["memory"]['low'],
           'pool_id': config['pools']['standard']}
    docker_ctx = helpers.get_container_ctx(config['containers'], 'single_cell_pipeline')
    ctx.update(docker_ctx)

    tumour_bam_files, tumour_bai_files = helpers.get_bams(args['tumour_yaml'])

    normal_bam_files, normal_bai_files = helpers.get_bams(args['normal_yaml'])

    tumour_cellids = helpers.get_samples(args['tumour_yaml'])

    normal_cellids = helpers.get_samples(args['normal_yaml'])

    if set(tumour_bam_files.keys()) != set(tumour_cellids):
        raise ValueError("cell ids in tumour_yaml do not match tumour bam files")

    if set(normal_bam_files.keys()) != set(normal_cellids):
        raise ValueError("cell ids in normal_yaml do not match normal bam files")

    copynumber_dir = os.path.join(args["out_dir"], "copynumber")

    out_file = os.path.join(copynumber_dir, "results", "results.h5")

    cloneid = args["clone_id"]

    remixt_config = config['titan_params'].get('extract_seqdata', {})

    workflow.setobj(
        obj=mgd.OutputChunks('tumour_cell_id'),
        value=tumour_cellids,
    )

    workflow.setobj(
        obj=mgd.OutputChunks('normal_cell_id'),
        value=normal_cellids,
    )

    workflow.transform(
        name="get_snp_positions_filename",
        ctx=ctx,
        func="remixt.config.get_filename",
        ret=mgd.TempOutputObj('snp_positions_filename'),
        args=(
              remixt_config,
              config['titan_params']['ref_data_dir'],
              'snp_positions'
        )
    )

    workflow.transform(
        name="get_bam_max_fragment_length",
        ctx=ctx,
        func="remixt.config.get_param",
        ret=mgd.TempOutputObj('bam_max_fragment_length'),
        args=(
              remixt_config,
              'bam_max_fragment_length'
        )
    )

    workflow.transform(
        name="get_bam_max_soft_clipped",
        ctx=ctx,
        func="remixt.config.get_param",
        ret=mgd.TempOutputObj('bam_max_soft_clipped'),
        args=(
              remixt_config,
              'bam_max_soft_clipped'
        )
    )

    workflow.transform(
        name="get_bam_check_proper_pair",
        ctx=ctx,
        func="remixt.config.get_param",
        ret=mgd.TempOutputObj('bam_check_proper_pair'),
        args=(
              remixt_config,
              'bam_check_proper_pair'
        )
    )


    workflow.subworkflow(
        name="extract_seqdata_tumour",
        axes=('tumour_cell_id',),
        func=extract_seqdata.create_extract_seqdata_workflow,
        args=(
            mgd.InputFile(
                'bam_markdups',
                'tumour_cell_id',
                fnames=tumour_bam_files),
            mgd.InputFile(
                'bam_markdups_index',
                'tumour_cell_id',
                fnames=tumour_bai_files),
            mgd.TempOutputFile("tumour.h5", "tumour_cell_id"),
            config,
            config['titan_params'].get('extract_seqdata', {}),
            config['titan_params']['ref_data_dir'],
            mgd.TempInputObj('snp_positions_filename'),
            mgd.TempInputObj('bam_max_fragment_length'),
            mgd.TempInputObj('bam_max_soft_clipped'),
            mgd.TempInputObj('bam_check_proper_pair'),
        )
    )

    workflow.subworkflow(
        name="extract_seqdata_normal",
        axes=('normal_cell_id',),
        func=extract_seqdata.create_extract_seqdata_workflow,
        args=(
            mgd.InputFile(
                'bam_markdups',
                'normal_cell_id',
                fnames=normal_bam_files),
            mgd.InputFile(
                'bam_markdups_index',
                'normal_cell_id',
                fnames=normal_bai_files),
            mgd.TempOutputFile("normal.h5", "normal_cell_id"),
            config,
            config['titan_params'].get('extract_seqdata', {}),
            config['titan_params']['ref_data_dir'],
            mgd.TempInputObj('snp_positions_filename'),
            mgd.TempInputObj('bam_max_fragment_length'),
            mgd.TempInputObj('bam_max_soft_clipped'),
            mgd.TempInputObj('bam_check_proper_pair'),
        )
    )

    workflow.subworkflow(
        name='titan_workflow',
        func=titan.create_titan_workflow,
        args=(
            mgd.TempInputFile("normal.h5", "normal_cell_id"),
            mgd.TempInputFile("tumour.h5", "tumour_cell_id"),
            config['ref_genome'],
            copynumber_dir,
            out_file,
            config,
            args,
            tumour_cellids,
            normal_cellids,
            cloneid
        ),
    )

    info_file = os.path.join(args["out_dir"], 'results', 'copynumber_calling', "info.yaml")

    results = {
        'copynumber_data': helpers.format_file_yaml(out_file),
    }

    tumours = {k: helpers.format_file_yaml(v) for k, v in tumour_bam_files.items()}
    normals = {k: helpers.format_file_yaml(v) for k, v in normal_bam_files.items()}
    input_datasets = {'tumour': tumours, 'normal': normals}

    metadata = {
        'copynumber_calling': {
            'chromosomes': config['chromosomes'],
            'ref_genome': config['ref_genome'],
            'version': single_cell.__version__,
            'results': results,
            'containers': config['containers'],
            'input_datasets': input_datasets,
            'output_datasets': None
        }
    }

    workflow.transform(
        name='generate_meta_yaml',
        ctx=dict(mem=config['memory']['med'],
                 pool_id=config['pools']['standard'],
                 mem_retry_increment=2, ncpus=1),
        func="single_cell.utils.helpers.write_to_yaml",
        args=(
            mgd.OutputFile(info_file),
            metadata
        )
    )

    return workflow
Example #8
def merge_bams_workflow(workflow, args):
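    """Attach the bam merge workflow: merge the per-cell bams into
    region-split wgs bams named by the merged bam template, and record the
    resolved outputs in an info.yaml."""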

    input_yaml = args["input_yaml"]
    output_template = args["merged_bam_template"]

    info_file = os.path.join(args["out_dir"], 'results', 'merge_bams',
                             "info.yaml")
    config = helpers.load_config(args)
    bam_files, bai_files = helpers.get_bams(input_yaml)
    cellids = helpers.get_samples(input_yaml)

    wgs_bam_template = output_template
    wgs_bai_template = wgs_bam_template + ".bai"

    ctx = {'mem_retry_increment': 2, 'ncpus': 1}
    ctx.update(
        helpers.get_container_ctx(config['containers'],
                                  'single_cell_pipeline'))

    workflow.setobj(
        obj=mgd.OutputChunks('cell_id'),
        value=cellids,
    )

    workflow.transform(
        name="get_regions",
        ctx=dict(mem=2, pool_id=config['pools']['standard'], **ctx),
        func="single_cell.utils.pysamutils.get_regions_from_reference",
        ret=pypeliner.managed.TempOutputObj('region'),
        args=(
            config["ref_genome"],
            config["split_size"],
            config["chromosomes"],
        ))

    workflow.subworkflow(name="wgs_merge_workflow",
                         func=merge_bams.create_merge_bams_workflow,
                         args=(
                             mgd.InputFile('bam_markdups',
                                           'cell_id',
                                           fnames=bam_files,
                                           extensions=['.bai']),
                             mgd.OutputFile("merged_bam",
                                            "region",
                                            axes_origin=[],
                                            template=wgs_bam_template,
                                            extensions=['.bai']),
                             cellids,
                             config,
                             mgd.TempInputObj("region"),
                         ))

    workflow.transform(name="get_files",
                       ctx=dict(mem=2,
                                pool_id=config['pools']['standard'],
                                **ctx),
                       func='single_cell.utils.helpers.resolve_template',
                       ret=pypeliner.managed.TempOutputObj('outputs'),
                       args=(pypeliner.managed.TempInputObj('region'),
                             wgs_bam_template, 'region'))

    inputs = {k: helpers.format_file_yaml(v) for k, v in bam_files.items()}

    metadata = {
        'merge_bams': {
            'name': 'merge_bams',
            'ref_genome': config["ref_genome"],
            'version': single_cell.__version__,
            'containers': config['containers'],
            'output_datasets': pypeliner.managed.TempInputObj('outputs'),
            'input_datasets': inputs,
            'results': None
        }
    }

    workflow.transform(name='generate_meta_yaml',
                       ctx=dict(mem=2,
                                pool_id=config['pools']['standard'],
                                **ctx),
                       func="single_cell.utils.helpers.write_to_yaml",
                       args=(mgd.OutputFile(info_file), metadata))

    return workflow
Example #9
def infer_haps_workflow(workflow, args):
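    """Attach the haplotype inference workflow: extract remixt seqdata from
    the per-cell bams (or a single input bam), infer SNP genotypes and
    haplotypes per chromosome with remixt, and count haplotype allele reads
    per segment."""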

    config = helpers.load_config(args)
    remixt_config = config['titan_params'].get('extract_seqdata', {})

    singlecellimage = config['docker']['images']['single_cell_pipeline']
    ctx = {
        'mem_retry_increment': 2,
        'ncpus': 1,
        'image': singlecellimage['image'],
        'dockerize': config['docker']['dockerize'],
        'mounts': config['docker']['mounts'],
        'username': singlecellimage['username'],
        'password': singlecellimage['password'],
        'server': singlecellimage['server'],
    }

    haps_dir = os.path.join(args["out_dir"], "infer_haps")

    haplotypes_filename = os.path.join(haps_dir, "results", "haplotypes.tsv")
    allele_counts_filename = os.path.join(haps_dir, "results",
                                          "allele_counts.tsv")

    snp_positions_filename = remixt.config.get_filename(
        remixt_config, config['titan_params']['ref_data_dir'], 'snp_positions')
    bam_max_fragment_length = remixt.config.get_param(
        remixt_config, 'bam_max_fragment_length')
    bam_max_soft_clipped = remixt.config.get_param(
        remixt_config, 'bam_max_soft_clipped')
    bam_check_proper_pair = remixt.config.get_param(
        remixt_config, 'bam_check_proper_pair')

    workflow.setobj(obj=mgd.OutputChunks('chromosome'),
                    value=config['titan_params']['chromosomes'])

    if args['input_yaml']:
        bam_files, bai_files = helpers.get_bams(args['input_yaml'])
        cellids = helpers.get_samples(args['input_yaml'])

        workflow.setobj(
            obj=mgd.OutputChunks('cell_id'),
            value=cellids,
        )

        workflow.subworkflow(
            name="extract_seqdata",
            axes=('cell_id', ),
            func=extract_seqdata.create_extract_seqdata_workflow,
            args=(
                mgd.InputFile('bam_markdups', 'cell_id', fnames=bam_files),
                mgd.InputFile('bam_markdups_index',
                              'cell_id',
                              fnames=bai_files),
                mgd.TempOutputFile("tumour.h5", "cell_id"),
                config,
                config['titan_params'].get('extract_seqdata', {}),
                config['titan_params']['ref_data_dir'],
                snp_positions_filename,
                bam_max_fragment_length,
                bam_max_soft_clipped,
                bam_check_proper_pair,
            ))

        workflow.transform(
            name='merge_all_seqdata',
            ctx=dict(mem=config["memory"]['high'],
                     pool_id=config['pools']['highmem'],
                     **ctx),
            func="single_cell.workflows.titan.tasks.merge_overlapping_seqdata",
            args=(mgd.TempOutputFile("seqdata_normal_all_cells_merged.h5"),
                  mgd.TempInputFile("tumour.h5", "cell_id"),
                  config["titan_params"]["chromosomes"]),
        )
    else:
        workflow.subworkflow(
            name="extract_seqdata",
            func=extract_seqdata.create_extract_seqdata_workflow,
            args=(
                mgd.InputFile(args['input_bam']),
                mgd.InputFile(args['input_bam'] + '.bai'),
                mgd.TempOutputFile("seqdata_normal_all_cells_merged.h5"),
                config,
                config['titan_params'].get('extract_seqdata', {}),
                config['titan_params']['ref_data_dir'],
                snp_positions_filename,
                bam_max_fragment_length,
                bam_max_soft_clipped,
                bam_check_proper_pair,
            ),
            kwargs={'multiprocess': True})

    if args["normal"]:
        workflow.transform(
            name='infer_snp_genotype',
            axes=('chromosome', ),
            ctx={'mem': 16},
            func='remixt.analysis.haplotype.infer_snp_genotype_from_normal',
            args=(
                mgd.TempOutputFile('snp_genotype.tsv', 'chromosome'),
                mgd.TempInputFile("seqdata_normal_all_cells_merged.h5"),
                mgd.InputInstance('chromosome'),
                config,
            ),
        )
    else:
        workflow.transform(
            name='infer_snp_genotype',
            axes=('chromosome', ),
            ctx={'mem': 16},
            func='remixt.analysis.haplotype.infer_snp_genotype_from_tumour',
            args=(
                mgd.TempOutputFile('snp_genotype.tsv', 'chromosome'),
                {
                    'sample':
                    mgd.TempInputFile("seqdata_normal_all_cells_merged.h5")
                },
                mgd.InputInstance('chromosome'),
                config,
            ),
        )

    workflow.transform(name='infer_haps',
                       axes=('chromosome', ),
                       ctx={'mem': 16},
                       func='remixt.analysis.haplotype.infer_haps',
                       args=(
                           mgd.TempOutputFile('haps.tsv', 'chromosome'),
                           mgd.TempInputFile('snp_genotype.tsv', 'chromosome'),
                           mgd.InputInstance('chromosome'),
                           mgd.TempSpace('haplotyping', 'chromosome'),
                           config,
                           config['titan_params']['ref_data_dir'],
                       ))

    workflow.transform(name='merge_haps',
                       ctx={'mem': 16},
                       func='remixt.utils.merge_tables',
                       args=(
                           mgd.OutputFile(haplotypes_filename),
                           mgd.TempInputFile('haps.tsv', 'chromosome'),
                       ))

    workflow.transform(
        name='create_segments',
        func='remixt.analysis.segment.create_segments',
        args=(
            mgd.TempOutputFile('segments.tsv'),
            config,
            config['titan_params']['ref_data_dir'],
        ),
    )

    workflow.transform(
        name='haplotype_allele_readcount',
        ctx={'mem': 20},
        func='remixt.analysis.readcount.haplotype_allele_readcount',
        args=(
            mgd.OutputFile(allele_counts_filename),
            mgd.TempInputFile('segments.tsv'),
            mgd.TempInputFile('tumour.h5', 'cell_id'),
            mgd.InputFile(haplotypes_filename),
            config,
        ),
    )

    info_file = os.path.join(args["out_dir"], 'results', 'infer_haps',
                             "info.yaml")

    results = {
        'infer_haps_allele_counts':
        helpers.format_file_yaml(allele_counts_filename),
        'infer_haps_data':
        helpers.format_file_yaml(haplotypes_filename),
    }

    if args['input_yaml']:
        input_datasets = {
            k: helpers.format_file_yaml(v)
            for k, v in bam_files.items()
        }
    else:
        input_datasets = helpers.format_file_yaml(args['input_bam'])

    metadata = {
        'infer_haps': {
            'version': single_cell.__version__,
            'results': results,
            'containers': config['containers'],
            'input_datasets': input_datasets,
            'output_datasets': None
        }
    }

    workflow.transform(name='generate_meta_yaml',
                       ctx=dict(mem=config['memory']['med'],
                                pool_id=config['pools']['standard'],
                                mem_retry_increment=2,
                                ncpus=1),
                       func="single_cell.utils.helpers.write_to_yaml",
                       args=(mgd.OutputFile(info_file), metadata))

    return workflow
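All of these factory functions follow the same pypeliner pattern: receive (or build) a pypeliner.workflow.Workflow, attach setobj/transform/subworkflow jobs whose inputs and outputs are declared through pypeliner.managed, and return the workflow for a scheduler to execute. Below is a minimal, self-contained sketch of that pattern; the write_greeting task and the 'tmpdir' config value are illustrative assumptions, not part of the pipeline code above.

import pypeliner
import pypeliner.app
import pypeliner.workflow
import pypeliner.managed as mgd


def write_greeting(out_file):
    # Trivial stand-in for a pipeline task; pypeliner passes a temporary
    # path and moves it into place once the job succeeds.
    with open(out_file, 'w') as f:
        f.write('hello\n')


if __name__ == '__main__':
    workflow = pypeliner.workflow.Workflow()

    # Declare one job; mgd.OutputFile tells the scheduler this task
    # produces greeting.txt, so downstream jobs could depend on it.
    workflow.transform(
        name='write_greeting',
        func=write_greeting,
        args=(mgd.OutputFile('greeting.txt'),),
    )

    # The scheduler resolves managed files, decides what is out of date,
    # and runs the jobs ('tmpdir' is an assumed scratch location).
    pyp = pypeliner.app.Pypeline(config={'tmpdir': 'pypeliner_tmp'})
    pyp.run(workflow)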