Example #1
    def __init__(self,
                 sampledata,
                 refdata,
                 job_params,
                 outdir,
                 libdir,
                 umi,
                 maxcores=1,
                 scratch="/scratch/tmp/tmp",
                 **kwargs):
        ClinseqPipeline.__init__(self, sampledata, refdata, job_params, outdir,
                                 libdir, umi, maxcores, scratch, **kwargs)

        # Set the min alt frac value:
        self.default_job_params["vardict-min-alt-frac"] = 0.01
        self.default_job_params["vardict-min-num-reads"] = None
        self.default_job_params["vep-additional-options"] = " --pick --filter_common "

        # Remove clinseq barcodes for which data is not available:
        self.check_sampledata()

        if umi:
            # Configure the umi processes from fastq to bam file:
            self.configure_umi_processing()
        else:
            # Configure alignment and merging of fastq data for all clinseq barcodes:
            self.configure_align_and_merge()

        # Configure all panel analyses:
        self.configure_panel_analyses()

        # Configure liqbio-specific panel analyses:
        self.configure_panel_analyses_liqbio(umi)

        # Configure additional msings analysis:
        self.configure_panel_msings_analyses()

        # Configure QC of all panel data:
        self.configure_all_panel_qcs()

        # Configure fastq QCs:
        self.configure_fastq_qcs()

        # Configure the low-pass whole genome analysis:
        self.configure_lowpass_analyses()

        # Configure low-pass whole genome data QC:
        self.configure_all_lowpass_qcs()

        # Configure MultiQC:
        self.configure_multi_qc()
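
The constructor above only records the VarDict and VEP defaults in default_job_params; how those defaults are later resolved against caller-supplied job_params is up to ClinseqPipeline. A minimal sketch of one plausible lookup, assuming caller-supplied values take precedence (the get_job_param helper below is hypothetical, not the project's actual API):

# Hedged sketch: one plausible way the defaults set above could be resolved
# against caller-supplied job_params. get_job_param is a hypothetical helper,
# not necessarily the actual ClinseqPipeline API.
def get_job_param(job_params, default_job_params, key):
    """Return the caller-supplied value if present, otherwise the pipeline default."""
    if key in job_params:
        return job_params[key]
    return default_job_params.get(key)

defaults = {"vardict-min-alt-frac": 0.01, "vardict-min-num-reads": None}
overrides = {"vardict-min-alt-frac": 0.05}
print(get_job_param(overrides, defaults, "vardict-min-alt-frac"))   # 0.05
print(get_job_param(overrides, defaults, "vardict-min-num-reads"))  # None
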
Example #2
    def __init__(self,
                 sampledata,
                 refdata,
                 job_params,
                 outdir,
                 libdir,
                 maxcores=1,
                 scratch="/scratch/tmp/tmp/",
                 referral_db_conf="tests/referrals/referral-db-config.json",
                 addresses="tests/referrals/addresses.csv",
                 **kwargs):
        ClinseqPipeline.__init__(self, sampledata, refdata, job_params, outdir,
                                 libdir, maxcores, scratch, **kwargs)

        self.referral_db_conf = referral_db_conf
        self.addresses = addresses
        self.default_job_params["vardict-min-num-reads"] = 6
        self.default_job_params["create_alascca_report"] = True

        # Check to ensure that the sample data is valid for an ALASCCA analysis:
        self.validate_sample_data_for_alascca()

        # Remove sample capture items for which data is not available:
        self.check_sampledata()

        # Configure alignment and merging of fastq data for all clinseq barcodes:
        self.configure_align_and_merge()

        # Configure all panel analyses:
        self.configure_panel_analyses()

        # Configure QC of all panel data:
        self.configure_all_panel_qcs()

        # Configure ALASCCA report generation:
        self.configure_alascca_specific_analysis()

        # Configure fastq QCs:
        self.configure_fastq_qcs()

        # Configure MultiQC:
        self.configure_multi_qc()
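
Example #2 front-loads a validate_sample_data_for_alascca() check before any analyses are configured. A minimal sketch of the kind of check such a method might perform, assuming a simple {"panel": {"T": [...], "N": [...]}} sample-data layout (both the layout and the standalone function are illustrative assumptions, not the project's schema):

# Hedged sketch of the kind of check validate_sample_data_for_alascca() might
# perform; the sample-data layout used here is an assumption for illustration.
def validate_panel_captures(sampledata):
    """Require both tumor (T) and normal (N) panel captures before configuring analyses."""
    panel = sampledata.get("panel", {})
    if not panel.get("T") or not panel.get("N"):
        raise ValueError("ALASCCA analysis requires both tumor (T) and normal (N) panel captures.")

validate_panel_captures({"panel": {"T": ["AL-P-0001-T"], "N": ["AL-P-0001-N"]}})  # passes silently
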
Example #3
    def __init__(self, sampledata, refdata, job_params, outdir, libdir, umi,
                 maxcores=1, scratch="/scratch/tmp/tmp", **kwargs):
        ClinseqPipeline.__init__(self, sampledata, refdata, job_params, outdir, libdir, umi,
                                 maxcores, scratch, **kwargs)

        # Set the min alt frac value:
        self.default_job_params["vardict-min-alt-frac"] = 0.01
        self.default_job_params["vardict-min-num-reads"] = None
        self.default_job_params["vep-additional-options"] = " --pick --filter_common "

        # Store the constructor arguments on the instance:
        self.sampledata = sampledata
        self.refdata = refdata
        self.job_params = job_params
        self.outdir = outdir
        self.libdir = libdir
        self.umi = umi
        self.maxcores = maxcores
        self.scratch = scratch
        self.kwargs = kwargs
        self.somatic_merge_vcf = defaultdict(dict)  # requires "from collections import defaultdict" at module level

        # The dictionary below maps each step name to the function that adds its
        # AWS Batch job; the Docker image each step runs in is noted inline.
        self.step_to_run = {
            "qc": self.qc_step,  # docker: base
            "alignment": self.alignment_step,  # docker: aligner
            "cnvkit": self.cnvkit_step,  # docker: variants
            "germline_variant": self.germline_variant_step,  # docker: variants
            "somatic_vardict": self.somatic_variant_vardict_step,  # docker: variants (tested working)
            "somatic_strelka": self.somatic_variant_strelka_step,  # docker: variants (tested working)
            "somatic_mutect2": self.somatic_variant_mutect2_step,  # docker: variants (tested working)
            "somatic_varscan": self.somatic_variant_varscan_step,  # docker: variants (tested working)
            "somatic_variant_merge": self.somatic_variant_merge_step,  # docker: somaticseq (tested working)
            "vep": self.vep_step,  # docker: vep (tested)
            "msi": self.msi_sensor_step,
        }
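
The step_to_run table makes the pipeline steps dispatchable by name. A minimal sketch of how such a table might be consumed, where run_steps and the lambda step functions are illustrative stand-ins rather than the pipeline's actual entry point:

# Hedged sketch of how a step_to_run table like the one above might be
# consumed; the names and step bodies here are assumptions for illustration.
def run_steps(step_to_run, requested):
    """Call the job-adding function for each requested step, in order."""
    for name in requested:
        if name not in step_to_run:
            raise ValueError("Unknown step %r; choose from %s" % (name, sorted(step_to_run)))
        step_to_run[name]()

step_to_run = {
    "qc": lambda: print("queue QC jobs (docker: base)"),
    "alignment": lambda: print("queue alignment jobs (docker: aligner)"),
    "vep": lambda: print("queue VEP annotation jobs (docker: vep)"),
}
run_steps(step_to_run, ["qc", "alignment"])
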