Example #1
    def _get_test_dna_alignments(job, sample_type):
        """
        Get the test bam and bai from s3

        :return: FSID for the bam and bai
        """
        assert sample_type in ('tumor_dna', 'normal_dna')
        bamfile = sample_type + '_fix_pg_sorted.bam'
        base_call = 's3am download s3://cgl-protect-data/unit_inputs/'
        final_call = base_call + sample_type + '.tar.gz ' + sample_type + '.tar.gz'
        subprocess.check_call(final_call.split(' '))
        untargz(sample_type + '.tar.gz', os.getcwd())
        return {bamfile: job.fileStore.writeGlobalFile(sample_type + '/' + bamfile),
                bamfile + '.bai': job.fileStore.writeGlobalFile(sample_type + '/' + bamfile +
                                                                '.bai')}
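These test fixtures lean on a small helper, untargz, whose behavior can only be inferred from its call sites: it appears to extract a .tar.gz into a directory and return the path of the archive's top-level member. A minimal sketch under that assumption (not ProTECT's actual implementation):

import os
import tarfile

def untargz(input_targz_file, untar_to_dir):
    # Sketch only: extract the archive and return the path to its first
    # member, which is how the examples in this listing use the return value.
    with tarfile.open(input_targz_file, 'r:gz') as tar:
        top_member = tar.getmembers()[0]
        tar.extractall(path=untar_to_dir)
    return os.path.join(untar_to_dir, top_member.name)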
Example #2
    def _get_test_rna_alignments(job):
        """
        Get the test bam and bai from s3

        :return: FSID for the bam and bai
        """
        sample_type = 'rna'
        bamfile = sample_type + '_fix_pg_sorted.bam'
        base_call = 's3am download s3://cgl-protect-data/unit_inputs/'
        final_call = base_call + sample_type + '.tar.gz ' + sample_type + '.tar.gz'
        subprocess.check_call(final_call.split(' '))
        untargz(sample_type + '.tar.gz', os.getcwd())
        return {'rnaAligned.sortedByCoord.out.bam': {
            bamfile: job.fileStore.writeGlobalFile(sample_type + '/' + bamfile),
            bamfile + '.bai': job.fileStore.writeGlobalFile(sample_type + '/' + bamfile +
                                                            '.bai')}}
Example #3
def run_pileup(job, tumor_bam, univ_options, somaticsniper_options):
    """
    Runs a samtools pileup on the tumor bam.

    :param toil.Job job: job
    :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict somaticsniper_options: Options specific to SomaticSniper
    :returns: fsID for the pileup file
    :rtype: str
    """
    job.fileStore.logToMaster(
        'Running samtools pileup on %s' % univ_options['patient'])
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
        'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = ['pileup',
                  '-cvi',
                  '-f', docker_path(input_files['genome.fa']),
                  docker_path(input_files['tumor.bam'])]

    with open(os.path.join(work_dir, 'pileup.txt'), 'w') as pileup_file:
        docker_call(tool='samtools:0.1.8', tool_parameters=parameters, work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'], outfile=pileup_file)
    outfile = job.fileStore.writeGlobalFile(pileup_file.name)
    return outfile
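docker_call mounts the working directory at /data inside the container (see the '-o', '/data' comments in the PHLAT examples), so docker_path plausibly rewrites a host path into that mount. A sketch under that assumption:

import os

def docker_path(filepath):
    # Sketch: the container mounts work_dir at /data, so a host path like
    # /tmp/work/genome.fa becomes /data/genome.fa inside the container.
    return os.path.join('/data', os.path.basename(filepath))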
Example #4
def run_pileup(job, tumor_bam, univ_options, somaticsniper_options):
    """
    Runs a samtools pileup on the tumor bam.

    :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict somaticsniper_options: Options specific to SomaticSniper
    :return: fsID for the pileup file
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
        'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in list(input_files.items())}

    parameters = ['pileup',
                  '-cvi',
                  '-f', docker_path(input_files['genome.fa']),
                  docker_path(input_files['tumor.bam'])]

    with open(os.path.join(work_dir, 'pileup.txt'), 'w') as pileup_file:
        docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'], outfile=pileup_file,
                    tool_version=somaticsniper_options['samtools']['version'])
    outfile = job.fileStore.writeGlobalFile(pileup_file.name)
    job.fileStore.logToMaster('Ran samtools pileup on %s successfully' % univ_options['patient'])
    return outfile
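Every example stages its inputs through get_files_from_filestore. A plausible sketch, inferred from the call sites rather than taken from ProTECT itself:

import os

def get_files_from_filestore(job, files, work_dir, docker=False):
    # Sketch: pull each fsID out of the Toil job store under its intended
    # local name; optionally translate the paths for use inside a container.
    for name, fsid in files.items():
        files[name] = job.fileStore.readGlobalFile(
            fsid, userPath=os.path.join(work_dir, name))
    if docker:
        files = {name: docker_path(path) for name, path in files.items()}
    return files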
Example #5
def run_snpeff(job, merged_mutation_file, univ_options, snpeff_options):
    """
    Run snpeff on an input vcf.

    :param toil.fileStore.FileID merged_mutation_file: fsID for input vcf
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict snpeff_options: Options specific to snpeff
    :return: fsID for the snpeffed vcf
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    input_files = {
        'merged_mutations.vcf': merged_mutation_file,
        'snpeff_index.tar.gz': snpeff_options['index']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)
    input_files['snpeff_index'] = untargz(input_files['snpeff_index.tar.gz'],
                                          work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = [
        'eff',
        '-dataDir',
        input_files['snpeff_index'],
        '-c',
        '/'.join([
            input_files['snpeff_index'],
            'snpEff_' + univ_options['ref'] + '_gencode.config'
        ]),
        '-no-intergenic',
        '-no-downstream',
        '-no-upstream',
        # '-canon',
        '-noStats',
        univ_options['ref'] + '_gencode',
        input_files['merged_mutations.vcf']
    ]
    xmx = snpeff_options['java_Xmx'] if snpeff_options['java_Xmx'] else univ_options['java_Xmx']
    with open('/'.join([work_dir, 'mutations.vcf']), 'w') as snpeff_file:
        docker_call(tool='snpeff',
                    tool_parameters=parameters,
                    work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'],
                    java_xmx=xmx,
                    outfile=snpeff_file,
                    tool_version=snpeff_options['version'])
    output_file = job.fileStore.writeGlobalFile(snpeff_file.name)
    export_results(job,
                   output_file,
                   snpeff_file.name,
                   univ_options,
                   subfolder='mutations/snpeffed')

    job.fileStore.logToMaster('Ran snpeff on %s successfully' %
                              univ_options['patient'])
    return output_file
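export_results is ProTECT's convenience for copying results out of the job store; its signature differs between the old three-argument and newer four-argument calls in this listing. A rough sketch of the newer form, where 'output_folder' is an assumed univ_options key, not one confirmed by these examples:

import os
import shutil

def export_results(job, fsid, file_path, univ_options, subfolder=None):
    # Sketch only: stage the file out of the job store and copy it into the
    # patient's output tree. 'output_folder' is an assumed key.
    output_dir = os.path.join(univ_options['output_folder'], univ_options['patient'],
                              subfolder if subfolder else '')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    staged = job.fileStore.readGlobalFile(fsid)
    shutil.copy(staged, os.path.join(output_dir, os.path.basename(file_path)))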
Example #6
def unmerge(job, input_vcf, tool_name, chromosomes, tool_options,
            univ_options):
    """
    Un-merge a vcf file into per-chromosome vcfs.

    :param str input_vcf: Input vcf
    :param str tool_name: The name of the mutation caller
    :param list chromosomes: List of chromosomes to retain
    :param dict tool_options: Options specific to the mutation caller
    :param dict univ_options: Dict of universal options used by almost all tools
    :return: dict of fsIDs, one for each chromosomal vcf
    :rtype: dict
    """
    work_dir = os.getcwd()
    input_files = {
        'input.vcf': input_vcf,
        'genome.fa.fai.tar.gz': tool_options['genome_fai']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)

    input_files['genome.fa.fai'] = untargz(input_files['genome.fa.fai.tar.gz'],
                                           work_dir)

    read_chromosomes = defaultdict()
    with open(input_files['input.vcf'], 'r') as in_vcf:
        header = []
        for line in in_vcf:
            if line.startswith('#'):
                header.append(line)
                continue
            line = line.strip()
            chrom = line.split()[0]
            if chrom in read_chromosomes:
                print(line, file=read_chromosomes[chrom])
            else:
                read_chromosomes[chrom] = open(
                    os.path.join(os.getcwd(), chrom + '.vcf'), 'w')
                print(''.join(header), file=read_chromosomes[chrom], end='')
                print(line, file=read_chromosomes[chrom])
    # Process chromosomes that had no mutations
    for chrom in set(chromosomes).difference(set(read_chromosomes.keys())):
        read_chromosomes[chrom] = open(
            os.path.join(os.getcwd(), chrom + '.vcf'), 'w')
        print(''.join(header), file=read_chromosomes[chrom], end='')
    outdict = {}
    chroms = set(chromosomes).intersection(set(read_chromosomes.keys()))
    for chrom, chromvcf in read_chromosomes.items():
        chromvcf.close()
        if chrom not in chroms:
            continue
        outdict[chrom] = job.fileStore.writeGlobalFile(chromvcf.name)
        export_results(job,
                       outdict[chrom],
                       chromvcf.name,
                       univ_options,
                       subfolder='mutations/' + tool_name)
    return outdict
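In a Toil workflow, unmerge would typically run as a follow-on of the caller's merge step. A hypothetical wiring, where merge_job, chromosomes, and the option dicts are stand-ins for objects built elsewhere in the pipeline:

# Hypothetical wiring: merge_job's return value is the merged vcf's fsID.
unmerge_job = merge_job.addFollowOnJobFn(unmerge, merge_job.rv(), 'mutect',
                                         chromosomes, mutect_options, univ_options)
per_chrom_vcfs = unmerge_job.rv()  # resolves to the dict of fsIDs at run time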
Example #7
def run_phlat(job, fastqs, sample_type, univ_options, phlat_options):
    """
    Run PHLAT on a pair of input fastqs of type `sample_type`.

    :param list fastqs: List of input fastq files
    :param str sample_type: Description of the sample type to inject into the file name.
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict phlat_options: Options specific to PHLAT
    :return: fsID for the HLA haplotype called from the input fastqs
    :rtype: toil.fileStore.FileID
    """
    job.fileStore.logToMaster('Running phlat on %s:%s' %
                              (univ_options['patient'], sample_type))
    work_dir = os.getcwd()
    input_files = {
        'input_1.fastq': fastqs[0],
        'input_2.fastq': fastqs[1],
        'phlat_index.tar.gz': phlat_options['index']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)
    # Handle gzipped files
    gz = '.gz' if is_gzipfile(input_files['input_1.fastq']) else ''
    if gz:
        for read_file in 'input_1.fastq', 'input_2.fastq':
            os.symlink(read_file, read_file + gz)
            input_files[read_file + gz] = input_files[read_file] + gz
    # Untar the index
    input_files['phlat_index'] = untargz(input_files['phlat_index.tar.gz'],
                                         work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = [
        '-1',
        input_files['input_1.fastq' + gz],
        '-2',
        input_files['input_2.fastq' + gz],
        '-index',
        input_files['phlat_index'],
        '-b2url',
        '/usr/local/bin/bowtie2',
        '-tag',
        sample_type,
        '-e',
        '/home/phlat-1.0',  # Phlat directory home
        '-o',
        '/data',  # Output directory
        '-p',
        str(phlat_options['n'])
    ]  # Number of threads
    docker_call(tool='phlat',
                tool_parameters=parameters,
                work_dir=work_dir,
                dockerhub=univ_options['dockerhub'],
                tool_version=phlat_options['version'])
    output_file = job.fileStore.writeGlobalFile(''.join(
        [work_dir, '/', sample_type, '_HLA.sum']))
    return output_file
Example #8
    def _get_test_rsem_file(job, test_src_folder):
        """
        Get the test rsem file and write to jobstore

        :return: FSID for the rsem file
        """
        rsem_file = os.path.join(test_src_folder, 'test_inputs/test_rsem_quant.tsv.tar.gz')
        rsem_file = untargz(rsem_file, os.getcwd())
        return job.fileStore.writeGlobalFile(rsem_file)
Example #9
    def _get_test_haplotype_file(job, test_src_folder):
        """
        Get the test haplotype file and write to jobstore

        :return: FSID for the MHC file
        """
        rna_haplotype = os.path.join(test_src_folder, 'test_inputs/test_mhc_haplotype.sum.tar.gz')
        rna_haplotype = untargz(rna_haplotype, os.getcwd())
        return job.fileStore.writeGlobalFile(rna_haplotype)
Example #10
def run_filter_radia(job, bams, radia_file, univ_options, radia_options, chrom):
    """
    This module will run filterradia on the RNA and DNA bams.

    ARGUMENTS
    1. bams: REFER ARGUMENTS of run_radia()
    2. radia_file: <JSid of vcf generated by run_radia()>
    3. univ_options: REFER ARGUMENTS of run_radia()
    4. radia_options: REFER ARGUMENTS of run_radia()
    5. chrom: REFER ARGUMENTS of run_radia()

    RETURN VALUES
    1. output_file: <JSid of radia_filtered_CHROM.vcf>
    """
    job.fileStore.logToMaster('Running filter-radia on %s:%s' % (univ_options['patient'], chrom))
    work_dir = os.getcwd()
    input_files = {
        'rna.bam': bams['tumor_rna'],
        'rna.bam.bai': bams['tumor_rnai'],
        'tumor.bam': bams['tumor_dna'],
        'tumor.bam.bai': bams['tumor_dnai'],
        'normal.bam': bams['normal_dna'],
        'normal.bam.bai': bams['normal_dnai'],
        'radia.vcf': radia_file,
        'genome.fa.tar.gz': radia_options['genome_fasta'],
        'genome.fa.fai.tar.gz': radia_options['genome_fai']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    filterradia_log = ''.join([work_dir, '/radia_filtered_', chrom, '_radia.log'])
    parameters = [univ_options['patient'],  # shortID
                  chrom.lstrip('chr'),
                  input_files['radia.vcf'],
                  '/data',
                  '/home/radia/scripts',
                  '-d', '/home/radia/data/hg19/snp135',
                  '-r', '/home/radia/data/hg19/retroGenes/',
                  '-p', '/home/radia/data/hg19/pseudoGenes/',
                  '-c', '/home/radia/data/hg19/cosmic/',
                  '-t', '/home/radia/data/hg19/gaf/2_1',
                  '--noSnpEff',
                  '--noBlacklist',
                  '--noTargets',
                  '--noRnaBlacklist',
                  '-f', input_files['genome.fa'],
                  '--log=INFO',
                  '-g', docker_path(filterradia_log)]
    docker_call(tool='filterradia', tool_parameters=parameters,
                work_dir=work_dir, dockerhub=univ_options['dockerhub'])
    output_file = ''.join([work_dir, '/', chrom, '.vcf'])
    os.rename(''.join([work_dir, '/', univ_options['patient'], '_', chrom, '.vcf']), output_file)
    export_results(job, output_file, univ_options, subfolder='mutations/radia')
    output_file = job.fileStore.writeGlobalFile(output_file)
    return output_file
Example #11
    def _get_test_haplotype_file(job, test_src_folder):
        """
        Get the test haplotype file and write to jobstore

        :return: FSID for the MHC file
        """
        rna_haplotype = os.path.join(test_src_folder, 'test_inputs/test_mhc_haplotype.sum.tar.gz')
        rna_haplotype = untargz(rna_haplotype, os.getcwd())
        return job.fileStore.writeGlobalFile(rna_haplotype)
Example #12
def run_mutect_perchrom(job, tumor_bam, normal_bam, univ_options, mutect_options, chrom):
    """
    Run MuTect call on a single chromosome in the input bams.

    :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
    :param dict normal_bam: Dict of bam and bai for normal DNA-Seq
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict mutect_options: Options specific to MuTect
    :param str chrom: Chromosome to process
    :return: fsID for the chromosome vcf
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
        'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
        'genome.fa.tar.gz': mutect_options['genome_fasta'],
        'genome.fa.fai.tar.gz': mutect_options['genome_fai'],
        'genome.dict.tar.gz': mutect_options['genome_dict'],
        'cosmic.vcf.tar.gz': mutect_options['cosmic_vcf'],
        'cosmic.vcf.idx.tar.gz': mutect_options['cosmic_idx'],
        'dbsnp.vcf.gz': mutect_options['dbsnp_vcf'],
        'dbsnp.vcf.idx.tar.gz': mutect_options['dbsnp_idx']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    # dbsnp.vcf should be bgzipped, but all others should be tar.gz'd
    input_files['dbsnp.vcf'] = gunzip(input_files['dbsnp.vcf.gz'])
    for key in ('genome.fa', 'genome.fa.fai', 'genome.dict', 'cosmic.vcf', 'cosmic.vcf.idx',
                'dbsnp.vcf.idx'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    mutout = ''.join([work_dir, '/', chrom, '.out'])
    mutvcf = ''.join([work_dir, '/', chrom, '.vcf'])
    parameters = ['-R', input_files['genome.fa'],
                  '--cosmic', input_files['cosmic.vcf'],
                  '--dbsnp', input_files['dbsnp.vcf'],
                  '--input_file:normal', input_files['normal.bam'],
                  '--input_file:tumor', input_files['tumor.bam'],
                  # '--tumor_lod', str(10),
                  # '--initial_tumor_lod', str(4.0),
                  '-L', chrom,
                  '--out', docker_path(mutout),
                  '--vcf', docker_path(mutvcf)
                  ]
    java_xmx = mutect_options['java_Xmx'] if mutect_options['java_Xmx'] \
        else univ_options['java_Xmx']
    docker_call(tool='mutect', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'], java_xmx=java_xmx,
                tool_version=mutect_options['version'])
    output_file = job.fileStore.writeGlobalFile(mutvcf)
    export_results(job, output_file, mutvcf, univ_options, subfolder='mutations/mutect')

    job.fileStore.logToMaster('Ran MuTect on %s:%s successfully' % (univ_options['patient'], chrom))
    return output_file
Example #13
    def _get_test_rsem_file(job, test_src_folder):
        """
        Get the test rsem file and write to jobstore

        :return: FSID for the rsem file
        """
        rsem_file = os.path.join(test_src_folder,
                                 'test_inputs/test_rsem_quant.tsv.tar.gz')
        rsem_file = untargz(rsem_file, os.getcwd())
        return job.fileStore.writeGlobalFile(rsem_file)
Example #14
def run_bwa(job, fastqs, sample_type, univ_options, bwa_options):
    """
    Align a pair of fastqs with bwa.

    :param list fastqs: The input fastqs for alignment
    :param str sample_type: Description of the sample to inject into the filename
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict bwa_options: Options specific to bwa
    :return: fsID for the generated sam
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    input_files = {
        'dna_1.fastq': fastqs[0],
        'dna_2.fastq': fastqs[1],
        'bwa_index.tar.gz': bwa_options['index']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)
    # Handle gzipped file
    gz = '.gz' if is_gzipfile(input_files['dna_1.fastq']) else ''
    if gz:
        for read_file in 'dna_1.fastq', 'dna_2.fastq':
            os.symlink(read_file, read_file + gz)
            input_files[read_file + gz] = input_files[read_file] + gz
    # Untar the index
    input_files['bwa_index'] = untargz(input_files['bwa_index.tar.gz'],
                                       work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = [
        'mem',
        '-t',
        str(bwa_options['n']),
        '-v',
        '1',  # Don't print INFO messages to the stderr
        '/'.join([input_files['bwa_index'], univ_options['ref']]),
        input_files['dna_1.fastq' + gz],
        input_files['dna_2.fastq' + gz]
    ]
    with open(''.join([work_dir, '/', sample_type, '.sam']), 'w') as samfile:
        docker_call(tool='bwa',
                    tool_parameters=parameters,
                    work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'],
                    outfile=samfile,
                    tool_version=bwa_options['version'])
    # samfile.name retains the path info
    output_file = job.fileStore.writeGlobalFile(samfile.name)

    job.fileStore.logToMaster('Ran bwa on %s:%s successfully' %
                              (univ_options['patient'], sample_type))
    return output_file
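The gzip handling hinges on is_gzipfile. A minimal sketch that checks the two-byte gzip magic number, which is presumably all the real helper needs to do:

def is_gzipfile(filename):
    # Sketch: gzip streams start with the magic bytes 0x1f 0x8b.
    with open(filename, 'rb') as f:
        return f.read(2) == b'\x1f\x8b'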
Example #15
def run_strelka_full(job, tumor_bam, normal_bam, univ_options,
                     strelka_options):
    """
    Run strelka on the DNA bams.

    :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
    :param dict normal_bam: Dict of bam and bai for normal DNA-Seq
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict strelka_options: Options specific to strelka
    :return: Dict of fsIDs for the snv and indel prediction files
             output_dict:
                 |-'snvs': fsID
                 +-'indels': fsID
    :rtype: dict
    """
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
        'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
        'genome.fa.tar.gz': strelka_options['genome_fasta'],
        'genome.fa.fai.tar.gz': strelka_options['genome_fai'],
        'config.ini.tar.gz': strelka_options['config_file']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)

    for key in ('genome.fa', 'genome.fa.fai', 'config.ini'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {
        key: docker_path(path)
        for key, path in list(input_files.items())
    }

    parameters = [
        input_files['config.ini'], input_files['tumor.bam'],
        input_files['normal.bam'], input_files['genome.fa'],
        str(job.cores)
    ]
    docker_call(tool='strelka',
                tool_parameters=parameters,
                work_dir=work_dir,
                dockerhub=univ_options['dockerhub'],
                tool_version=strelka_options['version'])
    output_dict = {}
    for mutation_type in ['snvs', 'indels']:
        output_dict[mutation_type] = job.fileStore.writeGlobalFile(
            os.path.join(work_dir, 'strelka_out', 'results',
                         'passed.somatic.' + mutation_type + '.vcf'))
    job.fileStore.logToMaster('Ran strelka on %s successfully' %
                              univ_options['patient'])
    return output_dict
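All of these jobs shell out through docker_call. A much-simplified sketch of what such a wrapper might do; the real ProTECT wrapper certainly handles mounts, java options, and cleanup more carefully:

import subprocess

def docker_call(tool, tool_parameters, work_dir, dockerhub,
                outfile=None, tool_version='latest', java_xmx=None):
    # Sketch only: run the tool's image with work_dir mounted at /data,
    # optionally redirecting stdout to a file as the strelka/bwa examples do.
    # java_xmx is accepted for signature parity but ignored in this sketch.
    image = '{}/{}:{}'.format(dockerhub, tool, tool_version)
    call = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir), image]
    call.extend(tool_parameters)
    subprocess.check_call(call, stdout=outfile)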
Example #16
def run_rsem(job, rna_bam, univ_options, rsem_options):
    """
    Run rsem on the input RNA bam.

    :param toil.fileStore.FileID rna_bam: fsID of a transcriptome bam generated by STAR
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict rsem_options: Options specific to rsem
    :return: Dict of gene- and isoform-level expression calls
             output_files:
                 |- 'rsem.genes.results': fsID
                 +- 'rsem.isoforms.results': fsID
    :rtype: dict
    """
    work_dir = os.getcwd()
    input_files = {
        'star_transcriptome.bam': rna_bam,
        'rsem_index.tar.gz': rsem_options['index']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)

    input_files['rsem_index'] = untargz(input_files['rsem_index.tar.gz'],
                                        work_dir)
    input_files = {
        key: docker_path(path)
        for key, path in list(input_files.items())
    }

    parameters = [
        '--paired-end', '-p',
        str(20), '--bam', input_files['star_transcriptome.bam'],
        '--no-bam-output',
        '/'.join([input_files['rsem_index'], univ_options['ref']]), 'rsem'
    ]
    docker_call(tool='rsem',
                tool_parameters=parameters,
                work_dir=work_dir,
                dockerhub=univ_options['dockerhub'],
                tool_version=rsem_options['version'])
    output_files = {}
    for filename in ('rsem.genes.results', 'rsem.isoforms.results'):
        output_files[filename] = job.fileStore.writeGlobalFile('/'.join(
            [work_dir, filename]))
        export_results(job,
                       output_files[filename],
                       '/'.join([work_dir, filename]),
                       univ_options,
                       subfolder='expression')
    job.fileStore.logToMaster('Ran rsem on %s successfully' %
                              univ_options['patient'])
    return output_files
Example #17
def run_bwa(job, fastqs, sample_type, univ_options, bwa_options):
    """
    This module aligns the SAMPLE_TYPE dna fastqs to the reference

    ARGUMENTS -- <ST> depicts the sample type. Substitute with 'tumor'/'normal'
    1. fastqs: Dict of list of input WGS/WXS fastqs
         fastqs
              +- '<ST>_dna': [<JSid for 1.fastq> , <JSid for 2.fastq>]
    2. sample_type: string of 'tumor_dna' or 'normal_dna'
    3. univ_options: Dict of universal arguments used by almost all tools
         univ_options
                +- 'dockerhub': <dockerhub to use>
    4. bwa_options: Dict of parameters specific to bwa
         bwa_options
              |- 'tool_index': <JSid for the bwa index tarball>
              +- 'n': <number of threads to allocate>

    RETURN VALUES
    1. output_file: <JSid for the aligned sam>

    This module corresponds to nodes 3 and 4 on the tree
    """
    job.fileStore.logToMaster('Running bwa on %s:%s' % (univ_options['patient'], sample_type))
    work_dir = os.getcwd()
    input_files = {
        'dna_1.fastq': fastqs[0],
        'dna_2.fastq': fastqs[1],
        'bwa_index.tar.gz': bwa_options['tool_index']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    # Handle gzipped file
    gz = '.gz' if is_gzipfile(input_files['dna_1.fastq']) else ''
    if gz:
        for read_file in 'dna_1.fastq', 'dna_2.fastq':
            os.symlink(read_file, read_file + gz)
            input_files[read_file + gz] = input_files[read_file] + gz
    # Untar the index
    input_files['bwa_index'] = untargz(input_files['bwa_index.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = ['mem',
                  '-t', str(bwa_options['n']),
                  '-v', '1',  # Don't print INFO messages to the stderr
                  '/'.join([input_files['bwa_index'], 'hg19']),
                  input_files['dna_1.fastq' + gz],
                  input_files['dna_2.fastq' + gz]]
    with open(''.join([work_dir, '/', sample_type, '_aligned.sam']), 'w') as samfile:
        docker_call(tool='bwa', tool_parameters=parameters, work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'], outfile=samfile)
    # samfile.name retains the path info
    output_file = job.fileStore.writeGlobalFile(samfile.name)
    return output_file
Example #18
def run_rsem(job, rna_bam, univ_options, rsem_options):
    """
    This module will run rsem on the RNA Bam file.

    ARGUMENTS
    1. rna_bam: <JSid of rnaAligned.toTranscriptome.out.bam>
    2. univ_options: Dict of universal arguments used by almost all tools
         univ_options
                +- 'dockerhub': <dockerhub to use>
    3. rsem_options: Dict of parameters specific to rsem
         rsem_options
              |- 'tool_index': <JSid for the rsem index tarball>
              +- 'n': <number of threads to allocate>

    RETURN VALUES
    1. output_files: Dict of gene- and isoform-level expression calls
         output_files
               |- 'rsem.genes.results': <JSid>
               +- 'rsem.isoforms.results': <JSid>

    This module corresponds to node 9 on the tree
    """
    job.fileStore.logToMaster('Running rsem on %s' % univ_options['patient'])
    work_dir = os.getcwd()
    input_files = {
        'star_transcriptome.bam': rna_bam,
        'rsem_index.tar.gz': rsem_options['tool_index']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)

    input_files['rsem_index'] = untargz(input_files['rsem_index.tar.gz'],
                                        work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = [
        '--paired-end', '-p',
        str(rsem_options['n']), '--bam', input_files['star_transcriptome.bam'],
        '--no-bam-output', '/'.join([input_files['rsem_index'],
                                     'hg19']), 'rsem'
    ]
    docker_call(tool='rsem',
                tool_parameters=parameters,
                work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    output_files = {}
    for filename in ('rsem.genes.results', 'rsem.isoforms.results'):
        output_files[filename] = job.fileStore.writeGlobalFile('/'.join(
            [work_dir, filename]))
        export_results(job,
                       output_files[filename],
                       '/'.join([work_dir, filename]),
                       univ_options,
                       subfolder='expression')
    return output_files
Example #19
def sample_chromosomes(job, genome_fai_file):
    """
    Get a list of chromosomes in the input data

    :param toil.Job job: job
    :param string genome_fai_file: Job store file ID for the genome fai file
    :return: Chromosomes in the sample
    :rtype: list
    """
    work_dir = os.getcwd()
    genome_fai = untargz(job.fileStore.readGlobalFile(genome_fai_file), work_dir)
    return chromosomes_from_fai(genome_fai)
Example #20
def sample_chromosomes(job, genome_fai_file):
    """
    Get a list of chromosomes in the input data

    :param toil.Job job: job
    :param string genome_fai_file: Job store file ID for the genome fai file
    :return: Chromosomes in the sample
    :rtype: list
    """
    work_dir = os.getcwd()
    genome_fai = untargz(job.fileStore.readGlobalFile(genome_fai_file), work_dir)
    return chromosomes_from_fai(genome_fai)
Example #21
def run_transgene(job, snpeffed_file, rna_bam, univ_options, transgene_options):
    """
    This module will run transgene on the input vcf file from the aggregator and produce the
    peptides for MHC prediction

    ARGUMENTS
    1. snpeffed_file: <JSid for snpeffed vcf>
    2. univ_options: Dict of universal arguments used by almost all tools
         univ_options
                +- 'dockerhub': <dockerhub to use>
    3. transgene_options: Dict of parameters specific to transgene
         transgene_options
                +- 'gencode_peptide_fasta': <JSid for the gencode protein fasta>

    RETURN VALUES
    1. output_files: Dict of transgened n-mer peptide fastas and their map files
         output_files
                |- 'transgened_tumor_9_mer_snpeffed.faa': <JSid>
                |- 'transgened_tumor_9_mer_snpeffed.faa.map': <JSid>
                |- 'transgened_tumor_10_mer_snpeffed.faa': <JSid>
                |- 'transgened_tumor_10_mer_snpeffed.faa.map': <JSid>
                |- 'transgened_tumor_15_mer_snpeffed.faa': <JSid>
                +- 'transgened_tumor_15_mer_snpeffed.faa.map': <JSid>

    This module corresponds to node 17 on the tree
    """
    job.fileStore.logToMaster('Running transgene on %s' % univ_options['patient'])
    work_dir = os.getcwd()
    rna_bam_key = 'rnaAligned.sortedByCoord.out.bam'  # to reduce next line size
    input_files = {
        'snpeffed_muts.vcf': snpeffed_file,
        'rna.bam': rna_bam[rna_bam_key]['rna_fix_pg_sorted.bam'],
        'rna.bam.bai': rna_bam[rna_bam_key]['rna_fix_pg_sorted.bam.bai'],
        'pepts.fa.tar.gz': transgene_options['gencode_peptide_fasta']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    input_files['pepts.fa'] = untargz(input_files['pepts.fa.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = ['--peptides', input_files['pepts.fa'],
                  '--snpeff', input_files['snpeffed_muts.vcf'],
                  '--rna_file', input_files['rna.bam'],
                  '--prefix', 'transgened',
                  '--pep_lens', '9,10,15']
    docker_call(tool='transgene', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    output_files = defaultdict()
    for peplen in ['9', '10', '15']:
        peptfile = '_'.join(['transgened_tumor', peplen, 'mer_snpeffed.faa'])
        mapfile = '_'.join(['transgened_tumor', peplen, 'mer_snpeffed.faa.map'])
        export_results(job, peptfile, univ_options, subfolder='peptides')
        export_results(job, mapfile, univ_options, subfolder='peptides')
        output_files[peptfile] = job.fileStore.writeGlobalFile(os.path.join(work_dir, peptfile))
        output_files[mapfile] = job.fileStore.writeGlobalFile(os.path.join(work_dir, mapfile))
    os.rename('transgened_transgened.vcf', 'mutations.vcf')
    export_results(job, 'mutations.vcf', univ_options, subfolder='mutations/transgened')
    return output_files
Example #22
def sample_chromosomes(job, genome_fai_file):
    """
    Get a list of chromosomes in the input data.

    :param toil.fileStore.FileID genome_fai_file: Job store file ID for the genome fai file
    :return: Chromosomes in the sample
    :rtype: list[str]
    """
    work_dir = os.getcwd()
    genome_fai = untargz(job.fileStore.readGlobalFile(genome_fai_file),
                         work_dir)
    return chromosomes_from_fai(genome_fai)
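Example #30 below inlines the fai parsing that chromosomes_from_fai encapsulates here; factored out, that same logic is simply:

def chromosomes_from_fai(genome_fai):
    # The first column of a samtools faidx index is the sequence name.
    chromosomes = []
    with open(genome_fai) as fai_file:
        for line in fai_file:
            chromosomes.append(line.strip().split()[0])
    return chromosomes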
Example #23
def run_phlat(job, fastqs, sample_type, univ_options, phlat_options):
    """
    This module will run PHLAT on SAMPLE_TYPE fastqs.

    ARGUMENTS -- <ST> depicts the sample type. Substitute with 'tumor_dna',
                 'normal_dna', or 'tumor_rna'
    1. fastqs: Dict of list of input WGS/WXS fastqs
         fastqs
              +- '<ST>': [<JSid for 1.fastq> , <JSid for 2.fastq>]
    2. sample_type: string of 'tumor_dna', 'normal_dna', or 'tumor_rna'
    3. univ_options: Dict of universal arguments used by almost all tools
         univ_options
                +- 'dockerhub': <dockerhub to use>
    4. phlat_options: Dict of parameters specific to phlat
         phlat_options
              |- 'tool_index': <JSid for the PHLAT index tarball>
              +- 'n': <number of threads to allocate>

    RETURN VALUES
    1. output_file: <JSid for the allele predictions for ST>

    This module corresponds to nodes 5, 6 and 7 on the tree
    """
    job.fileStore.logToMaster('Running phlat on %s:%s' % (univ_options['patient'], sample_type))
    print(phlat_options, file=sys.stderr)
    work_dir = os.getcwd()
    input_files = {
        'input_1.fastq': fastqs[0],
        'input_2.fastq': fastqs[1],
        'phlat_index.tar.gz': phlat_options['tool_index']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    # Handle gzipped files
    gz = '.gz' if is_gzipfile(input_files['input_1.fastq']) else ''
    if gz:
        for read_file in 'input_1.fastq', 'input_2.fastq':
            os.symlink(read_file, read_file + gz)
            input_files[read_file + gz] = input_files[read_file] + gz
    # Untar the index
    input_files['phlat_index'] = untargz(input_files['phlat_index.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = ['-1', input_files['input_1.fastq' + gz],
                  '-2', input_files['input_2.fastq' + gz],
                  '-index', input_files['phlat_index'],
                  '-b2url', '/usr/local/bin/bowtie2',
                  '-tag', sample_type,
                  '-e', '/home/phlat-1.0',  # Phlat directory home
                  '-o', '/data',  # Output directory
                  '-p', str(phlat_options['n'])]  # Number of threads
    docker_call(tool='phlat', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    output_file = job.fileStore.writeGlobalFile(''.join([work_dir, '/', sample_type, '_HLA.sum']))
    return output_file
Example #24
def run_phlat(job, fastqs, sample_type, univ_options, phlat_options):
    """
    This module will run PHLAT on SAMPLE_TYPE fastqs.

    ARGUMENTS -- <ST> depicts the sample type. Substitute with 'tumor_dna',
                 'normal_dna', or 'tumor_rna'
    1. fastqs: Dict of list of input WGS/WXS fastqs
         fastqs
              +- '<ST>': [<JSid for 1.fastq> , <JSid for 2.fastq>]
    2. sample_type: string of 'tumor_dna', 'normal_dna', or 'tumor_rna'
    3. univ_options: Dict of universal arguments used by almost all tools
         univ_options
                +- 'dockerhub': <dockerhub to use>
    4. phlat_options: Dict of parameters specific to phlat
         phlat_options
              |- 'tool_index': <JSid for the PHLAT index tarball>
              +- 'n': <number of threads to allocate>

    RETURN VALUES
    1. output_file: <JSid for the allele predictions for ST>

    This module corresponds to nodes 5, 6 and 7 on the tree
    """
    job.fileStore.logToMaster('Running phlat on %s:%s' % (univ_options['patient'], sample_type))
    work_dir = os.getcwd()
    input_files = {
        'input_1.fastq': fastqs[0],
        'input_2.fastq': fastqs[1],
        'phlat_index.tar.gz': phlat_options['tool_index']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    # Handle gzipped files
    gz = '.gz' if is_gzipfile(input_files['input_1.fastq']) else ''
    if gz:
        for read_file in 'input_1.fastq', 'input_2.fastq':
            os.symlink(read_file, read_file + gz)
            input_files[read_file + gz] = input_files[read_file] + gz
    # Untar the index
    input_files['phlat_index'] = untargz(input_files['phlat_index.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = ['-1', input_files['input_1.fastq' + gz],
                  '-2', input_files['input_2.fastq' + gz],
                  '-index', input_files['phlat_index'],
                  '-b2url', '/usr/local/bin/bowtie2',
                  '-tag', sample_type,
                  '-e', '/home/phlat-1.0',  # Phlat directory home
                  '-o', '/data',  # Output directory
                  '-p', str(phlat_options['n'])]  # Number of threads
    docker_call(tool='phlat', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    output_file = job.fileStore.writeGlobalFile(''.join([work_dir, '/', sample_type, '_HLA.sum']))
    return output_file
Example #25
def run_muse_perchrom(job, tumor_bam, normal_bam, univ_options, muse_options,
                      chrom):
    """
    This module will run muse on the DNA bams

    ARGUMENTS
    1. tumor_bam: REFER ARGUMENTS of spawn_muse()
    2. normal_bam: REFER ARGUMENTS of spawn_muse()
    3. univ_options: REFER ARGUMENTS of spawn_muse()
    4. muse_options: REFER ARGUMENTS of spawn_muse()
    5. chrom: String containing chromosome name with chr appended

    RETURN VALUES
    1. output_file: <JSid for CHROM.MuSE.txt>

    This module corresponds to node 12 on the tree
    """
    job.fileStore.logToMaster('Running muse on %s:%s' %
                              (univ_options['patient'], chrom))
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
        'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
        'genome.fa.tar.gz': muse_options['genome_fasta'],
        'genome.fa.fai.tar.gz': muse_options['genome_fai']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    output_prefix = os.path.join(work_dir, chrom)

    parameters = [
        'call', '-f', input_files['genome.fa'], '-r', chrom, '-O',
        docker_path(output_prefix), input_files['tumor.bam'],
        input_files['normal.bam']
    ]
    docker_call(tool='muse',
                tool_parameters=parameters,
                work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    outfile = job.fileStore.writeGlobalFile(''.join(
        [output_prefix, '.MuSE.txt']))
    return outfile
Example #26
def run_radia_perchrom(job, bams, univ_options, radia_options, chrom):
    """
    Run RADIA call on a single chromosome in the input bams.

    :param dict bams: Dict of bam and bai for tumor DNA-Seq, normal DNA-Seq and tumor RNA-Seq
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict radia_options: Options specific to RADIA
    :param str chrom: Chromosome to process
    :return: fsID for the chromosome vcf
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    input_files = {
        'rna.bam': bams['tumor_rna'],
        'rna.bam.bai': bams['tumor_rnai'],
        'tumor.bam': bams['tumor_dna'],
        'tumor.bam.bai': bams['tumor_dnai'],
        'normal.bam': bams['normal_dna'],
        'normal.bam.bai': bams['normal_dnai'],
        'genome.fa.tar.gz': radia_options['genome_fasta'],
        'genome.fa.fai.tar.gz': radia_options['genome_fai']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    radia_output = ''.join([work_dir, '/radia_', chrom, '.vcf'])
    radia_log = ''.join([work_dir, '/radia_', chrom, '_radia.log'])
    parameters = [univ_options['patient'],  # shortID
                  chrom,
                  '-n', input_files['normal.bam'],
                  '-t', input_files['tumor.bam'],
                  '-r', input_files['rna.bam'],
                  ''.join(['--rnaTumorFasta=', input_files['genome.fa']]),
                  '-f', input_files['genome.fa'],
                  '-o', docker_path(radia_output),
                  '-i', univ_options['ref'],
                  '-m', input_files['genome.fa'],
                  '-d', '*****@*****.**',
                  '-q', 'Illumina',
                  '--disease', 'CANCER',
                  '-l', 'INFO',
                  '-g', docker_path(radia_log)]
    docker_call(tool='radia', tool_parameters=parameters,
                work_dir=work_dir, dockerhub=univ_options['dockerhub'],
                tool_version=radia_options['version'])
    output_file = job.fileStore.writeGlobalFile(radia_output)

    job.fileStore.logToMaster('Ran radia on %s:%s successfully' % (univ_options['patient'], chrom))
    return output_file
Example #27
def run_strelka_full(job, tumor_bam, normal_bam, univ_options,
                     strelka_options):
    """
    This module will run strelka on the DNA bams.

    ARGUMENTS
    :param dict tumor_bam: REFER ARGUMENTS of spawn_strelka()
    :param dict normal_bam: REFER ARGUMENTS of spawn_strelka()
    :param dict univ_options: REFER ARGUMENTS of spawn_strelka()
    :param dict strelka_options: REFER ARGUMENTS of spawn_strelka()

    RETURN VALUES
    :returns: dict of fsIDs for the snv and indel vcfs
    :rtype: dict
    """
    job.fileStore.logToMaster('Running strelka on %s' %
                              univ_options['patient'])
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
        'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
        'genome.fa.tar.gz': strelka_options['genome_fasta'],
        'genome.fa.fai.tar.gz': strelka_options['genome_fai'],
        'config.ini.tar.gz': strelka_options['strelka_config']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)

    for key in ('genome.fa', 'genome.fa.fai', 'config.ini'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = [
        input_files['config.ini'], input_files['tumor.bam'],
        input_files['normal.bam'], input_files['genome.fa'],
        str(job.cores)
    ]
    docker_call(tool='strelka',
                tool_parameters=parameters,
                work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    output_dict = {}
    for mutation_type in ['snvs', 'indels']:
        output_dict[mutation_type] = job.fileStore.writeGlobalFile(
            os.path.join(work_dir, 'strelka_out', 'results',
                         'passed.somatic.' + mutation_type + '.vcf'))
    return output_dict
Example #28
def run_muse_perchrom(job, tumor_bam, normal_bam, univ_options, muse_options,
                      chrom):
    """
    Run MuSE call on a single chromosome in the input bams.

    :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
    :param dict normal_bam: Dict of bam and bai for normal DNA-Seq
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict muse_options: Options specific to MuSE
    :param str chrom: Chromosome to process
    :return: fsID for the chromosome vcf
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
        'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
        'genome.fa.tar.gz': muse_options['genome_fasta'],
        'genome.fa.fai.tar.gz': muse_options['genome_fai']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    output_prefix = os.path.join(work_dir, chrom)

    parameters = [
        'call', '-f', input_files['genome.fa'], '-r', chrom, '-O',
        docker_path(output_prefix), input_files['tumor.bam'],
        input_files['normal.bam']
    ]
    docker_call(tool='muse',
                tool_parameters=parameters,
                work_dir=work_dir,
                dockerhub=univ_options['dockerhub'],
                tool_version=muse_options['version'])
    outfile = job.fileStore.writeGlobalFile(''.join(
        [output_prefix, '.MuSE.txt']))

    job.fileStore.logToMaster('Ran MuSE on %s:%s successfully' %
                              (univ_options['patient'], chrom))
    return outfile
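The per-chromosome callers are meant to be fanned out by a spawn_* parent job. A hypothetical fan-out using Toil's child-job API, with chromosomes and the bam/option dicts as stand-ins:

# Hypothetical fan-out: one MuSE child job per chromosome, results keyed by name.
perchrom_results = {
    chrom: job.addChildJobFn(run_muse_perchrom, tumor_bam, normal_bam,
                             univ_options, muse_options, chrom).rv()
    for chrom in chromosomes
}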
Example #29
def get_patient_mhc_haplotype(job, patient_dict):
    """
    Convenience function to get the mhc haplotype from the patient dict

    :param dict patient_dict: dict of patient info
    :return: Dict of fsIDs for the MHCI and MHCII allele list files
    :rtype: dict
    """
    haplotype_archive = job.fileStore.readGlobalFile(patient_dict['hla_haplotype_files'])
    haplotype_archive = untargz(haplotype_archive, os.getcwd())
    output_dict = {}
    for filename in 'mhci_alleles.list', 'mhcii_alleles.list':
        output_dict[filename] = job.fileStore.writeGlobalFile(os.path.join(haplotype_archive,
                                                                           filename))
    return output_dict
Example #30
def sample_chromosomes(job, genome_fai_file):
    """
    Get a list of chromosomes in the input data

    :param toil.Job job: job
    :param string genome_fai_file: Job store file ID for the genome fai file
    :return: Chromosomes in the sample
    :rtype: list
    """
    work_dir = os.getcwd()
    genome_fai = untargz(job.fileStore.readGlobalFile(genome_fai_file), work_dir)
    chromosomes = []
    with open(genome_fai) as fai_file:
        for line in fai_file:
            line = line.strip().split()
            chromosomes.append(line[0])
    return chromosomes
Example #31
def run_snpeff(job, merged_mutation_file, univ_options, snpeff_options):
    """
    This module will run snpeff on the aggregated mutation calls.  Currently the only mutations
    called are SNPs hence SnpEff suffices. This node will be replaced in the future with another
    translator.

    ARGUMENTS
    1. merged_mutation_file: <JSid for merged vcf>
    2. univ_options: Dict of universal arguments used by almost all tools
         univ_options
                +- 'dockerhub': <dockerhub to use>
    3. snpeff_options: Dict of parameters specific to snpeff
         snpeff_options
                +- 'tool_index': <JSid for the snpEff index tarball>

    RETURN VALUES
    1. output_file: <JSid for the snpeffed vcf>

    This node corresponds to node 16 on the tree
    """
    job.fileStore.logToMaster('Running snpeff on %s' % univ_options['patient'])
    work_dir = os.getcwd()
    input_files = {
        'merged_mutations.vcf': merged_mutation_file,
        'snpeff_index.tar.gz': snpeff_options['tool_index']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    input_files['snpeff_index'] = untargz(input_files['snpeff_index.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = ['eff',
                  '-dataDir', input_files['snpeff_index'],
                  '-c', '/'.join([input_files['snpeff_index'], 'snpEff_hg19_gencode.config']),
                  '-no-intergenic',
                  '-no-downstream',
                  '-no-upstream',
                  # '-canon',
                  '-noStats',
                  'hg19_gencode',
                  input_files['merged_mutations.vcf']]
    xmx = snpeff_options['java_Xmx'] if snpeff_options['java_Xmx'] else univ_options['java_Xmx']
    with open('/'.join([work_dir, 'mutations.vcf']), 'w') as snpeff_file:
        docker_call(tool='snpeff', tool_parameters=parameters, work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'], java_opts=xmx, outfile=snpeff_file)
    export_results(job, snpeff_file.name, univ_options, subfolder='mutations/snpeffed')
    output_file = job.fileStore.writeGlobalFile(snpeff_file.name)
    return output_file
Example #32
def run_rsem(job, rna_bam, univ_options, rsem_options):
    """
    This module will run rsem on the RNA Bam file.

    ARGUMENTS
    1. rna_bam: <JSid of rnaAligned.toTranscriptome.out.bam>
    2. univ_options: Dict of universal arguments used by almost all tools
         univ_options
                +- 'dockerhub': <dockerhub to use>
    3. rsem_options: Dict of parameters specific to rsem
         rsem_options
              |- 'tool_index': <JSid for the rsem index tarball>
              +- 'n': <number of threads to allocate>

    RETURN VALUES
    1. output_files: Dict of gene- and isoform-level expression calls
         output_files
               |- 'rsem.genes.results': <JSid>
               +- 'rsem.isoforms.results': <JSid>

    This module corresponds to node 9 on the tree
    """
    job.fileStore.logToMaster('Running rsem on %s' % univ_options['patient'])
    work_dir = os.getcwd()
    input_files = {
        'star_transcriptome.bam': rna_bam,
        'rsem_index.tar.gz': rsem_options['tool_index']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    input_files['rsem_index'] = untargz(input_files['rsem_index.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    print(os.listdir('.'), file=sys.stderr)
    parameters = ['--paired-end',
                  '-p', str(rsem_options['n']),
                  '--bam',
                  input_files['star_transcriptome.bam'],
                  '--no-bam-output',
                  '/'.join([input_files['rsem_index'], 'hg19']),
                  'rsem']
    print(parameters, file=sys.stderr)
    docker_call(tool='rsem', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    print(os.listdir('.'), file=sys.stderr)
    output_files = {}
    for filename in ('rsem.genes.results', 'rsem.isoforms.results'):
        output_files[filename] = job.fileStore.writeGlobalFile('/'.join([work_dir, filename]))
        export_results(job, '/'.join([work_dir, filename]), univ_options, subfolder='expression')
    return output_files
Example #33
def unmerge(job, input_vcf, tool_name, tool_options, univ_options):
    """
    Un-merges a vcf file into a file per chromosome.

    :param str input_vcf: Input vcf
    :param str tool_name: The name of the mutation caller
    :param dict tool_options: Options specific to the mutation caller
    :param dict univ_options: Universal options
    :returns: dict of jsIDs, one for each chromosomal vcf
    :rtype: dict
    """
    work_dir = os.getcwd()
    input_files = {
        'input.vcf': input_vcf,
        'genome.fa.fai.tar.gz': tool_options['genome_fai']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    input_files['genome.fa.fai'] = untargz(input_files['genome.fa.fai.tar.gz'], work_dir)

    chromosomes = chromosomes_from_fai(input_files['genome.fa.fai'])

    read_chromosomes = defaultdict()
    with open(input_files['input.vcf'], 'r') as in_vcf:
        header = []
        for line in in_vcf:
            if line.startswith('#'):
                header.append(line)
                continue
            line = line.strip()
            chrom = line.split()[0]
            if chrom in read_chromosomes:
                print(line, file=read_chromosomes[chrom])
            else:
                read_chromosomes[chrom] = open(os.path.join(os.getcwd(), chrom + '.vcf'), 'w')
                print(''.join(header), file=read_chromosomes[chrom], end='')
                print(line, file=read_chromosomes[chrom])
    # Process chromosomes that had no mutations
    for chrom in set(chromosomes).difference(set(read_chromosomes.keys())):
        read_chromosomes[chrom] = open(os.path.join(os.getcwd(), chrom + '.vcf'), 'w')
        print(''.join(header), file=read_chromosomes[chrom], end='')
    outdict = {}
    for chrom, chromvcf in read_chromosomes.items():
        chromvcf.close()
        export_results(job, chromvcf.name, univ_options, subfolder='mutations/' + tool_name)
        outdict[chrom] = job.fileStore.writeGlobalFile(chromvcf.name)
    return outdict
Example #34
def run_somaticsniper_full(job, tumor_bam, normal_bam, univ_options,
                           somaticsniper_options):
    """
    Run SomaticSniper on the DNA bams.

    :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq
    :param dict normal_bam: Dict of bam and bai for normal DNA-Seq
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict somaticsniper_options: Options specific to SomaticSniper
    :return: fsID to the genome-level vcf
    :rtype: toil.fileStore.FileID
    """
    job.fileStore.logToMaster('Running SomaticSniper on %s' %
                              univ_options['patient'])
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
        'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
        'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
        'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    output_file = os.path.join(work_dir, 'somatic-sniper_full.vcf')
    parameters = [
        '-f', input_files['genome.fa'], '-F', 'vcf', '-G', '-L', '-q', '1',
        '-Q', '15', input_files['tumor.bam'], input_files['normal.bam'],
        docker_path(output_file)
    ]
    docker_call(tool='somaticsniper',
                tool_parameters=parameters,
                work_dir=work_dir,
                dockerhub=univ_options['dockerhub'],
                tool_version=somaticsniper_options['version'])
    outfile = job.fileStore.writeGlobalFile(output_file)
    return outfile
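Every example funnels tool execution through docker_call. The sketch below is an assumption about what such a wrapper does at minimum (image naming, the /data mount, stdout redirection); the real helper also manages java options, user IDs, and cleanup.

import os
import subprocess

def docker_call(tool, tool_parameters, work_dir, dockerhub,
                tool_version='latest', outfile=None):
    """
    Sketch: run `dockerhub/tool:tool_version` with work_dir mounted at /data,
    matching the docker_path convention. Illustrative, not the real wrapper.
    """
    image = tool if ':' in tool else ':'.join([tool, tool_version])
    call = ['docker', 'run', '--rm',
            '-v', ':'.join([os.path.abspath(work_dir), '/data']),
            '/'.join([dockerhub, image])] + tool_parameters
    if outfile is not None:
        subprocess.check_call(call, stdout=outfile)
    else:
        subprocess.check_call(call)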
Example #35
0
def run_strelka_full(job, tumor_bam, normal_bam, univ_options, strelka_options):
    """
    This module will run strelka on the DNA bams.

    ARGUMENTS
    :param dict tumor_bam: REFER ARGUMENTS of spawn_strelka()
    :param dict normal_bam: REFER ARGUMENTS of spawn_strelka()
    :param dict univ_options: REFER ARGUMENTS of spawn_strelka()
    :param dict strelka_options: REFER ARGUMENTS of spawn_strelka()

    RETURN VALUES
    :returns: dict of output vcfs for each chromosome
    :rtype: dict
    """
    job.fileStore.logToMaster('Running strelka on %s' % univ_options['patient'])
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
        'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
        'genome.fa.tar.gz': strelka_options['genome_fasta'],
        'genome.fa.fai.tar.gz': strelka_options['genome_fai'],
        'config.ini.tar.gz': strelka_options['strelka_config']
    }
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    for key in ('genome.fa', 'genome.fa.fai', 'config.ini'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = [input_files['config.ini'],
                  input_files['tumor.bam'],
                  input_files['normal.bam'],
                  input_files['genome.fa'],
                  str(job.cores)
                  ]
    docker_call(tool='strelka', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    output_dict = {}
    for mutation_type in ['snvs', 'indels']:
        output_dict[mutation_type] = job.fileStore.writeGlobalFile(os.path.join(
            work_dir, 'strelka_out', 'results', 'passed.somatic.' + mutation_type + '.vcf'))
    return output_dict
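get_files_from_filestore is the other helper these examples assume. A minimal sketch built on Toil's real job.fileStore.readGlobalFile; the docker flag, when set, presumably returns container paths via docker_path:

import os

def get_files_from_filestore(job, input_files, work_dir, docker=False):
    """
    Sketch: materialize each fsID in `input_files` into work_dir under the
    dict key's file name, returning a name -> local path mapping.
    """
    for name, fsid in list(input_files.items()):
        input_files[name] = job.fileStore.readGlobalFile(
            fsid, userPath=os.path.join(work_dir, name))
    if docker:
        input_files = {name: docker_path(path)
                       for name, path in input_files.items()}
    return input_files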
Example #36
0
def run_muse_perchrom(job, tumor_bam, normal_bam, univ_options, muse_options, chrom):
    """
    This module will run muse on the DNA bams

    ARGUMENTS
    1. tumor_bam: REFER ARGUMENTS of spawn_muse()
    2. normal_bam: REFER ARGUMENTS of spawn_muse()
    3. univ_options: REFER ARGUMENTS of spawn_muse()
    4. muse_options: REFER ARGUMENTS of spawn_muse()
    5. chrom: String containing chromosome name with chr appended

    RETURN VALUES
    1. output_files: <JSid for CHROM.MuSE.txt>

    This module corresponds to node 12 on the tree
    """
    job.fileStore.logToMaster('Running muse on %s:%s' % (univ_options['patient'], chrom))
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
        'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
        'genome.fa.tar.gz': muse_options['genome_fasta'],
        'genome.fa.fai.tar.gz': muse_options['genome_fai']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    output_prefix = os.path.join(work_dir, chrom)

    parameters = ['call',
                  '-f', input_files['genome.fa'],
                  '-r', chrom,
                  '-O', docker_path(output_prefix),
                  input_files['tumor.bam'],
                  input_files['normal.bam']]
    docker_call(tool='muse', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    outfile = job.fileStore.writeGlobalFile(''.join([output_prefix, '.MuSE.txt']))
    return outfile
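run_muse_perchrom is meant to be fanned out over chromosomes by a spawn_muse parent (node 11-style in these trees). A schematic of that Toil pattern using the real addChildJobFn/addFollowOnJobFn/rv APIs; merge_muse_calls, the chromosome list, and the resource hints are placeholders:

def spawn_muse(job, tumor_bam, normal_bam, univ_options, muse_options):
    """
    Sketch: one run_muse_perchrom child per chromosome, with a follow-on job
    that merges the per-chromosome promises once all children finish.
    """
    chromosomes = ['chr' + str(c) for c in list(range(1, 23)) + ['X', 'Y']]
    perchrom_outputs = {}
    for chrom in chromosomes:
        perchrom_outputs[chrom] = job.addChildJobFn(
            run_muse_perchrom, tumor_bam, normal_bam, univ_options,
            muse_options, chrom, disk='60G', memory='6G').rv()
    # merge_muse_calls is hypothetical here; see unmerge() for the inverse.
    return job.addFollowOnJobFn(merge_muse_calls, perchrom_outputs,
                                univ_options).rv()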
Example #37
0
def run_somaticsniper_full(job, tumor_bam, normal_bam, univ_options, somaticsniper_options):
    """
    This module will run somaticsniper on the DNA bams.

    ARGUMENTS
    :param dict tumor_bam: REFER ARGUMENTS of spawn_somaticsniper()
    :param dict normal_bam: REFER ARGUMENTS of spawn_somaticsniper()
    :param dict univ_options: REFER ARGUMENTS of spawn_somaticsniper()
    :param dict somaticsniper_options: REFER ARGUMENTS of spawn_somaticsniper()

    RETURN VALUES
    :returns: dict of output vcfs for each chromosome
    :rtype: dict
    """
    job.fileStore.logToMaster('Running somaticsniper on %s' % univ_options['patient'])
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
        'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
        'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
        'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    output_file = os.path.join(work_dir, 'somatic-sniper_full.vcf')
    parameters = ['-f', input_files['genome.fa'],
                  '-F', 'vcf',
                  '-G',
                  '-L',
                  '-q', '1',
                  '-Q', '15',
                  input_files['tumor.bam'],
                  input_files['normal.bam'],
                  docker_path(output_file)]
    docker_call(tool='somaticsniper', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    outfile = job.fileStore.writeGlobalFile(output_file)
    return outfile
Example #38
0
def assess_mhc_genes(job, isoform_expression, rna_haplotype, univ_options, mhc_genes_options):
    """
    This module will assess the prevalence of the various genes in the MHC pathway and return a
    report as a fixed-width text file.
    :param isoform_expression: Isoform expression from run_rsem
    :param rna_haplotype: PHLAT output from running on rna
    :param univ_options: Universal options for the pipeline
    :param mhc_genes_options: Options specific to this module
    :return: fsID for the MHC pathway report file
    """
    job.fileStore.logToMaster('Running mhc gene assessment on %s' % univ_options['patient'])
    work_dir = os.getcwd()
    input_files = {
        'rsem_quant.tsv': isoform_expression,
        'rna_haplotype.sum': rna_haplotype,
        'mhc_genes.json.tar.gz': mhc_genes_options['genes_file']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    input_files['mhc_genes.json'] = untargz(input_files['mhc_genes.json.tar.gz'], work_dir)

    # Read in the MHC genes
    with open(input_files['mhc_genes.json']) as mhc_file:
        mhc_genes = json.load(mhc_file)

    # Parse the rna phlat file
    with open(input_files['rna_haplotype.sum']) as rna_mhc:
        mhc_alleles = {'HLA_A': [], 'HLA_B': [], 'HLA_C': [], 'HLA_DPA': [], 'HLA_DQA': [],
                       'HLA_DPB': [], 'HLA_DQB': [], 'HLA_DRB': []}
        mhc_alleles = parse_phlat_file(rna_mhc, mhc_alleles)

    # Process the isoform expressions
    gene_expressions = Counter()
    with open(input_files['rsem_quant.tsv']) as rsem_file:
        line = rsem_file.readline()
        line = line.strip().split()
        assert line == ['transcript_id', 'gene_id', 'length', 'effective_length', 'expected_count',
                        'TPM', 'FPKM', 'IsoPct']
        for line in rsem_file:
            line = line.strip().split()
            gene_expressions[line[1]] += float(line[5])

    with open(os.path.join(work_dir, 'mhc_pathway_report.txt'), 'w') as mpr:
        for section in mhc_genes:
            print(section.center(48, ' '), file=mpr)
            print("{:12}{:12}{:12}{:12}".format("Gene", "Threshold", "Observed", "Result"),
                  file=mpr)
            if section == 'MHCI loading':
                for mhci_allele in 'HLA_A', 'HLA_B', 'HLA_C':
                    num_alleles = len(mhc_alleles[mhci_allele])
                    print("{:12}{:12}{:12}{:12}".format(mhci_allele, '2', num_alleles,
                                                        'FAIL' if num_alleles == 0
                                                        else 'LOW' if num_alleles == 1
                                                        else 'PASS'), file=mpr)
            elif section == 'MHCII loading':
                # TODO DP alleles
                for mhcii_allele in ('HLA_DQA', 'HLA_DQB', 'HLA_DRA', 'HLA_DRB'):
                    if mhcii_allele != 'HLA_DRA':
                        num_alleles = len(mhc_alleles[mhcii_allele])
                        print("{:12}{:12}{:12}{:12}".format(mhcii_allele, 2, num_alleles,
                                                            'FAIL' if num_alleles == 0 else
                                                            'LOW' if num_alleles == 1 else
                                                            'PASS'), file=mpr)
                    else:
                        # FIXME This is hardcoded for now. We need to change this.
                        print("{:12}{:<12}{:<12}{:12}".format(
                                    'HLA_DRA', gene_expressions['ENSG00000204287.9'], '69.37',
                                    'LOW' if gene_expressions['ENSG00000204287.9'] <= 69.37
                                    else 'PASS'), file=mpr)
            for gene, ensgene, first_quart in mhc_genes[section]:
                result = 'LOW' if gene_expressions[ensgene] <= float(first_quart) else 'PASS'
                print("{:12}{:<12}{:<12}{:12}".format(gene, float(first_quart),
                                                      gene_expressions[ensgene], result), file=mpr)
            print('', file=mpr)
    export_results(job, mpr.name, univ_options, subfolder='reports')
    output_file = job.fileStore.writeGlobalFile(mpr.name)
    return output_file
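The isoform-to-gene rollup above is simply a Counter keyed on the gene_id column (index 1) summing the TPM column (index 5). A self-contained toy illustration:

from collections import Counter

# Columns: transcript_id, gene_id, length, effective_length, expected_count,
#          TPM, FPKM, IsoPct
rows = [['ENST0001', 'ENSG0001', '1000', '950', '10', '2.5', '1.1', '40'],
        ['ENST0002', 'ENSG0001', '800', '750', '30', '7.5', '3.3', '60'],
        ['ENST0003', 'ENSG0002', '500', '450', '5', '1.0', '0.4', '100']]
gene_expressions = Counter()
for line in rows:
    gene_expressions[line[1]] += float(line[5])
assert gene_expressions['ENSG0001'] == 10.0  # 2.5 + 7.5, summed per gene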
Example #39
0
def spawn_antigen_predictors(job, transgened_files, phlat_files, univ_options, mhc_options):
    """
    Based on the number of alleles obtained from node 14, this module will spawn callers to predict
    MHCI:peptide and MHCII:peptide binding on the peptide files from node 17.  Once all MHC:peptide
    predictions are made, merge them via a follow-on job.

    ARGUMENTS
    1. transgened_files: REFER RETURN VALUE of run_transgene()
    2. phlat_files: REFER RETURN VALUE of merge_phlat_calls()
    3. univ_options: Dict of universal arguments used by almost all tools
         univ_options
                +- 'dockerhub': <dockerhub to use>
    4. mhc_options: Dict of dicts of parameters specific to mhci and mhcii
                    respectively
         mhc_options
              |- 'mhci'
              |     |- 'method_file': <JSid for json file containing data
              |     |                  linking alleles, peptide lengths, and
              |     |                  prediction methods>
              |     +- 'pred': String describing prediction method to use
              +- 'mhcii'
                    |- 'method_file': <JSid for json file containing data
                    |                  linking alleles and prediction methods>
                    +- 'pred': String describing prediction method to use

    RETURN VALUES
    1. tuple of (mhci_preds, mhcii_preds)
         mhci_preds: Dict of return value from running predictions on a given
                     mhc for all peptides of length 9 and 10.
             mhci_preds
                |- <MHC molecule 1>_9_mer.pred: <PromisedJobReturnValue>
                |- <MHC molecule 1>_10_mer.pred: <PromisedJobReturnValue>
                |
                ..
                +- <MHC molecule n>_10_mer.pred: <PromisedJobReturnValue>
         mhcii_preds: Dict of return value from running predictions on a given
                      mhc for all peptides of length 15.
             mhcii_preds
                |- <MHC molecule 1>_15_mer.pred: <PromisedJobReturnValue>
                |
                ..
                +- <MHC molecule n>_15_mer.pred: <PromisedJobReturnValue>

    This module corresponds to node 18 on the tree
    """
    job.fileStore.logToMaster('Running spawn_anti on %s' % univ_options['patient'])
    work_dir = os.getcwd()
    mhci_options, mhcii_options = mhc_options
    pept_files = {
        '9_mer.faa': transgened_files['transgened_tumor_9_mer_snpeffed.faa'],
        '10_mer.faa': transgened_files['transgened_tumor_10_mer_snpeffed.faa'],
        '15_mer.faa': transgened_files['transgened_tumor_15_mer_snpeffed.faa']}
    input_files = {
        'mhci_alleles.list': phlat_files['mhci_alleles.list'],
        'mhcii_alleles.list': phlat_files['mhcii_alleles.list'],
        'mhci_restrictions.json.tar.gz': mhci_options['method_file'],
        'mhcii_restrictions.json.tar.gz': mhcii_options['method_file']}
    input_files = get_files_from_filestore(job, input_files, work_dir)
    for key in ('mhci_restrictions.json', 'mhcii_restrictions.json'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)

    mhci_alleles, mhcii_alleles = [], []
    with open(input_files['mhci_alleles.list'], 'r') as mhci_file:
        for line in mhci_file:
            mhci_alleles.append(line.strip())
    with open(input_files['mhcii_alleles.list'], 'r') as mhcii_file:
        for line in mhcii_file:
            mhcii_alleles.append(line.strip())
    # This file contains the list of allele:pept length combinations supported
    # by each prediction type.
    with open(input_files['mhci_restrictions.json'], 'r') as restfile:
        mhci_restrictions = json.load(restfile)
    with open(input_files['mhcii_restrictions.json'], 'r') as restfile:
        mhcii_restrictions = json.load(restfile)
    # Make a regexp to convert non alphanumeric characters in HLA names to _
    strip_allele_re = re.compile('[^A-Z0-9]')
    # For each mhci allele:peptfile combination, spawn a job and store the job handle in the dict.
    # Then do the same for mhcii
    mhci_preds, mhcii_preds = {}, {}
    for allele in mhci_alleles:
        stripped_allele = re.sub(strip_allele_re, '_', allele)
        for peptfile in ['9_mer.faa', '10_mer.faa']:
            peplen = peptfile.split('_')[0]
            # Ensure that the allele is among the list of accepted alleles
            try:
                if not mhci_restrictions[allele][peplen]:
                    continue
            except KeyError:
                continue
            predfile = ''.join([stripped_allele, '_', peplen, '_mer.pred'])
            mhci_preds[predfile] = job.addChildJobFn(predict_mhci_binding, pept_files[peptfile],
                                                     allele, peplen, univ_options,
                                                     mhci_options, disk='100M', memory='100M',
                                                     cores=1).rv()
    for allele in mhcii_alleles:
        stripped_allele = re.sub(strip_allele_re, '_', allele)
        predfile = ''.join([stripped_allele, '_15_mer.pred'])
        if allele not in mhcii_restrictions[mhcii_options['pred']]:
            continue
        mhcii_preds[predfile] = job.addChildJobFn(predict_mhcii_binding, pept_files['15_mer.faa'],
                                                  allele, univ_options, mhcii_options,
                                                  disk='100M', memory='100M', cores=1).rv()
    return mhci_preds, mhcii_preds
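The strip_allele_re substitution above maps every character outside [A-Z0-9] to an underscore, yielding filesystem-safe prediction file names. For example:

import re

strip_allele_re = re.compile('[^A-Z0-9]')
print(re.sub(strip_allele_re, '_', 'HLA-A*02:01'))    # HLA_A_02_01
print(re.sub(strip_allele_re, '_', 'HLA-DRB1*15:01')) # HLA_DRB1_15_01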
Example #40
0
def run_star(job, fastqs, univ_options, star_options):
    """
    This module uses STAR to align the RNA fastqs to the reference

    ARGUMENTS
    1. fastqs: REFER RETURN VALUE of run_cutadapt()
    2. univ_options: Dict of universal arguments used by almost all tools
         univ_options
              +- 'dockerhub': <dockerhub to use>
    3. star_options: Dict of parameters specific to STAR
         star_options
             |- 'tool_index': <JSid for the STAR index tarball>
             +- 'n': <number of threads to allocate>
    RETURN VALUES
    1. output_files: Dict of aligned bams
         output_files
             |- 'rnaAligned.toTranscriptome.out.bam': <JSid>
             +- 'rnaAligned.sortedByCoord.out.bam': Dict of genome bam + bai
                                |- 'rna_fix_pg_sorted.bam': <JSid>
                                +- 'rna_fix_pg_sorted.bam.bai': <JSid>

    This module corresponds to node 9 on the tree
    """
    assert star_options['type'] in ('star', 'starlong')
    job.fileStore.logToMaster('Running STAR on %s' % univ_options['patient'])
    work_dir = os.getcwd()
    input_files = {
        'rna_cutadapt_1.fastq': fastqs[0],
        'rna_cutadapt_2.fastq': fastqs[1],
        'star_index.tar.gz': star_options['tool_index']}
    input_files = get_files_from_filestore(job, input_files, work_dir,
                                           docker=False)
    # Handle gzipped file
    gz = '.gz' if is_gzipfile(input_files['rna_cutadapt_1.fastq']) else ''
    if gz:
        for read_file in 'rna_cutadapt_1.fastq', 'rna_cutadapt_2.fastq':
            os.symlink(read_file, read_file + gz)
            input_files[read_file + gz] = input_files[read_file] + gz
    # Untar the index
    input_files['star_index'] = untargz(input_files['star_index.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = ['--runThreadN', str(star_options['n']),
                  '--genomeDir', input_files['star_index'],
                  '--outFileNamePrefix', 'rna',
                  '--readFilesIn',
                  input_files['rna_cutadapt_1.fastq' + gz],
                  input_files['rna_cutadapt_2.fastq' + gz],
                  '--outSAMattributes', 'NH', 'HI', 'AS', 'NM', 'MD',
                  '--outSAMtype', 'BAM', 'SortedByCoordinate',
                  '--quantMode', 'TranscriptomeSAM']
    if gz:
        parameters.extend(['--readFilesCommand', 'zcat'])
    if star_options['type'] == 'star':
        docker_call(tool='star', tool_parameters=parameters, work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'])
    else:
        docker_call(tool='starlong', tool_parameters=parameters, work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'])
    output_files = defaultdict()
    for bam_file in ['rnaAligned.toTranscriptome.out.bam',
                     'rnaAligned.sortedByCoord.out.bam']:
        output_files[bam_file] = job.fileStore.writeGlobalFile('/'.join([
            work_dir, bam_file]))
    return output_files
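is_gzipfile, used here and again in run_bwa below, only needs to recognize the two gzip magic bytes. A minimal sketch (the pipeline's version may do more validation):

def is_gzipfile(filename):
    """Sketch: True if `filename` begins with the gzip magic bytes 1f 8b."""
    with open(filename, 'rb') as in_file:
        return in_file.read(2) == b'\x1f\x8b'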
Example #41
0
def run_transgene(job,
                  snpeffed_file,
                  rna_bam,
                  univ_options,
                  transgene_options,
                  tumor_dna_bam=None,
                  fusion_calls=None):
    """
    Run transgene on an input snpeffed vcf file and return the peptides for MHC prediction.


    :param toil.fileStore.FileID snpeffed_file: fsID for snpeffed vcf
    :param dict rna_bam: The dict of bams returned by running star
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict transgene_options: Options specific to Transgene
    :param dict tumor_dna_bam: The dict of bams returned by running bwa
    :param fusion_calls: fsID for fusion calls, if any were made
    :return: A dictionary of 9 files (9-, 10-, and 15-mer peptides each for Tumor and Normal and the
             corresponding .map files for the 3 Tumor fastas)
             output_files:
                 |- 'transgened_normal_10_mer_peptides.faa': fsID
                 |- 'transgened_normal_15_mer_peptides.faa': fsID
                 |- 'transgened_normal_9_mer_peptides.faa': fsID
                 |- 'transgened_tumor_10_mer_peptides.faa': fsID
                 |- 'transgened_tumor_10_mer_peptides.faa.map': fsID
                 |- 'transgened_tumor_15_mer_peptides.faa': fsID
                 |- 'transgened_tumor_15_mer_peptides.faa.map': fsID
                 |- 'transgened_tumor_9_mer_peptides.faa': fsID
                 +- 'transgened_tumor_9_mer_peptides.faa.map': fsID
    :rtype: dict
    """
    assert snpeffed_file or fusion_calls
    work_dir = os.getcwd()
    input_files = {
        'pepts.fa.tar.gz': transgene_options['gencode_peptide_fasta'],
        'annotation.gtf.tar.gz': transgene_options['gencode_annotation_gtf'],
        'genome.fa.tar.gz': transgene_options['genome_fasta']
    }

    if snpeffed_file is not None:
        input_files.update({'snpeffed_muts.vcf': snpeffed_file})
    if rna_bam:
        input_files.update({
            'rna.bam':
            rna_bam['rna_genome']['rna_genome_sorted.bam'],
            'rna.bam.bai':
            rna_bam['rna_genome']['rna_genome_sorted.bam.bai'],
        })
    if tumor_dna_bam is not None:
        input_files.update({
            'tumor_dna.bam':
            tumor_dna_bam['tumor_dna_fix_pg_sorted.bam'],
            'tumor_dna.bam.bai':
            tumor_dna_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        })
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)
    input_files['pepts.fa'] = untargz(input_files['pepts.fa.tar.gz'], work_dir)
    input_files['genome.fa'] = untargz(input_files['genome.fa.tar.gz'],
                                       work_dir)
    input_files['annotation.gtf'] = untargz(
        input_files['annotation.gtf.tar.gz'], work_dir)
    input_files = {
        key: docker_path(path)
        for key, path in list(input_files.items())
    }

    parameters = [
        '--peptides', input_files['pepts.fa'], '--prefix', 'transgened',
        '--pep_lens', '9,10,15', '--cores',
        str(20), '--genome', input_files['genome.fa'], '--annotation',
        input_files['annotation.gtf'], '--log_file', '/data/transgene.log'
    ]

    if snpeffed_file is not None:
        parameters.extend(['--snpeff', input_files['snpeffed_muts.vcf']])
    if rna_bam:
        parameters.extend(['--rna_file', input_files['rna.bam']])

    if tumor_dna_bam is not None:
        parameters.extend(['--dna_file', input_files['tumor_dna.bam']])

    if fusion_calls:
        fusion_files = {
            'fusion_calls': fusion_calls,
            'transcripts.fa.tar.gz':
            transgene_options['gencode_transcript_fasta']
        }

        fusion_files = get_files_from_filestore(job,
                                                fusion_files,
                                                work_dir,
                                                docker=False)
        fusion_files['transcripts.fa'] = untargz(
            fusion_files['transcripts.fa.tar.gz'], work_dir)
        fusion_files = {
            key: docker_path(path)
            for key, path in list(fusion_files.items())
        }
        parameters += [
            '--transcripts', fusion_files['transcripts.fa'], '--fusions',
            fusion_files['fusion_calls']
        ]

    try:
        docker_call(tool='transgene',
                    tool_parameters=parameters,
                    work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'],
                    tool_version=transgene_options['version'])
    finally:
        logfile = os.path.join(os.getcwd(), 'transgene.log')
        export_results(job,
                       job.fileStore.writeGlobalFile(logfile),
                       logfile,
                       univ_options,
                       subfolder='mutations/transgened')

    output_files = defaultdict()
    peptides_not_found = False
    for peplen in ['9', '10', '15']:
        for tissue_type in ['tumor', 'normal']:
            pepfile = '_'.join(
                ['transgened', tissue_type, peplen, 'mer_peptides.faa'])
            # Backwards compatibility for old transgene output
            old_pepfile = '_'.join(
                ['transgened', tissue_type, peplen, 'mer_snpeffed.faa'])
            if os.path.exists(os.path.join(work_dir, old_pepfile)):
                os.rename(os.path.join(work_dir, old_pepfile),
                          os.path.join(work_dir, pepfile))
                if tissue_type == 'tumor':
                    os.rename(os.path.join(work_dir, old_pepfile + '.map'),
                              os.path.join(work_dir, pepfile + '.map'))
            if not os.path.exists(os.path.join(work_dir, pepfile)):
                peptides_not_found = True
                break
            output_files[pepfile] = job.fileStore.writeGlobalFile(
                os.path.join(work_dir, pepfile))
            export_results(job,
                           output_files[pepfile],
                           pepfile,
                           univ_options,
                           subfolder='peptides')
        if peptides_not_found:
            break
        mapfile = '_'.join(
            ['transgened_tumor', peplen, 'mer_peptides.faa.map'])
        output_files[mapfile] = job.fileStore.writeGlobalFile(
            os.path.join(work_dir, mapfile))
        export_results(job,
                       output_files[mapfile],
                       mapfile,
                       univ_options,
                       subfolder='peptides')
    if snpeffed_file:
        # There won't be an output vcf if there's no input
        os.rename('transgened_transgened.vcf', 'mutations.vcf')
        export_results(job,
                       job.fileStore.writeGlobalFile('mutations.vcf'),
                       'mutations.vcf',
                       univ_options,
                       subfolder='mutations/transgened')
    if fusion_calls:
        # There won't be an output bedpe if there's no input
        os.rename('transgened_transgened.bedpe', 'fusions.bedpe')
        export_results(job,
                       job.fileStore.writeGlobalFile('fusions.bedpe'),
                       'fusions.bedpe',
                       univ_options,
                       subfolder='mutations/transgened')
    if peptides_not_found:
        job.fileStore.logToMaster(
            'Transgene failed to find any peptides for %s.' %
            univ_options['patient'])
        return None
    else:
        job.fileStore.logToMaster('Ran transgene on %s successfully' %
                                  univ_options['patient'])
        return output_files
Example #42
0
def run_mutect_perchrom(job, tumor_bam, normal_bam, univ_options, mutect_options, chrom):
    """
    This module will run mutect on the DNA bams

    ARGUMENTS
    1. tumor_bam: REFER ARGUMENTS of spawn_mutect()
    2. normal_bam: REFER ARGUMENTS of spawn_mutect()
    3. univ_options: REFER ARGUMENTS of spawn_mutect()
    4. mutect_options: REFER ARGUMENTS of spawn_mutect()
    5. chrom: String containing chromosome name with chr appended

    RETURN VALUES
    1. output_files: Dict of results of mutect for chromosome
            output_files
              |- 'mutect_CHROM.vcf': <JSid>
              +- 'mutect_CHROM.out': <JSid>

    This module corresponds to node 12 on the tree
    """
    job.fileStore.logToMaster('Running mutect on %s:%s' % (univ_options['patient'], chrom))
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'normal.bam': normal_bam['normal_dna_fix_pg_sorted.bam'],
        'normal.bam.bai': normal_bam['normal_dna_fix_pg_sorted.bam.bai'],
        'genome.fa.tar.gz': mutect_options['genome_fasta'],
        'genome.fa.fai.tar.gz': mutect_options['genome_fai'],
        'genome.dict.tar.gz': mutect_options['genome_dict'],
        'cosmic.vcf.tar.gz': mutect_options['cosmic_vcf'],
        'cosmic.vcf.idx.tar.gz': mutect_options['cosmic_idx'],
        'dbsnp.vcf.gz': mutect_options['dbsnp_vcf'],
        'dbsnp.vcf.idx.tar.gz': mutect_options['dbsnp_idx']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    # dbsnp.vcf should be bgzipped, but all others should be tar.gz'd
    input_files['dbsnp.vcf'] = gunzip(input_files['dbsnp.vcf.gz'])
    for key in ('genome.fa', 'genome.fa.fai', 'genome.dict', 'cosmic.vcf', 'cosmic.vcf.idx',
                'dbsnp.vcf.idx'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    mutout = ''.join([work_dir, '/', chrom, '.out'])
    mutvcf = ''.join([work_dir, '/', chrom, '.vcf'])
    parameters = ['-R', input_files['genome.fa'],
                  '--cosmic', input_files['cosmic.vcf'],
                  '--dbsnp', input_files['dbsnp.vcf'],
                  '--input_file:normal', input_files['normal.bam'],
                  '--input_file:tumor', input_files['tumor.bam'],
                  # '--tumor_lod', str(10),
                  # '--initial_tumor_lod', str(4.0),
                  '-L', chrom,
                  '--out', docker_path(mutout),
                  '--vcf', docker_path(mutvcf)
                  ]
    print(parameters, file=sys.stderr)
    java_xmx = mutect_options['java_Xmx'] if mutect_options['java_Xmx'] \
        else univ_options['java_Xmx']
    docker_call(tool='mutect:1.1.7', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'], java_opts=java_xmx)
    export_results(job, mutvcf, univ_options, subfolder='mutations/mutect')
    output_file = job.fileStore.writeGlobalFile(mutvcf)
    return output_file
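The gunzip helper applied to dbsnp.vcf.gz above can be approximated with the standard library. A sketch that writes the decompressed copy alongside the input and returns its path:

import gzip
import shutil

def gunzip(input_gzip_file):
    """Sketch: decompress x.gz to x next to the input; return the new path."""
    assert input_gzip_file.endswith('.gz')
    output_file = input_gzip_file[:-3]
    with gzip.open(input_gzip_file, 'rb') as in_file:
        with open(output_file, 'wb') as out_file:
            shutil.copyfileobj(in_file, out_file)
    return output_file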
Example #43
0
def run_filter_radia(job, bams, radia_file, univ_options, radia_options,
                     chrom):
    """
    Run filterradia on the RADIA output.

    :param dict bams: Dict of bam and bai for tumor DNA-Seq, normal DNA-Seq and tumor RNA-Seq
    :param toil.fileStore.FileID radia_file: The vcf from running RADIA
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict radia_options: Options specific to RADIA
    :param str chrom: Chromosome to process
    :return: fsID for the filtered chromsome vcf
    :rtype: toil.fileStore.FileID
    """
    work_dir = os.getcwd()
    input_files = {
        'rna.bam': bams['tumor_rna'],
        'rna.bam.bai': bams['tumor_rnai'],
        'tumor.bam': bams['tumor_dna'],
        'tumor.bam.bai': bams['tumor_dnai'],
        'normal.bam': bams['normal_dna'],
        'normal.bam.bai': bams['normal_dnai'],
        'radia.vcf': radia_file,
        'genome.fa.tar.gz': radia_options['genome_fasta'],
        'genome.fa.fai.tar.gz': radia_options['genome_fai'],
        'cosmic_beds': radia_options['cosmic_beds'],
        'dbsnp_beds': radia_options['dbsnp_beds'],
        'retrogene_beds': radia_options['retrogene_beds'],
        'pseudogene_beds': radia_options['pseudogene_beds'],
        'gencode_beds': radia_options['gencode_beds']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    for key in ('cosmic_beds', 'dbsnp_beds', 'retrogene_beds',
                'pseudogene_beds', 'gencode_beds'):
        input_files[key] = untargz(input_files[key], work_dir)

    input_files = {key: docker_path(path) for key, path in input_files.items()}

    filterradia_log = ''.join(
        [work_dir, '/radia_filtered_', chrom, '_radia.log'])
    parameters = [
        univ_options['patient'],  # shortID
        chrom.lstrip('chr'),
        input_files['radia.vcf'],
        '/data',
        '/home/radia/scripts',
        '-d',
        input_files['dbsnp_beds'],
        '-r',
        input_files['retrogene_beds'],
        '-p',
        input_files['pseudogene_beds'],
        '-c',
        input_files['cosmic_beds'],
        '-t',
        input_files['gencode_beds'],
        '--noSnpEff',
        '--noBlacklist',
        '--noTargets',
        '--noRnaBlacklist',
        '-f',
        input_files['genome.fa'],
        '--log=INFO',
        '-g',
        docker_path(filterradia_log)
    ]
    docker_call(tool='filterradia',
                tool_parameters=parameters,
                work_dir=work_dir,
                dockerhub=univ_options['dockerhub'],
                tool_version=radia_options['version'])
    output_file = ''.join([work_dir, '/', chrom, '.vcf'])
    os.rename(
        ''.join([work_dir, '/', univ_options['patient'], '_', chrom, '.vcf']),
        output_file)
    output_fsid = job.fileStore.writeGlobalFile(output_file)
    export_results(job,
                   output_fsid,
                   output_file,
                   univ_options,
                   subfolder='mutations/radia')
    job.fileStore.logToMaster('Ran filter-radia on %s:%s successfully' %
                              (univ_options['patient'], chrom))
    return output_fsid
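Per-chromosome outputs like these are eventually recombined, i.e. the inverse of unmerge() in Example #33. A schematic merge (the function name is hypothetical) that keeps the header block from the first vcf and concatenates records across chromosomes:

import os

def merge_perchrom_vcfs(job, perchrom_vcfs):
    """
    Sketch: concatenate per-chromosome vcfs (dict of chrom -> fsID) into a
    single genome-level vcf, emitting the header block only once.
    """
    work_dir = os.getcwd()
    vcf_paths = {chrom: job.fileStore.readGlobalFile(fsid)
                 for chrom, fsid in perchrom_vcfs.items()}
    merged = os.path.join(work_dir, 'merged.vcf')
    with open(merged, 'w') as out_vcf:
        for index, chrom in enumerate(sorted(vcf_paths)):
            with open(vcf_paths[chrom]) as in_vcf:
                for line in in_vcf:
                    if line.startswith('#') and index != 0:
                        continue  # header was already written from file 0
                    out_vcf.write(line)
    return job.fileStore.writeGlobalFile(merged)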
Example #44
0
def run_transgene(job,
                  snpeffed_file,
                  rna_bam,
                  univ_options,
                  transgene_options,
                  tumor_dna_bam=None,
                  fusion_calls=None):
    """
    Run transgene on an input snpeffed vcf file and return the peptides for MHC prediction.


    :param toil.fileStore.FileID snpeffed_file: fsID for snpeffed vcf
    :param dict rna_bam: The dict of bams returned by running star
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict transgene_options: Options specific to Transgene
    :param dict tumor_dna_bam: The dict of bams returned by running bwa
    :param fusion_calls: fsID for fusion calls, if any were made
    :return: A dictionary of 9 files (9-, 10-, and 15-mer peptides each for Tumor and Normal and the
             corresponding .map files for the 3 Tumor fastas)
             output_files:
                 |- 'transgened_normal_10_mer_snpeffed.faa': fsID
                 |- 'transgened_normal_15_mer_snpeffed.faa': fsID
                 |- 'transgened_normal_9_mer_snpeffed.faa': fsID
                 |- 'transgened_tumor_10_mer_snpeffed.faa': fsID
                 |- 'transgened_tumor_10_mer_snpeffed.faa.map': fsID
                 |- 'transgened_tumor_15_mer_snpeffed.faa': fsID
                 |- 'transgened_tumor_15_mer_snpeffed.faa.map': fsID
                 |- 'transgened_tumor_9_mer_snpeffed.faa': fsID
                 +- 'transgened_tumor_9_mer_snpeffed.faa.map': fsID
    :rtype: dict
    """
    work_dir = os.getcwd()
    input_files = {
        'snpeffed_muts.vcf': snpeffed_file,
        'rna.bam': rna_bam['rna_genome']['rna_genome_sorted.bam'],
        'rna.bam.bai': rna_bam['rna_genome']['rna_genome_sorted.bam.bai'],
        'pepts.fa.tar.gz': transgene_options['gencode_peptide_fasta']
    }
    if tumor_dna_bam is not None:
        input_files.update({
            'tumor_dna.bam':
            tumor_dna_bam['tumor_dna_fix_pg_sorted.bam'],
            'tumor_dna.bam.bai':
            tumor_dna_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        })
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)
    input_files['pepts.fa'] = untargz(input_files['pepts.fa.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = [
        '--peptides', input_files['pepts.fa'], '--snpeff',
        input_files['snpeffed_muts.vcf'], '--rna_file', input_files['rna.bam'],
        '--prefix', 'transgened', '--pep_lens', '9,10,15', '--cores',
        str(transgene_options['n'])
    ]

    if tumor_dna_bam is not None:
        parameters.extend(['--dna_file', input_files['tumor_dna.bam']])

    if fusion_calls:
        fusion_files = {
            'fusion_calls': fusion_calls,
            'transcripts.fa.tar.gz':
            transgene_options['gencode_transcript_fasta'],
            'annotation.gtf.tar.gz':
            transgene_options['gencode_annotation_gtf'],
            'genome.fa.tar.gz': transgene_options['genome_fasta']
        }

        fusion_files = get_files_from_filestore(job,
                                                fusion_files,
                                                work_dir,
                                                docker=False)
        fusion_files['transcripts.fa'] = untargz(
            fusion_files['transcripts.fa.tar.gz'], work_dir)
        fusion_files['genome.fa'] = untargz(fusion_files['genome.fa.tar.gz'],
                                            work_dir)
        fusion_files['annotation.gtf'] = untargz(
            fusion_files['annotation.gtf.tar.gz'], work_dir)
        fusion_files = {
            key: docker_path(path)
            for key, path in fusion_files.items()
        }
        parameters += [
            '--transcripts', fusion_files['transcripts.fa'], '--fusions',
            fusion_files['fusion_calls'], '--genome',
            fusion_files['genome.fa'], '--annotation',
            fusion_files['annotation.gtf']
        ]

    docker_call(tool='transgene',
                tool_parameters=parameters,
                work_dir=work_dir,
                dockerhub=univ_options['dockerhub'],
                tool_version=transgene_options['version'])

    output_files = defaultdict()
    for peplen in ['9', '10', '15']:
        for tissue_type in ['tumor', 'normal']:
            pepfile = '_'.join(
                ['transgened', tissue_type, peplen, 'mer_snpeffed.faa'])
            output_files[pepfile] = job.fileStore.writeGlobalFile(
                os.path.join(work_dir, pepfile))
            export_results(job,
                           output_files[pepfile],
                           pepfile,
                           univ_options,
                           subfolder='peptides')
        mapfile = '_'.join(
            ['transgened_tumor', peplen, 'mer_snpeffed.faa.map'])
        output_files[mapfile] = job.fileStore.writeGlobalFile(
            os.path.join(work_dir, mapfile))
        export_results(job,
                       output_files[mapfile],
                       mapfile,
                       univ_options,
                       subfolder='peptides')
    os.rename('transgened_transgened.vcf', 'mutations.vcf')
    export_results(job,
                   job.fileStore.writeGlobalFile('mutations.vcf'),
                   'mutations.vcf',
                   univ_options,
                   subfolder='mutations/transgened')

    job.fileStore.logToMaster('Ran transgene on %s successfully' %
                              univ_options['patient'])
    return output_files
Example #45
0
def run_bwa(job, fastqs, sample_type, univ_options, bwa_options):
    """
    This module aligns the SAMPLE_TYPE dna fastqs to the reference

    ARGUMENTS -- <ST> depicts the sample type. Substitute with 'tumor'/'normal'
    1. fastqs: Dict of list of input WGS/WXS fastqs
         fastqs
              +- '<ST>_dna': [<JSid for 1.fastq> , <JSid for 2.fastq>]
    2. sample_type: string of 'tumor_dna' or 'normal_dna'
    3. univ_options: Dict of universal arguments used by almost all tools
         univ_options
                +- 'dockerhub': <dockerhub to use>
    4. bwa_options: Dict of parameters specific to bwa
         bwa_options
              |- 'tool_index': <JSid for the bwa index tarball>
              +- 'n': <number of threads to allocate>

    RETURN VALUES
    1. output_files: Dict of aligned bam + reference (nested return)
         output_files
             |- '<ST>_fix_pg_sorted.bam': <JSid>
             +- '<ST>_fix_pg_sorted.bam.bai': <JSid>

    This module corresponds to nodes 3 and 4 on the tree
    """
    job.fileStore.logToMaster('Running bwa on %s:%s' %
                              (univ_options['patient'], sample_type))
    work_dir = os.getcwd()
    input_files = {
        'dna_1.fastq': fastqs[0],
        'dna_2.fastq': fastqs[1],
        'bwa_index.tar.gz': bwa_options['tool_index']
    }
    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)
    # Handle gzipped file
    gz = '.gz' if is_gzipfile(input_files['dna_1.fastq']) else ''
    if gz:
        for read_file in 'dna_1.fastq', 'dna_2.fastq':
            os.symlink(read_file, read_file + gz)
            input_files[read_file + gz] = input_files[read_file] + gz
    # Untar the index
    input_files['bwa_index'] = untargz(input_files['bwa_index.tar.gz'],
                                       work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    parameters = [
        'mem',
        '-t',
        str(bwa_options['n']),
        '-v',
        '1',  # Don't print INFO messages to the stderr
        '/'.join([input_files['bwa_index'], 'hg19']),
        input_files['dna_1.fastq' + gz],
        input_files['dna_2.fastq' + gz]
    ]
    with open(''.join([work_dir, '/', sample_type, '_aligned.sam']),
              'w') as samfile:
        docker_call(tool='bwa',
                    tool_parameters=parameters,
                    work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'],
                    outfile=samfile)
    # samfile.name retains the path info
    output_file = job.fileStore.writeGlobalFile(samfile.name)
    return output_file
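run_bwa returns a raw SAM, while the rest of the pipeline consumes '<ST>_fix_pg_sorted.bam' plus its index, so sorting and indexing must happen in later jobs. One assumed way to express those steps with samtools (the actual pipeline runs them as separate dockerized jobs):

import subprocess

def sort_and_index_sam(samfile, prefix):
    """
    Sketch: coordinate-sort a SAM into a BAM and index it, assuming
    samtools >= 1.3 is on the PATH. Returns the bam and bai paths.
    """
    bamfile = prefix + '_sorted.bam'
    subprocess.check_call(['samtools', 'sort', '-O', 'bam',
                           '-o', bamfile, samfile])
    subprocess.check_call(['samtools', 'index', bamfile])
    return bamfile, bamfile + '.bai'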
Example #46
0
def run_radia_perchrom(job, bams, univ_options, radia_options, chrom):
    """
    This module will run radia on the RNA and DNA bams

    ARGUMENTS
    1. bams: Dict of bams and their indexes
        bams
         |- 'tumor_rna': <JSid>
         |- 'tumor_rnai': <JSid>
         |- 'tumor_dna': <JSid>
         |- 'tumor_dnai': <JSid>
         |- 'normal_dna': <JSid>
         +- 'normal_dnai': <JSid>
    2. univ_options: Dict of universal arguments used by almost all tools
         univ_options
                +- 'dockerhub': <dockerhub to use>
    3. radia_options: Dict of parameters specific to radia
         radia_options
              |- 'dbsnp_vcf': <JSid for dnsnp vcf file>
              +- 'genome': <JSid for genome fasta file>
    4. chrom: String containing chromosome name with chr appended

    RETURN VALUES
    1. Dict of filtered radia output vcf and logfile (Nested return)
        |- 'radia_filtered_CHROM.vcf': <JSid>
        +- 'radia_filtered_CHROM_radia.log': <JSid>
    """
    job.fileStore.logToMaster('Running radia on %s:%s' % (univ_options['patient'], chrom))
    work_dir = os.getcwd()
    input_files = {
        'rna.bam': bams['tumor_rna'],
        'rna.bam.bai': bams['tumor_rnai'],
        'tumor.bam': bams['tumor_dna'],
        'tumor.bam.bai': bams['tumor_dnai'],
        'normal.bam': bams['normal_dna'],
        'normal.bam.bai': bams['normal_dnai'],
        'genome.fa.tar.gz': radia_options['genome_fasta'],
        'genome.fa.fai.tar.gz': radia_options['genome_fai']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    radia_output = ''.join([work_dir, '/radia_', chrom, '.vcf'])
    radia_log = ''.join([work_dir, '/radia_', chrom, '_radia.log'])
    parameters = [univ_options['patient'],  # shortID
                  chrom,
                  '-n', input_files['normal.bam'],
                  '-t', input_files['tumor.bam'],
                  '-r', input_files['rna.bam'],
                  ''.join(['--rnaTumorFasta=', input_files['genome.fa']]),
                  '-f', input_files['genome.fa'],
                  '-o', docker_path(radia_output),
                  '-i', 'hg19_M_rCRS',
                  '-m', input_files['genome.fa'],
                  '-d', '*****@*****.**',
                  '-q', 'Illumina',
                  '--disease', 'CANCER',
                  '-l', 'INFO',
                  '-g', docker_path(radia_log)]
    docker_call(tool='radia', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    output_file = job.fileStore.writeGlobalFile(radia_output)
    return output_file
Example #47
0
def filter_somaticsniper(job, tumor_bam, somaticsniper_output, tumor_pileup, univ_options,
                         somaticsniper_options):
    """
    This module will filter the somaticsniper output for a single chromosome

    :param toil.Job job: Job
    :param dict tumor_bam: Tumor bam file and it's bai
    :param str somaticsniper_output: jsID from somatic sniper
    :param str tumor_pileup: jsID for pileup file for this chromsome
    :param dict univ_options: Universal options
    :param dict somaticsniper_options: Options specific to Somatic Sniper
    :returns: filtered chromsome vcf
    :rtype: str
    """
    job.fileStore.logToMaster('Filtering somaticsniper for %s' % univ_options['patient'])
    work_dir = os.getcwd()
    input_files = {
        'tumor.bam': tumor_bam['tumor_dna_fix_pg_sorted.bam'],
        'tumor.bam.bai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'],
        'input.vcf': somaticsniper_output,
        'pileup.txt': tumor_pileup,
        'genome.fa.tar.gz': somaticsniper_options['genome_fasta'],
        'genome.fa.fai.tar.gz': somaticsniper_options['genome_fai']}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    for key in ('genome.fa', 'genome.fa.fai'):
        input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
    input_files = {key: docker_path(path) for key, path in input_files.items()}

    # Run snpfilter.pl
    parameters = ['snpfilter.pl',
                  '--snp-file', input_files['input.vcf'],
                  '--indel-file', input_files['pileup.txt']]
    # Creates /data/input.vcf.SNPfilter
    docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])

    # Run prepare_for_readcount.pl
    parameters = ['prepare_for_readcount.pl',
                  '--snp-file', input_files['input.vcf'] + '.SNPfilter']
    # Creates /data/input.vcf.SNPfilter.pos
    docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])

    # Run  bam-readcount
    parameters = ['-b', '15',
                  '-f', input_files['genome.fa'],
                  '-l', input_files['input.vcf'] + '.SNPfilter.pos',
                  '-w', '1',
                  input_files['tumor.bam']]
    # Creates the read counts file
    with open(os.path.join(work_dir, 'readcounts.txt'), 'w') as readcounts_file:
        docker_call(tool='bam-readcount', tool_parameters=parameters, work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'], outfile=readcounts_file)

    # Run fpfilter.pl
    parameters = ['fpfilter.pl',
                  '--snp-file', input_files['input.vcf'] + '.SNPfilter',
                  '--readcount-file', docker_path(readcounts_file.name)]

    # Creates input.vcf.SNPfilter.fp_pass and input.vcf.SNPfilter.fp_fail
    docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])

    # Run highconfidence.pl
    parameters = ['highconfidence.pl',
                  '--snp-file', input_files['input.vcf'] + '.SNPfilter.fp_pass']

    # Creates input.vcf.SNPfilter.fp_pass.hc
    docker_call(tool='somaticsniper-addons', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])

    outfile = job.fileStore.writeGlobalFile(os.path.join(os.getcwd(),
                                                         'input.vcf.SNPfilter.fp_pass.hc'))
    return outfile
Example #48
0
def assess_itx_resistance(job, gene_expression, univ_options, reports_options):
    """
    Assess the prevalence of the various genes in various cancer pathways and return a report in
    text format.

    :param toil.fileStore.FileID gene_expression: fsID for the rsem gene expression file
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict reports_options: Options specific to reporting modules
    :return: The fsID for the itx resistance report file
    :rtype: toil.fileStore.FileID
    """

    work_dir = os.getcwd()
    tumor_type = univ_options['tumor_type']

    # Get the input files
    input_files = {
        'rsem_quant.tsv':
        gene_expression,
        'itx_resistance.tsv.tar.gz':
        reports_options['itx_resistance_file'],
        'immune_resistance_pathways.json.tar.gz':
        reports_options['immune_resistance_pathways_file']
    }

    input_files = get_files_from_filestore(job,
                                           input_files,
                                           work_dir,
                                           docker=False)

    input_files['itx_resistance.tsv'] = untargz(
        input_files['itx_resistance.tsv.tar.gz'], work_dir)
    input_files['immune_resistance_pathways.json'] = untargz(
        input_files['immune_resistance_pathways.json.tar.gz'], work_dir)

    full_data = pd.read_table(input_files['itx_resistance.tsv'], index_col=0)
    # Read pathways descriptions and cancer pathway data
    with open(input_files['immune_resistance_pathways.json']) as json_file:
        json_data = json.load(json_file)

    # Read patient file
    patient_df = pd.read_csv('rsem_quant.tsv',
                             sep='\t',
                             header='infer',
                             index_col=0)
    patient_df.index = (patient_df.index).str.replace('\\..*$', '')

    with open('immunotherapy_resistance_report.txt', 'w') as report_file:
        # Check if data exists for the specified tumor type
        try:
            pathways = json_data['Cancer_to_pathway'][tumor_type]
        except KeyError:
            print('Data not available for ' + tumor_type, file=report_file)
        else:
            # If data exists, write a report
            for pathway in pathways:
                up_is_good = json_data['Pathways'][pathway]['up_is_good']

                if up_is_good:
                    comp_fn = lambda x, y: x >= y
                else:
                    comp_fn = lambda x, y: x < y

                # Describe pathway and genes for it
                print('Pathway: ' + pathway + '\n', file=report_file)
                print('Papers: ' + json_data['Pathways'][pathway]['paper'],
                      file=report_file)
                description = json_data['Pathways'][pathway]['description']
                print('Description of pathway:\n' +
                      textwrap.fill(description, width=100),
                      file=report_file)
                print('Pathway genes: ', file=report_file)
                print('\t{:10}{:<20}{:<20}{:<12}'.format(
                    'Gene', 'GTEX Median', 'TCGA N Median', 'Observed'),
                      file=report_file)
                status = []
                # Write TCGA, GTEX, and observed values
                for gene in json_data['Pathways'][pathway]['genes']:
                    gtex = '{0:.2f}'.format(
                            float(full_data.loc[gene, TCGAToGTEx[tumor_type]])) \
                            if gene in full_data.index else 'NA'
                    tcga = '{0:.2f}'.format(
                            float(full_data.loc[gene, tumor_type + ' normal'])) \
                            if gene in full_data.index else 'NA'
                    tpm_value = '{0:.2f}'.format(float(patient_df.loc[gene, 'TPM'])) \
                                if gene in patient_df.index else 'NA'
                    ensg = json_data['Pathways'][pathway]['genes'][gene]
                    print('\t{:10}{:<20}{:<20}{:<12}'.format(
                        ensg, gtex, tcga, tpm_value),
                          file=report_file)
                    if gtex != 'NA' and tpm_value != 'NA':
                        tcga_bool = comp_fn(float(tpm_value), float(tcga))
                        gtex_bool = comp_fn(float(tpm_value), float(gtex))
                        status.append(tcga_bool and gtex_bool)
                    else:
                        status.append(False)

                # Based on the number of genes with expression values above normal, assess the status
                print('Status: ' +
                      json_data['Pathways'][pathway]['status'][str(
                          sum(status) >= 0.75 * len(status))] + '\n',
                      file=report_file)

    output_file = job.fileStore.writeGlobalFile(report_file.name)
    export_results(job,
                   output_file,
                   report_file.name,
                   univ_options,
                   subfolder='reports')

    job.fileStore.logToMaster(
        'Ran create immunotherapy resistance report on %s successfully' %
        univ_options['patient'])
    return output_file
Example #49
0
def assess_mhc_genes(job, gene_expression, rna_haplotype, univ_options, reports_options):
    """
    Assess the prevalence of the various genes in the MHC pathway and return a report as a
    fixed-width text file.

    :param toil.fileStore.FileID gene_expression: fsID for the rsem gene expression file
    :param toil.fileStore.FileID|None rna_haplotype: fsID for the RNA PHLAT file
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict reports_options: Options specific to reporting modules
    :return: The fsID for the mhc pathway report file
    :rtype: toil.fileStore.FileID
    """

    work_dir = os.getcwd()
    # Take file parameters for both TCGA and GTEX files
    tumor_type = univ_options['tumor_type']
    b_types = {
        'tcga': tumor_type + " normal",
        'gtex': TCGAToGTEx[tumor_type] if tumor_type in TCGAToGTEx else "NA"}

    input_files = {
        'rsem_quant.tsv': gene_expression,
        'mhc_pathways.tsv.tar.gz': reports_options['mhc_pathways_file']}
    if rna_haplotype is not None:
        input_files['rna_haplotype.sum'] = rna_haplotype
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)

    input_files['mhc_pathways.tsv'] = untargz(input_files['mhc_pathways.tsv.tar.gz'], work_dir)

    # Read the background file

    background_df = pd.read_table(input_files['mhc_pathways.tsv'], index_col=0, header=0)

    # Parse the rna phlat file
    if rna_haplotype is not None:
        with open(input_files['rna_haplotype.sum']) as rna_mhc:
            mhc_alleles = {'HLA_A': [], 'HLA_B': [], 'HLA_C': [], 'HLA_DPA': [], 'HLA_DQA': [],
                           'HLA_DPB': [], 'HLA_DQB': [], 'HLA_DRB': []}
            mhc_alleles = parse_phlat_file(rna_mhc, mhc_alleles)

    # Read the patient gene values into a dictionary
    gene_expressions = pd.read_table(input_files['rsem_quant.tsv'], index_col=0, header=0)
    gene_expressions = Counter(
        {x.split('.')[0]: y for x, y in gene_expressions['TPM'].to_dict().items()})
    # Print the report
    roles = {x for x in background_df['Roles'].values if ',' not in x}
    with open('mhc_pathway_report.txt', 'w') as mpr:
        for role in roles:
            role_df = background_df[background_df['Roles'].str.contains(role)]
            print(role.center(90, ' '), file=mpr)
            print(
                "{:12}{:<12}{:<17}{:<12}{:<20}{:<17}\n".format('Gene', 'Observed', 'Threshold_GTEX',
                                                                 'Result', 'Threshold_TCGA_N', 'Result'),
                file=mpr)
            # If tumor_type in TCGAToGTEx.keys():
            if role == 'MHCI loading':
                for mhci_allele in 'HLA_A', 'HLA_B', 'HLA_C':
                    if rna_haplotype is not None:
                        num_alleles = len(mhc_alleles[mhci_allele])
                        result = ('FAIL' if num_alleles == 0 else
                                  'LOW' if num_alleles == 1 else
                                  'PASS')
                    else:
                        result = num_alleles = 'NA'
                    print("{:12}{:<12}{:<17}{:<12}{:<20}{:<17}".format(mhci_allele, 2,
                                                                           num_alleles, result,
                                                                           2, result), file=mpr)
            elif role == 'MHCII loading':
                for mhcii_allele in ('HLA_DQA', 'HLA_DQB', 'HLA_DRB'):
                    if rna_haplotype is not None:
                        num_alleles = len(mhc_alleles[mhcii_allele])
                        result = ('FAIL' if num_alleles == 0 else
                                  'LOW' if num_alleles == 1 else
                                  'PASS')
                    else:
                        result = num_alleles = 'NA'
                    print(
                        "{:12}{:<12}{:<17}{:<12}{:<20}{:<17}".format(mhcii_allele, 2, num_alleles,
                                                                     result, 2, result), file=mpr)

            for ensg in role_df.index:
                ensgName = background_df.loc[ensg, 'Name']
                b_vals = {}
                for bkg in b_types:
                    val = "{0:.2f}".format(role_df.loc[ensg].get(b_types[bkg], default='NA'))
                    result = ('NA' if val == 'NA' else
                              'LOW' if float(val) >= float(gene_expressions[ensg]) else
                              'PASS')
                    b_vals[bkg] = val, result
                print(
                    "{:12}{:<12}{:<17}{:<12}{:<20}{:<17}".format(ensgName, float(gene_expressions[ensg]),
                                                                 b_vals['gtex'][0], b_vals['gtex'][1],
                                                                 b_vals['tcga'][0], b_vals['tcga'][1]),
                    file=mpr)

            print('\n', file=mpr)

    output_file = job.fileStore.writeGlobalFile(mpr.name)
    export_results(job, output_file, mpr.name, univ_options, subfolder='reports')
    job.fileStore.logToMaster('Ran mhc gene assessment on %s successfully'
                              % univ_options['patient'])
    return output_file