Example #1
    def testMixedAnnotation(self):
        """Test that the COSMIC datasource can retrieve entries by both gp and gpp."""
        tabixDir = "testdata/small_cosmic_with_gp_and_gpp/"
        cosmicDS = Cosmic(
            src_file=tabixDir + "small_cosmic_trimmed_for_sorting.txt.tbi.gz",
            title="Cosmic",
            version="test",
            gpp_tabix_file=tabixDir +
            "small_cosmic_trimmed_for_sorting.txt.tbi.byAA.sorted.tsv.gz")

        # These values are not taken from a real world scenario, but are cooked for this test.
        # Line 9 should get picked up by the genomic coords
        # Lines 7 and 8 should get picked up by the protein position
        m = MutationDataFactory.default_create()
        m.createAnnotation("gene", "A2M")
        m.createAnnotation("transcript_protein_position_start", "1300")
        m.createAnnotation("transcript_protein_position_end", "1400")
        m.chr = '12'
        m.start = '9227220'
        m.end = '9227230'
        m = cosmicDS.annotate_mutation(m)

        self.assertTrue(m['COSMIC_n_overlapping_mutations'] == '3')
        self.assertTrue(
            m['COSMIC_overlapping_mutation_AAs'].find('1229') != -1,
            "Could not find the entry specified by genomic coords.")
        self.assertTrue(
            m['COSMIC_overlapping_primary_sites'] == "lung(3)",
            "Did not have the correct primary sites annotation (lung(3)): " +
            m['COSMIC_overlapping_primary_sites'])
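
The test above exercises two different lookup paths: one keyed by genomic position (chr/start/end) and one keyed by gene plus protein position ("byAA"). The actual Cosmic class handles both internally; the snippet below is only a conceptual sketch of that dual-index idea, assuming pysam is available, that the byAA file is tabix-indexed with the gene symbol as its sequence name, and that the input coordinates are 1-based. The function name and column handling are hypothetical, not Oncotator API.

import pysam

def sketch_mixed_lookup(gp_path, gpp_path, chrom, start, end, gene, prot_start, prot_end):
    """Conceptual sketch of a dual-index COSMIC lookup (not the real Cosmic implementation)."""
    hits = []

    # Path 1: query the genomic-position index by chromosome and 0-based half-open coordinates.
    gp_tabix = pysam.TabixFile(gp_path)
    hits.extend(gp_tabix.fetch(chrom, int(start) - 1, int(end)))

    # Path 2: query the gene/protein-position ("byAA") index, where the gene symbol
    # plays the role of the sequence name and amino-acid positions are the coordinates.
    gpp_tabix = pysam.TabixFile(gpp_path)
    hits.extend(gpp_tabix.fetch(gene, int(prot_start) - 1, int(prot_end)))

    # Each hit is a raw tab-separated COSMIC record; a real datasource would parse and
    # aggregate these into annotations such as COSMIC_n_overlapping_mutations.
    return [line.split('\t') for line in hits]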
Example #2
    def testBasicAnnotate(self):
        '''Test that the COSMIC datasource can be initialized with two index files (gp and gpp) and that a simple annotation can be performed.'''
        tabixDir = "testdata/small_cosmic_with_gp_and_gpp/"
        cosmicDS = Cosmic(
            src_file=tabixDir + "small_cosmic_trimmed_for_sorting.txt.tbi.gz",
            title="Cosmic",
            version="test",
            gpp_tabix_file=tabixDir +
            "small_cosmic_trimmed_for_sorting.txt.tbi.byAA.sorted.tsv.gz")

        # These values are not taken from a real world scenario, but are cooked for this test.
        m = MutationDataFactory.default_create()
        m.createAnnotation("gene", "EGFR")
        m.createAnnotation("transcript_protein_position_start", "747")
        m.createAnnotation("transcript_protein_position_end", "747")
        m.chr = '7'
        m.start = '55259560'
        m.end = '55259560'
        m = cosmicDS.annotate_mutation(m)

        self.assertTrue(m['COSMIC_n_overlapping_mutations'] == '2')
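
The gp and gpp index files passed to the constructor above have to be built ahead of time. The sketch below shows one way such a pair of tabix indexes could be produced from pre-sorted TSVs with pysam; this is not Oncotator's actual indexing step, the file names are placeholders, and the column numbers assume a hypothetical layout (chrom/start/end in the first file, gene/AA-start/AA-end in the second).

import pysam

# Genomic-position (gp) index: sequence name is the chromosome, coordinates are genomic.
gp_file = pysam.tabix_index("cosmic_by_gp.sorted.tsv", force=True,
                            seq_col=0, start_col=1, end_col=2)

# Gene/protein-position (gpp, "byAA") index: the gene symbol stands in for the sequence
# name and the amino-acid start/end positions act as the coordinates.
gpp_file = pysam.tabix_index("cosmic_by_gpp.sorted.tsv", force=True,
                             seq_col=0, start_col=1, end_col=2)

# Each call bgzips the input (producing *.tsv.gz) and writes a .tbi index next to it.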
Example #3
    def createDatasourceFromConfigParser(configParser, leafDir):
        """
        configParser -- config parser instance from the config file in the leafdir. For information on config file format/conventions see (TODO)
        
        leafDir -- contains the file and necessary files (post indexing and install steps) to instantiate a datasource.

        """
        result = None
        # Determine the type
        dsType = configParser.get("general", "type")
        
        # TODO: Replace these if statements with something a bit more robust, such as a proper dependency injection framework
        filePrefix = leafDir + "/"
        if dsType == "gaf":
            gaf_fname = filePrefix + configParser.get('general', 'gaf_fname')
            gaf_transcript_sequences_fname = filePrefix + configParser.get('general', 'gaf_transcript_seqs_fname')
            result = Gaf(gaf_fname, gaf_transcript_sequences_fname, title=configParser.get("general", "title"), version=configParser.get("general", "version"), protocol=configParser.get("general", "protocol"))
        elif dsType == "dbsnp":
            result = dbSNP(filePrefix + configParser.get('general', 'src_file'), title=configParser.get('general', 'title'), version=configParser.get('general', 'version'))
        elif dsType == "ensembl":
            result = EnsemblTranscriptDatasource(filePrefix + configParser.get('general', 'src_file'),
                                                 title=configParser.get('general', 'title'),
                                                 version=configParser.get('general', 'version'),
                                                 tx_filter=configParser.get('general', 'transcript_filter'))
        elif dsType == "cosmic":
            result = Cosmic(src_file=filePrefix + configParser.get('general', 'src_file'), version=configParser.get('general', 'version'), gpp_tabix_file=filePrefix + configParser.get('general', 'gpp_src_file'))
        elif dsType == 'ref':
            if configParser.has_option('general', 'windowSizeRef'):
                window_size = configParser.get('general', 'windowSizeRef')
            else:
                window_size = 10
            result = ReferenceDatasource(filePrefix, title=configParser.get("general", "title"), version=configParser.get('general', 'version'), windowSizeRef=window_size)
        elif dsType == 'gene_tsv':
            result = GenericGeneDatasource(src_file=filePrefix + configParser.get('general', 'src_file'), title=configParser.get("general", "title"), version=configParser.get('general', 'version'), geneColumnName=configParser.get('general', 'gene_col'))
        elif dsType == 'transcript_tsv':
            result = GenericTranscriptDatasource(src_file=filePrefix + configParser.get('general', 'src_file'), title=configParser.get("general", "title"), version=configParser.get('general', 'version'), geneColumnName=configParser.get('general', 'transcript_col'))
        elif dsType == 'vc_tsv':
            result = GenericVariantClassificationDatasource(src_file=filePrefix + configParser.get('general', 'src_file'), title=configParser.get("general", "title"), version=configParser.get('general', 'version'), geneColumnName=configParser.get('general', 'vc_col'))
        elif dsType == 'gp_tsv':
            result = GenericGenomicPositionDatasource(src_file=filePrefix + configParser.get('general', 'src_file'), title=configParser.get("general", "title"), version=configParser.get('general', 'version'), gpColumnNames=configParser.get('general', 'genomic_position_cols'))
        elif dsType == 'gm_tsv':
            result = GenericGenomicMutationDatasource(src_file=filePrefix + configParser.get('general', 'src_file'), title=configParser.get("general", "title"), version=configParser.get('general', 'version'), gpColumnNames=configParser.get('general', 'genomic_position_cols'))
        elif dsType == 'gm_tsv_reverse_complement':
            result = GenericGenomicMutationDatasource(src_file=filePrefix + configParser.get('general', 'src_file'), title=configParser.get("general", "title"), version=configParser.get('general', 'version'), gpColumnNames=configParser.get('general', 'genomic_position_cols'), use_complementary_strand_alleles_for_negative_strand_transcripts=True)
        elif dsType == 'gpp_tsv':
            result = GenericGeneProteinPositionDatasource(src_file=filePrefix + configParser.get('general', 'src_file'),title=configParser.get("general", "title"), version=configParser.get('general', 'version'), gpColumnNames=configParser.get('general', 'gene_protein_position_cols'))
        elif dsType == "transcript_to_uniprot_aa":
            result = TranscriptToUniProtProteinPositionTransformingDatasource(title=configParser.get("general", "title"),
                                                                              version=configParser.get('general', 'version'),
                                                                              src_file="file://" + filePrefix + configParser.get('general', 'src_file'), # three slashes for sqlite
                                                                              inputPositionAnnotationName=configParser.get('general', 'inputPositionAnnotationName'),
                                                                              outputPositionAnnotationName=configParser.get('general','outputPositionAnnotationName'))
        
        elif dsType == "mock_exception":
            result = MockExceptionThrowingDatasource(title=configParser.get("general", "title"),
                                                     version=configParser.get('general', 'version'))
        elif dsType == "indexed_vcf":
            result = IndexedVcfDatasource(src_file=filePrefix + configParser.get('general', 'src_file'),
                                           title=configParser.get("general", "title"),
                                           version=configParser.get('general', 'version'),
                                           match_mode=configParser.get('general', 'match_mode'))
        elif dsType == "indexed_tsv":
            columnNames = configParser.get("general", "column_names")
            columnNames = columnNames.split(",")

            annotationColumnNames = configParser.get("general", "annotation_column_names")
            annotationColumnNames = annotationColumnNames.split(",")

            indexColumnNames = configParser.get("general", "index_column_names")
            indexColumnNames = indexColumnNames.split(",")

            DatasourceFactory._log_missing_column_name_msg(columnNames, annotationColumnNames)

            columnDataTypes = dict()
            for columnName in annotationColumnNames:
                if columnName.strip() == "":
                    continue
                columnDataTypes[columnName] = configParser.get("data_types", columnName)

            result = IndexedTsvDatasource(src_file=filePrefix + configParser.get("general", "src_file"),
                                           title=configParser.get("general", "title"),
                                           version=configParser.get("general", "version"),
                                           colNames=columnNames,
                                           annotationColNames=annotationColumnNames,
                                           indexColNames=indexColumnNames,
                                           match_mode=configParser.get("general", "match_mode"),
                                           colDataTypes=columnDataTypes)

        
        elif dsType == 'bigwig':
            if not NGSLIB_INSTALLED:
                raise RuntimeError("Bigwig datasource found in db-dir but ngslib library not installed.")
            result = BigWigDatasource(src_file=filePrefix + configParser.get('general', 'src_file'),
                                      title=configParser.get("general", "title"),
                                      version=configParser.get('general', 'version'))
        else:
            raise RuntimeError('Unknown datasource type: %s' % dsType)


        hashcode = DatasourceFactory._retrieve_hash_code(leafDir)
        result.set_hashcode(hashcode)
        return result
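
The docstring above leaves the config file format as a TODO. Purely as an illustration based on the keys this function reads for the "cosmic" branch ([general] type, src_file, version, gpp_src_file, plus title used by most other branches), a config and the call that consumes it might look roughly like this; the file names and leaf directory are made up, and the indexed files would have to exist on disk for the factory call to succeed.

import configparser  # the Python 2 module is named ConfigParser

EXAMPLE_COSMIC_CONFIG = """
[general]
type = cosmic
title = COSMIC
version = test
src_file = cosmic_by_gp.sorted.tsv.gz
gpp_src_file = cosmic_by_gpp.sorted.tsv.gz
"""

config = configparser.ConfigParser()
config.read_string(EXAMPLE_COSMIC_CONFIG)

# With a matching leaf directory on disk, the factory would dispatch on type == "cosmic":
# ds = DatasourceFactory.createDatasourceFromConfigParser(config, "dbdir/cosmic/hg19")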