# Beispiel #1
def parser(databases_dir, download=True):
    """Download and parse the GOA (Gene Ontology Annotation) dump.

    :param str databases_dir: root directory where database downloads live.
    :param bool download: if True, fetch the source file before parsing.
    :return: tuple (annotations, rel_header) — the parsed annotation
        relationships and the header configured for them.
    """
    config = builder_utils.get_config(config_name="goaConfig.yml",
                                      data_type='databases')
    url = config['url']
    rel_header = config['header']

    protein_mapping = mp.getMappingForEntity(entity="Protein")
    # BUG FIX: `.values` without parentheses is the bound method object, not
    # the mapped identifiers — it must be called (the Pfam parser in this
    # file does the same thing correctly with `.values()`).
    valid_proteins = list(set(protein_mapping.values()))

    directory = os.path.join(databases_dir, "GOA")
    builder_utils.checkDirectory(directory)
    file_name = os.path.join(directory, url.split('/')[-1])
    if download:
        builder_utils.downloadDB(url, directory)

    annotations = parse_annotations_with_pandas(file_name, valid_proteins)

    # Downloaded files are no longer needed once parsed.
    builder_utils.remove_directory(directory)

    return annotations, rel_header
# Beispiel #2
def parser(databases_directory, download=True):
    """Download and parse the configured Reactome files.

    :param str databases_directory: root directory where database downloads live.
    :param bool download: if True, fetch each source file before parsing.
    :return: tuple (entities, relationships, entities_header,
        relationships_headers).
    """
    config = builder_utils.get_config(config_name="reactomeConfig.yml",
                                      data_type='databases')
    urls = config['reactome_urls']
    entities = set()
    relationships = defaultdict(set)
    entities_header = config['pathway_header']
    relationships_headers = config['relationships_header']
    directory = os.path.join(databases_directory, "Reactome")
    builder_utils.checkDirectory(directory)
    metabolite_mapping = mp.getMappingForEntity("Metabolite")
    #drug_mapping = mp.getMappingForEntity("Drug")

    # Each configured dataset is downloaded and routed to its own parser.
    for dataset, url in urls.items():
        local_path = os.path.join(directory, url.split('/')[-1])
        if download:
            builder_utils.downloadDB(url, directory)
        with open(local_path, 'r') as rf:
            if dataset == "pathway":
                entities = parsePathways(config, databases_directory, rf)
            elif dataset == "hierarchy":
                relationships[("pathway", "has_parent")] = \
                    parsePathwayHierarchy(rf)
            elif dataset == "protein":
                relationships[(dataset, "annotated_to_pathway")] = \
                    parsePathwayRelationships(config, rf)
            elif dataset == "metabolite":
                relationships[(dataset, "annotated_to_pathway")] = \
                    parsePathwayRelationships(config, rf, metabolite_mapping)
            #elif dataset == "drug":
            #relationships[(dataset, "annotated_to_pathway")] = set()

    # Downloaded files are no longer needed once parsed.
    builder_utils.remove_directory(directory)

    return entities, relationships, entities_header, relationships_headers
def parser(databases_directory, download=True):
    """Download and parse the DGIdb drug–gene interactions file.

    :param str databases_directory: root directory where database downloads live.
    :param bool download: if True, fetch the source file before parsing.
    :return: tuple (relationships, header, output_file) where relationships
        is a set of (drug, gene, "TARGETS", ...) tuples.
    """
    config = builder_utils.get_config(
        config_name="drugGeneInteractionDBConfig.yml", data_type='databases')
    url = config['DGIdb_url']
    header = config['header']
    output_file = "dgidb_targets.tsv"
    drugmapping = mp.getMappingForEntity("Drug")

    relationships = set()
    directory = os.path.join(databases_directory, "DGIdb")
    builder_utils.checkDirectory(directory)
    fileName = os.path.join(directory, url.split('/')[-1])
    if download:
        builder_utils.downloadDB(url, directory)
    with open(fileName, 'r', encoding='utf-8') as associations:
        first = True
        for line in associations:
            if first:
                # Skip the header row.
                first = False
                continue
            data = line.rstrip("\r\n").split("\t")
            gene = data[0]
            source = data[3]
            interactionType = data[4] if data[4] != '' else 'unknown'
            drug = data[8].lower()
            if drug == "":
                # BUG FIX: fall back through the alternative drug-name
                # columns. The original assigned data[7] but then hit
                # `else: continue` whenever data[7] was non-empty, so the
                # data[7] fallback was never actually used and those rows
                # were always dropped.
                drug = data[7]
                if drug == "":
                    drug = data[6]
                if drug == "":
                    # No usable drug name in any column — skip the row.
                    continue
            if gene != "":
                if drug in drugmapping:
                    drug = drugmapping[drug]
                    relationships.add((drug, gene, "TARGETS", "NA", "NA", "NA",
                                       interactionType, "DGIdb: " + source))

    # Downloaded files are no longer needed once parsed.
    builder_utils.remove_directory(directory)

    return (relationships, header, output_file)
# Beispiel #4
def parser(databases_directory, download=True):
    """Download and parse the Exposome-Explorer biomarker correlations.

    :param str databases_directory: root directory where database downloads live.
    :param bool download: if True, fetch each zipped source file first.
    :return: tuple (correlations, relationships_header).
    """
    directory = os.path.join(databases_directory, "ExposomeExplorer")
    builder_utils.checkDirectory(directory)
    config = builder_utils.get_config(config_name="exposomeConfig.yml", data_type='databases')
    database_urls = config['database_urls']
    relationships_header = config['relationships_header']
    mapping = mp.getMappingForEntity("Food")
    correlations = {}
    # BUG FIX: initialize so the name is always bound; the original raised
    # NameError if correlations.csv appeared before biomarkers.csv.
    biomarkers = {}

    # Download everything first, then parse in two passes so biomarkers.csv
    # is always processed before correlations.csv (parseCorrelationsFile
    # needs the parsed biomarkers) regardless of the ordering of
    # database_urls in the config.
    archives = []
    for url in database_urls:
        zipped_fileName = os.path.join(directory, url.split('/')[-1])
        file_name = '.'.join(url.split('/')[-1].split('.')[0:2])
        if download:
            builder_utils.downloadDB(url, directory)
        archives.append((zipped_fileName, file_name))

    # False sorts before True, so biomarkers.csv comes first.
    for zipped_fileName, file_name in sorted(
            archives, key=lambda entry: entry[1] != "biomarkers.csv"):
        with zipfile.ZipFile(zipped_fileName) as z:
            if file_name == "biomarkers.csv":
                biomarkers = parseBiomarkersFile(z, file_name)
            elif file_name == "correlations.csv":
                correlations = parseCorrelationsFile(z, file_name, biomarkers, mapping)

    # Downloaded files are no longer needed once parsed.
    builder_utils.remove_directory(directory)

    return correlations, relationships_header
# Beispiel #5
def parser(databases_directory,
           import_directory,
           download=True,
           updated_on=None):
    """Stream-parse the Pfam full-UniProt Stockholm alignment dump.

    Reads the gzipped Stockholm file one line at a time, building
    Functional_region entities plus their mentioned_in_publication and
    found_in_protein relationships, and flushes them to TSV files in
    import_directory in batches of 100 entities.

    :param str databases_directory: root directory where database downloads live.
    :param str import_directory: directory where the TSV output files are written.
    :param bool download: if True, fetch the source file when not present.
    :param updated_on: timestamp recorded in the generated stats entries.
    :return: set of stats tuples produced by builder_utils.buildStats.
    """
    config = builder_utils.get_config(config_name="pfamConfig.yml",
                                      data_type='databases')
    entity_header = config['entity_header']
    relationship_headers = config['relationship_headers']

    directory = os.path.join(databases_directory, 'Pfam')
    builder_utils.checkDirectory(directory)
    protein_mapping = mp.getMappingForEntity(entity="Protein")
    valid_proteins = list(set(protein_mapping.values()))

    ftp_url = config['ftp_url']
    filename = config['full_uniprot_file']

    if not os.path.exists(os.path.join(directory, filename)):
        if download:
            builder_utils.downloadDB(ftp_url + filename, directory)

    stats = set()
    if os.path.exists(os.path.join(directory, filename)):
        fhandler = builder_utils.read_gzipped_file(
            os.path.join(directory, filename))
        identifier = None
        # Family description from the "#=GF DE" line; assumed to be seen
        # before the family is flushed — TODO confirm DE always precedes the
        # next "# STOCKHOLM" header in the dump.
        name = None
        description = []
        lines = []      # line numbers at which decoding failed
        missed = 0
        entities = set()
        relationships = defaultdict(set)
        is_first = True  # first write of each file emits the header row
        i = 0
        read_lines = 0
        num_entities = 0
        # BUG FIX: pre-initialize both counters. The original started from an
        # empty dict, and the post-loop flush did `+=` on the keys directly,
        # raising KeyError whenever fewer than 100 families were parsed (no
        # in-loop flush ever created the keys).
        num_relationships = {
            'mentioned_in_publication': 0,
            'found_in_protein': 0,
        }
        try:
            for line in fhandler:
                i += 1
                read_lines += 1
                if line.startswith("# STOCKHOLM"):
                    # A new alignment starts: emit the previous family first.
                    if identifier is not None:
                        entities.add((identifier, 'Functional_region', name,
                                      " ".join(description), "PFam"))
                        if len(entities) == 100:
                            # Flush the current batch to the TSV outputs.
                            print_files(entities,
                                        entity_header,
                                        outputfile=os.path.join(
                                            import_directory,
                                            'Functional_region.tsv'),
                                        is_first=is_first)
                            num_entities += len(entities)
                            if 'mentioned_in_publication' in relationships:
                                print_files(
                                    relationships['mentioned_in_publication'],
                                    relationship_headers[
                                        'mentioned_in_publication'],
                                    outputfile=os.path.join(
                                        import_directory,
                                        'Functional_region_mentioned_in_publication.tsv'
                                    ),
                                    is_first=is_first)
                                num_relationships[
                                    'mentioned_in_publication'] += len(
                                        relationships[
                                            'mentioned_in_publication'])
                            if 'found_in_protein' in relationships:
                                print_files(
                                    relationships['found_in_protein'],
                                    relationship_headers['found_in_protein'],
                                    outputfile=os.path.join(
                                        import_directory,
                                        'Functional_region_found_in_protein.tsv'
                                    ),
                                    is_first=is_first,
                                    filter_for=('END_ID', valid_proteins))
                                num_relationships['found_in_protein'] += len(
                                    relationships['found_in_protein'])
                            entities = set()
                            relationships = defaultdict(set)
                            is_first = False
                        identifier = None
                        description = []
                elif line.startswith("#=GF"):
                    # Per-family annotation lines (AC/DE/RM/CC tags).
                    data = line.rstrip('\r\n').split()
                    if 'AC' in data:
                        # Accession without version suffix, e.g. PF00001.
                        identifier = data[2].split('.')[0]
                    elif 'DE' in data:
                        name = " ".join(data[2:])
                    elif 'RM' in data:
                        relationships['mentioned_in_publication'].add(
                            (identifier, data[2], "MENTIONED_IN_PUBLICATION",
                             "PFam"))
                    elif 'CC' in data:
                        description.append(" ".join(data[2:]))
                elif not line.startswith('//'):
                    # Alignment line: "<protein>/<start>-<end> <sequence>".
                    data = line.rstrip('\r\n').split()
                    protein, positions = data[0].split('/')
                    protein = protein.replace('.', '-')
                    start, end = positions.split('-')
                    sequence = data[1]
                    relationships['found_in_protein'].add(
                        (identifier, protein, "FOUND_IN_PROTEIN", start, end,
                         sequence, "PFam"))
                    if protein.split('-')[0] != protein:
                        # Also link the base accession of isoform identifiers.
                        relationships['found_in_protein'].add(
                            (identifier, protein.split('-')[0],
                             "FOUND_IN_PROTEIN", start, end, sequence, "PFam"))
        except UnicodeDecodeError:
            # Record where decoding failed; parsing stops at this point.
            lines.append(i)
            missed += 1

        fhandler.close()

        # BUG FIX: the family still open at EOF was silently dropped by the
        # original (entities were only added on seeing the next
        # "# STOCKHOLM" header line).
        if identifier is not None:
            entities.add((identifier, 'Functional_region', name,
                          " ".join(description), "PFam"))

        if len(entities) > 0:
            # Final flush of whatever remains after the loop.
            print_files(entities,
                        entity_header,
                        outputfile=os.path.join(import_directory,
                                                'Functional_region.tsv'),
                        is_first=is_first)
            num_entities += len(entities)
            print_files(relationships['mentioned_in_publication'],
                        relationship_headers['mentioned_in_publication'],
                        outputfile=os.path.join(
                            import_directory,
                            'Functional_region_mentioned_in_publication.tsv'),
                        is_first=is_first)
            num_relationships['mentioned_in_publication'] += len(
                relationships['mentioned_in_publication'])
            # CONSISTENCY FIX: apply the same valid-protein filter used by
            # the in-loop flush; the original omitted it here only.
            print_files(relationships['found_in_protein'],
                        relationship_headers['found_in_protein'],
                        outputfile=os.path.join(
                            import_directory,
                            'Functional_region_found_in_protein.tsv'),
                        is_first=is_first,
                        filter_for=('END_ID', valid_proteins))
            num_relationships['found_in_protein'] += len(
                relationships['found_in_protein'])

        stats.add(
            builder_utils.buildStats(num_entities, "entity",
                                     "Functional_region", "Pfam",
                                     'Functional_region.tsv', updated_on))

        for rel in num_relationships:
            stats.add(
                builder_utils.buildStats(num_relationships[rel],
                                         "relationship", rel.upper(), "Pfam",
                                         'Functional_region_' + rel + '.tsv',
                                         updated_on))

    # Downloaded files are no longer needed once parsed.
    builder_utils.remove_directory(directory)

    return stats
# Beispiel #6
def parser(databases_directory, download=True):
    """Download and parse the Cancer Genome Interpreter biomarkers file.

    Builds Clinically_relevant_variant entities plus targets /
    targets_clinically_relevant_variant / associated_with /
    known_variant_is_clinically_relevant relationships.

    :param str databases_directory: root directory where database downloads live.
    :param bool download: if True, fetch the zipped source file first.
    :return: tuple (entities, relationships, entities_header,
        relationships_headers).
    """
    # Matches a protein-level variant such as "V600E" at the end of a string.
    variant_regex = r"(\D\d+\D)$"
    # Matches a gDNA identifier such as "chr7:g.140453136A>T".
    regex = r"(chr\d+)\:g\.(\d+)(\w)>(\w)"
    config = builder_utils.get_config(
        config_name="cancerGenomeInterpreterConfig.yml", data_type='databases')
    url = config['cancerBiomarkers_url']
    entities_header = config['entities_header']
    relationships_headers = config['relationships_headers']
    amino_acids = config['amino_acids']
    mapping = mp.getMappingFromOntology(ontology="Disease", source=None)
    drugmapping = mp.getMappingForEntity("Drug")
    protein_mapping = mp.getMultipleMappingForEntity("Protein")

    fileName = config['cancerBiomarkers_variant_file']
    relationships = defaultdict(set)
    entities = set()
    directory = os.path.join(databases_directory, "CancerGenomeInterpreter")
    builder_utils.checkDirectory(directory)
    zipFile = os.path.join(directory, url.split('/')[-1])

    if download:
        builder_utils.downloadDB(url, directory)
    with zipfile.ZipFile(zipFile) as z:
        if fileName in z.namelist():
            with z.open(fileName, 'r') as responses:
                first = True
                for line in responses:
                    if first:
                        # Skip the header row.
                        first = False
                        continue
                    data = line.decode('utf-8').rstrip("\r\n").split("\t")
                    gene_variant = data[0].split(':')
                    if len(gene_variant) < 2:
                        # Row has no "GENE:variant" pair — nothing to parse.
                        continue
                    gene = gene_variant[0]
                    variants = gene_variant[1].split(',')
                    #alterationType = data[1]
                    response = data[3]
                    drugs = data[10].split(';')
                    #status = data[11].split(';')
                    evidence = data[12]
                    tumors = data[16].split(';')
                    publications = data[17].split(';')
                    identifier = data[21]
                    prot_variant = data[22]
                    matches = re.match(regex, identifier)
                    alternative_names = [identifier]
                    if matches is not None:
                        # NOTE(review): chromosome/position/reference/alternative
                        # are only (re)bound when the gDNA regex matches. Rows
                        # that fail to match reuse the values from a previous
                        # row in entities.add below (or would raise NameError
                        # on the very first row) — confirm this is intended.
                        cpra = matches.groups()
                        chromosome, position, reference, alternative = cpra
                        # NOTE(review): `variant` here is immediately shadowed
                        # by the loop variable in the block below and is never
                        # read — looks like dead code.
                        variant = chromosome + ":g." + position + reference + ">" + alternative
                        if prot_variant != "":
                            prot_variant = prot_variant.split(':')[1]
                            alternative_names.append(prot_variant)

                    valid_variants = []
                    if gene in protein_mapping:
                        for protein in protein_mapping[gene]:
                            for variant in variants:
                                match = re.search(variant_regex, variant)
                                if match:
                                    # Both flanking residues must be known
                                    # amino-acid codes to build the identifier.
                                    if variant[0] in amino_acids and variant[
                                            -1] in amino_acids:
                                        valid_variant = protein + '_p.' + amino_acids[
                                            variant[0]] + ''.join(
                                                variant[1:-1]) + amino_acids[
                                                    variant[-1]]
                                        valid_variants.append(valid_variant)
                                        entities.add(
                                            (valid_variant,
                                             "Clinically_relevant_variant",
                                             ",".join(alternative_names),
                                             chromosome, position, reference,
                                             alternative, "", "", "CGI"))
                                        relationships[
                                            "known_variant_is_clinically_relevant"].add(
                                                (valid_variant, valid_variant,
                                                 "KNOWN_VARIANT_IS_CLINICALLY_RELEVANT",
                                                 "CGI"))

                    for drug in drugs:
                        # Try the full name, then the first token, then the
                        # remainder after the first token.
                        if drug.lower() in drugmapping:
                            drug = drugmapping[drug.lower()]
                        elif drug.split(" ")[0].lower() in drugmapping:
                            drug = drugmapping[drug.split(" ")[0].lower()]
                        elif " ".join(
                                drug.split(" ")[1:]).lower() in drugmapping:
                            drug = drugmapping[" ".join(
                                drug.split(" ")[1:]).lower()]
                        # NOTE(review): if no mapping matched, the raw drug
                        # name is used as the relationship start node —
                        # confirm that unmapped names are intended here.
                        relationships["targets"].add(
                            (drug, gene, "CURATED_TARGETS", evidence, response,
                             ",".join(tumors), "curated", "CGI"))

                        for valid_variant in valid_variants:
                            relationships[
                                "targets_clinically_relevant_variant"].add(
                                    (drug, valid_variant,
                                     "TARGETS_CLINICALLY_RELEVANT_VARIANT",
                                     evidence, response, "".join(tumors),
                                     "curated", "CGI"))

                    for tumor in tumors:
                        if tumor.lower() in mapping:
                            tumor = mapping[tumor.lower()]
                            for valid_variant in valid_variants:
                                relationships["associated_with"].add(
                                    (valid_variant, tumor, "ASSOCIATED_WITH",
                                     "curated", "curated", "CGI",
                                     len(publications)))

    # Downloaded files are no longer needed once parsed.
    builder_utils.remove_directory(directory)

    return (entities, relationships, entities_header, relationships_headers)
# Beispiel #7
def parser(databases_directory, download=True):
    """Download and parse the OncoKB annotated and actionable variant files.

    The annotated file yields Clinically_relevant_variant entities; the
    actionable file yields targets / targets_clinically_relevant_variant /
    associated_with / known_variant_is_clinically_relevant relationships.

    :param str databases_directory: root directory where database downloads live.
    :param bool download: if True, fetch both source files before parsing.
    :return: tuple (entities, relationships, entities_header,
        relationships_headers).
    """
    config = builder_utils.get_config(config_name="oncokbConfig.yml", data_type='databases')
    url_actionable = config['OncoKB_actionable_url']
    url_annotation = config['OncoKB_annotated_url']
    amino_acids = config['amino_acids']
    entities_header = config['entities_header']
    relationships_headers = config['relationships_headers']
    mapping = mp.getMappingFromOntology(ontology="Disease", source=None)

    drug_mapping = mp.getMappingForEntity("Drug")
    protein_mapping = mp.getMultipleMappingForEntity("Protein")

    levels = config['OncoKB_levels']
    entities = set()
    relationships = defaultdict(set)
    directory = os.path.join(databases_directory, "OncoKB")
    builder_utils.checkDirectory(directory)
    acfileName = os.path.join(directory, os.path.basename(os.path.normpath(url_actionable)))
    anfileName = os.path.join(directory, os.path.basename(os.path.normpath(url_annotation)))

    if download:
        builder_utils.downloadDB(url_actionable, directory)
        builder_utils.downloadDB(url_annotation, directory)

    # Matches a protein-level variant such as "V600E" at the end of a string.
    variant_regex = r"(\D\d+\D)$"
    with open(anfileName, 'r', errors='replace') as variants:
        first = True
        for line in variants:
            if line.startswith('#'):
                continue
            elif first:
                # Skip the header row.
                first = False
                continue
            data = line.rstrip("\r\n").split("\t")
            # BUG FIX: this row is indexed up to data[7], so at least 8
            # columns are required (the original checked >= 7 and raised
            # IndexError on rows with exactly 7 columns).
            if len(data) >= 8:
                gene = data[3]
                variant = data[5]
                oncogenicity = data[6]
                effect = data[7]
                if gene in protein_mapping:
                    for protein in protein_mapping[gene]:
                        match = re.search(variant_regex, variant)
                        if match:
                            if variant[0] in amino_acids and variant[-1] in amino_acids:
                                valid_variant = protein + '_p.' + amino_acids[variant[0]] + ''.join(variant[1:-1]) + amino_acids[variant[-1]]
                                entities.add((valid_variant, "Clinically_relevant_variant", "", "", "", "", "", effect, oncogenicity))

    with open(acfileName, 'r', errors='replace') as associations:
        first = True
        for line in associations:
            if line.startswith('#'):
                continue
            elif first:
                # Skip the header row.
                first = False
                continue
            data = line.rstrip("\r\n").split("\t")
            # BUG FIX: this row is indexed up to data[9], so at least 10
            # columns are required (the original checked >= 9 and raised
            # IndexError on rows with exactly 9 columns).
            if len(data) >= 10:
                isoform = data[1]
                gene = data[3]
                variant = data[5]
                disease = data[6]
                level = data[7]
                drugs = data[8].split(', ')
                pubmed_ids = data[9].split(',')
                if level in levels:
                    level = levels[level]

                valid_variants = []
                if gene in protein_mapping:
                    for protein in protein_mapping[gene]:
                        match = re.search(variant_regex, variant)
                        if match:
                            if variant[0] in amino_acids and variant[-1] in amino_acids:
                                valid_variants.append(protein + '_p.' + amino_acids[variant[0]] + ''.join(variant[1:-1]) + amino_acids[variant[-1]])
                for drug in drugs:
                    for d in drug.split(' + '):
                        if d.lower() in drug_mapping:
                            drug = drug_mapping[d.lower()]
                            relationships["targets"].add((drug, gene, "CURATED_TARGETS", "curated", "NA", "NA", "curated", "OncoKB"))
                            for valid_variant in valid_variants:
                                # NOTE(review): level[0]/level[1] assume the
                                # mapped level is a 2-element sequence; raw
                                # unmapped levels fall back to the first two
                                # characters of the string — confirm intended.
                                relationships["targets_clinically_relevant_variant"].add((drug, valid_variant, "TARGETS_KNOWN_VARIANT", level[0], level[1], disease, "curated", "OncoKB"))
                for valid_variant in valid_variants:
                    if disease.lower() in mapping:
                        disease = mapping[disease.lower()]
                        relationships["associated_with"].add((valid_variant, disease, "ASSOCIATED_WITH", "curated", "curated", "OncoKB", len(pubmed_ids)))
                    else:
                        pass
                    relationships["known_variant_is_clinically_relevant"].add((valid_variant, valid_variant, "KNOWN_VARIANT_IS_CLINICALLY_RELEVANT", "OncoKB"))

    # Downloaded files are no longer needed once parsed.
    builder_utils.remove_directory(directory)

    return (entities, relationships, entities_header, relationships_headers)