Example #1
def parser(databases_directory, download=True):
    config = builder_utils.get_config(config_name="smpdbConfig.yml", data_type='databases')
    urls = config['smpdb_urls']
    entities = set()
    relationships = defaultdict(set)
    entities_header = config['pathway_header']
    relationships_headers = config['relationships_header']
    directory = os.path.join(databases_directory, "SMPDB")
    builder_utils.checkDirectory(directory)

    for dataset in urls:
        url = urls[dataset]
        file_name = url.split('/')[-1]
        if download:
            builder_utils.downloadDB(url, directory)
        zipped_file = os.path.join(directory, file_name)
        with zipfile.ZipFile(zipped_file) as rf:
            if dataset == "pathway":
                entities = parsePathways(config, rf)
            elif dataset == "protein":
                relationships.update(parsePathwayProteinRelationships(rf))
            elif dataset == "metabolite":
                relationships.update(parsePathwayMetaboliteDrugRelationships(rf))

    builder_utils.remove_directory(directory)

    return entities, relationships, entities_header, relationships_headers
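
All of the parsers in this collection share the same return contract: a set of entity tuples, a dict of relationship sets keyed by type, and the headers read from the YAML config. A minimal sketch of how a caller might persist that output using only the standard library; the output paths, and the assumption that relationships_headers is keyed by relationship type, are illustrative rather than CKG's actual import layout:

import csv

def write_tsv(rows, header, output_path):
    # Write an iterable of row tuples as a TSV with a single header line.
    with open(output_path, 'w', newline='', encoding='utf-8') as fh:
        writer = csv.writer(fh, delimiter='\t')
        writer.writerow(header)
        writer.writerows(rows)

# Hypothetical usage with the SMPDB parser above:
# entities, relationships, entities_header, relationships_headers = parser("/tmp/databases")
# write_tsv(entities, entities_header, "/tmp/imports/Pathway.tsv")
# for rel_type, rows in relationships.items():
#     write_tsv(rows, relationships_headers[rel_type], "/tmp/imports/{}.tsv".format(rel_type))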
Example #2
def parser(databases_directory, download=True):
    relationships = defaultdict(set)
    config = builder_utils.get_config(config_name="disgenetConfig.yml",
                                      data_type='databases')

    files = config['disgenet_files']
    mapping_files = config['disgenet_mapping_files']
    url = config['disgenet_url']
    directory = os.path.join(databases_directory, "disgenet")
    builder_utils.checkDirectory(directory)
    header = config['disgenet_header']
    output_file = 'disgenet_associated_with.tsv'

    if download:
        for f in files:
            builder_utils.downloadDB(url + files[f], directory)
        for f in mapping_files:
            builder_utils.downloadDB(url + mapping_files[f], directory)

    proteinMapping = readDisGeNetProteinMapping(config, directory)
    diseaseMapping = readDisGeNetDiseaseMapping(config, directory)
    for f in files:
        first = True
        associations = gzip.open(os.path.join(directory, files[f]), 'r')
        dtype, atype = f.split('_')
        if dtype == 'gene':
            idType = "Protein"
            scorePos = 9
        elif dtype == 'variant':
            idType = "Transcript"
            scorePos = 5
        else:
            continue
        for line in associations:
            if first:
                first = False
                continue
            try:
                data = line.decode('utf-8').rstrip("\r\n").split("\t")
                geneId = str(int(data[0]))
                #disease_specificity_index =  data[2]
                #disease_pleiotropy_index = data[3]
                diseaseId = data[4]
                score = float(data[scorePos])
                pmids = data[13]
                source = data[-1]
                if geneId in proteinMapping:
                    for identifier in proteinMapping[geneId]:
                        if diseaseId in diseaseMapping:
                            for code in diseaseMapping[diseaseId]:
                                code = "DOID:" + code
                                relationships[idType].add(
                                    (identifier, code, "ASSOCIATED_WITH",
                                     score, atype, "DisGeNet: " + source,
                                     pmids))
            except UnicodeDecodeError:
                continue
        associations.close()

    builder_utils.remove_directory(directory)

    return (relationships, header, output_file)
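
The nested lookups above assume readDisGeNetProteinMapping and readDisGeNetDiseaseMapping each return a dict from source identifier to a list of target identifiers. A toy illustration of that shape and of the ASSOCIATED_WITH tuples it yields (all identifiers invented for the example):

from collections import defaultdict

proteinMapping = {"7157": ["P04637"]}   # Entrez gene id -> UniProt accessions (invented)
diseaseMapping = {"C0006826": ["162"]}  # UMLS CUI -> Disease Ontology codes (invented)

relationships = defaultdict(set)
geneId, diseaseId, score, pmids, source = "7157", "C0006826", 0.9, "12345", "CTD_human"
for identifier in proteinMapping.get(geneId, []):
    for code in diseaseMapping.get(diseaseId, []):
        relationships["Protein"].add(
            (identifier, "DOID:" + code, "ASSOCIATED_WITH",
             score, "disease", "DisGeNet: " + source, pmids))

print(relationships["Protein"])
# {('P04637', 'DOID:162', 'ASSOCIATED_WITH', 0.9, 'disease', 'DisGeNet: CTD_human', '12345')}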
Example #3
def parser(databases_directory, importDirectory, download=True):
    config = builder_utils.get_config(config_name="jensenlabConfig.yml", data_type='databases')
    outputfileName = "Publications.tsv"
    url = config['db_url']
    ifile = config['organisms_file']
    organisms = str(config['organisms'])
    directory = os.path.join(databases_directory, "Jensenlab")
    builder_utils.checkDirectory(os.path.join(directory, "textmining"))

    if download:
        builder_utils.downloadDB(url.replace("FILE", ifile), os.path.join(directory, "textmining"))

    ifile = os.path.join(directory, os.path.join("textmining", ifile))
    valid_pubs = read_valid_pubs(organisms, ifile)
    entities, header = parse_PMC_list(config, os.path.join(directory, "textmining"), download=download, valid_pubs=valid_pubs)
    num_entities = len(entities)
    outputfile = os.path.join(importDirectory, outputfileName)
    builder_utils.write_entities(entities, header, outputfile)
    entities = None

    for qtype in config['db_mentions_types']:
        parse_mentions(config, directory, qtype, importDirectory, download)

    builder_utils.remove_directory(os.path.join(directory, "textmining"))

    return (num_entities, outputfile)
Example #4
def parser(databases_directory, download=True):
    config = builder_utils.get_config(config_name="gwasCatalogConfig.yml",
                                      data_type='databases')
    url = config['GWASCat_url']
    entities_header = config['entities_header']
    relationships_header = config['relationships_header']
    entities = set()
    relationships = defaultdict(set)
    directory = os.path.join(databases_directory, "GWAScatalog")
    builder_utils.checkDirectory(directory)
    fileName = os.path.join(directory, url.split('/')[-1])
    if download:
        builder_utils.downloadDB(url, directory)
    with open(fileName, 'r', encoding="utf-8") as catalog:
        for line in catalog:
            data = line.rstrip("\r\n").split("\t")
            if len(data) > 36:
                pubmedid = data[1]
                date = data[3]
                title = data[6]
                sample_size = data[8]
                replication_size = data[9]
                #chromosome = data[11]
                #position = data[12]
                #genes_mapped = data[14].split(" - ")
                snp_id = data[20].split('-')[0]
                freq = data[26]
                pval = data[27]
                odds_ratio = data[30]
                trait = data[34]
                exp_factor = data[35]
                study = data[36]

                entities.add((study, "GWAS_study", title, date, sample_size,
                              replication_size, trait))
                if pubmedid != "":
                    relationships["published_in_publication"].add(
                        (study, pubmedid, "PUBLISHED_IN", "GWAS Catalog"))
                if snp_id != "":
                    relationships["variant_found_in_gwas"].add(
                        (re.sub(r"^\W+|\W+$", "",
                                snp_id), study, "VARIANT_FOUND_IN_GWAS", freq,
                         pval, odds_ratio, trait, "GWAS Catalog"))
                if exp_factor != "":
                    exp_factor = exp_factor.split('/')[-1].replace('_', ':')
                    relationships["studies_trait"].add(
                        (study, exp_factor, "STUDIES_TRAIT", "GWAS Catalog"))

    builder_utils.remove_directory(directory)

    return (entities, relationships, entities_header, relationships_header)
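
The re.sub(r"^\W+|\W+$", "", snp_id) call above normalises the rsID before it is used as a node key: it strips non-word characters from both ends of the string only. A quick self-contained check (the inputs are invented):

import re

for raw in ["rs12345", " rs12345;", "*rs6983267?"]:
    print(repr(re.sub(r"^\W+|\W+$", "", raw)))
# 'rs12345', 'rs12345', 'rs6983267' -- punctuation and whitespace at either
# end are stripped; the alphanumeric core is left untouched.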
Example #5
def parser(databases_directory, download=True):
    relationships = defaultdict(set)
    directory = os.path.join(databases_directory, "FooDB")
    builder_utils.checkDirectory(directory)
    config = builder_utils.get_config(config_name="foodbConfig.yml", data_type='databases')

    database_url = config['database_url']
    entities_header = config['entities_header']
    relationships_headers = config['relationships_headers']
    tar_fileName = os.path.join(directory, database_url.split('/')[-1])
    if download:
        builder_utils.downloadDB(database_url, directory)

    contents = {}
    food = set()
    compounds = {}
    mapping = {}
    try:
        tf = tarfile.open(tar_fileName, 'r')
        file_content = tf.getnames()
        tar_dir = file_content[1]
        tf.extractall(path=directory)
        tf.close()
        for file_name in config['files']:
            path = os.path.join(directory, os.path.join(tar_dir, file_name))
            with open(path, 'r', encoding="utf-8", errors='replace') as f:
                if file_name == "Content.csv":
                    contents = parseContents(f)
                elif file_name == "Food.csv":
                    food, mapping = parseFood(f)
                elif file_name == "Compound.csv":
                    compounds = parseCompounds(f)
        for food_id, compound_id in contents:
            if compound_id in compounds:
                compound_code = compounds[compound_id].replace("HMDB", "HMDB00")
                relationships[("food", "has_content")].add((food_id, compound_code, "HAS_CONTENT") + contents[(food_id, compound_id)])
        mp.reset_mapping(entity="Food")
        with open(os.path.join(directory, "mapping.tsv"), 'w', encoding='utf-8') as out:
            for food_id in mapping:
                for alias in mapping[food_id]:
                    out.write(str(food_id)+"\t"+str(alias)+"\n")

        mp.mark_complete_mapping(entity="Food")
    except tarfile.ReadError as err:
        raise Exception("Error importing database FooDB.\n {}".format(err))

    builder_utils.remove_directory(directory)

    return food, relationships, entities_header, relationships_headers
Example #6
def parser(databases_directory, download=True):
    relationships = set()
    config = builder_utils.get_config(config_name="mutationDsConfig.yml",
                                      data_type='databases')
    header = config['header']
    output_file_name = "mutation_curated_affects_interaction_with.tsv"
    regex = r":(\w+)\("
    url = config['mutations_url']
    directory = os.path.join(databases_directory, "MutationDs")
    builder_utils.checkDirectory(directory)
    file_name = os.path.join(directory, url.split('/')[-1])
    if download:
        builder_utils.downloadDB(url, directory)

    with open(file_name, 'r') as mf:
        first = True
        for line in mf:
            if first:
                first = False
                continue
            data = line.rstrip("\r\n").split("\t")
            if len(data) > 12:
                internal_id = data[0]
                pvariant = data[1]
                effect = data[5]
                protein = data[7].split(':')
                organism = data[10]
                interaction = data[11]
                evidence = data[12]

                if organism.startswith("9606") and len(protein) > 1:
                    protein = protein[1]
                    pvariant = protein + "_" + pvariant
                    matches = re.finditer(regex, interaction)
                    for matchNum, match in enumerate(matches, start=1):
                        interactor = match.group(1)
                        relationships.add((pvariant, interactor,
                                           "CURATED_AFFECTS_INTERACTION_WITH",
                                           effect, interaction, evidence,
                                           internal_id, "Intact-MutationDs"))

    builder_utils.remove_directory(directory)

    return (relationships, header, output_file_name)
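
The interactor accessions are pulled out of the interaction column with re.finditer over the pattern r":(\w+)\(", which captures the token between a colon and an opening parenthesis. A self-contained illustration on an IntAct-style string (the string itself is made up):

import re

interaction = "uniprotkb:P12345(pro)|uniprotkb:Q67890(pro)"
print([m.group(1) for m in re.finditer(r":(\w+)\(", interaction)])
# ['P12345', 'Q67890']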
Example #7
def parser(databases_dir, download=True):
    config = builder_utils.get_config(config_name="goaConfig.yml",
                                      data_type='databases')
    url = config['url']
    rel_header = config['header']

    protein_mapping = mp.getMappingForEntity(entity="Protein")
    valid_proteins = list(set(protein_mapping.values()))

    directory = os.path.join(databases_dir, "GOA")
    builder_utils.checkDirectory(directory)
    file_name = os.path.join(directory, url.split('/')[-1])
    if download:
        builder_utils.downloadDB(url, directory)

    annotations = parse_annotations_with_pandas(file_name, valid_proteins)

    builder_utils.remove_directory(directory)

    return annotations, rel_header
Example #8
def parser(databases_directory, download=True):
    config = builder_utils.get_config(
        config_name="drugGeneInteractionDBConfig.yml", data_type='databases')
    url = config['DGIdb_url']
    header = config['header']
    output_file = "dgidb_targets.tsv"
    drugmapping = mp.getMappingForEntity("Drug")

    relationships = set()
    directory = os.path.join(databases_directory, "DGIdb")
    builder_utils.checkDirectory(directory)
    fileName = os.path.join(directory, url.split('/')[-1])
    if download:
        builder_utils.downloadDB(url, directory)
    with open(fileName, 'r', encoding='utf-8') as associations:
        first = True
        for line in associations:
            if first:
                first = False
                continue
            data = line.rstrip("\r\n").split("\t")
            gene = data[0]
            source = data[3]
            interactionType = data[4] if data[4] != '' else 'unknown'
            drug = data[8].lower()
            if drug == "":
                drug = data[7]
                if drug == "" and data[6] != "":
                    drug = data[6]
                else:
                    continue
            if gene != "":
                if drug in drugmapping:
                    drug = drugmapping[drug]
                    relationships.add((drug, gene, "TARGETS", "NA", "NA", "NA",
                                       interactionType, "DGIdb: " + source))

    builder_utils.remove_directory(directory)

    return (relationships, header, output_file)
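
The drug-name lookup above tries three columns in priority order and skips the row only when all of them are empty. The same pattern in isolation, with invented rows:

rows = [
    ("EGFR", "", "", "gefitinib"),      # preferred column populated
    ("EGFR", "", "erlotinib", ""),      # second choice used
    ("EGFR", "", "", ""),               # no drug name anywhere -> skipped
]
for gene, c6, c7, c8 in rows:
    drug = c8.lower() or c7.lower() or c6.lower()
    if not drug:
        continue
    print(gene, drug)
# EGFR gefitinib
# EGFR erlotinib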
Example #9
def parser(databases_directory, download=True):
    config = builder_utils.get_config(config_name="pathwayCommonsConfig.yml",
                                      data_type='databases')
    url = config['pathwayCommons_pathways_url']
    entities = set()
    relationships = set()
    directory = os.path.join(databases_directory, "PathwayCommons")
    builder_utils.checkDirectory(directory)
    fileName = url.split('/')[-1]
    entities_header = config['pathways_header']
    relationships_header = config['relationships_header']

    if download:
        builder_utils.downloadDB(url, directory)
    f = os.path.join(directory, fileName)
    associations = gzip.open(f, 'r')
    for line in associations:
        data = line.decode('utf-8').rstrip("\r\n").split("\t")
        linkout = data[0]
        code = data[0].split("/")[-1]
        ptw_dict = dict([item.split(": ")[0], ":".join(item.split(": ")[1:])]
                        for item in data[1].split("; "))
        proteins = data[2:]
        if "organism" in ptw_dict and ptw_dict["organism"] == "9606":
            name = ptw_dict["name"]
            source = ptw_dict["datasource"]
        else:
            continue

        entities.add((code, "Pathway", name, name, ptw_dict["organism"],
                      source, linkout))
        for protein in proteins:
            relationships.add((protein, code, "ANNOTATED_IN_PATHWAY", linkout,
                               "PathwayCommons: " + source))

    associations.close()

    builder_utils.remove_directory(directory)

    return (entities, relationships, entities_header, relationships_header)
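
Column 2 of the PathwayCommons dump packs metadata as "key: value" pairs separated by "; ", which the dict comprehension above unpacks; values that themselves contain colons survive because only the first ": " splits the key off. For instance (made-up line):

column = "name: Signaling by EGFR; datasource: reactome; organism: 9606; idtype: uniprot"
ptw_dict = dict([item.split(": ")[0], ":".join(item.split(": ")[1:])]
                for item in column.split("; "))
print(ptw_dict["name"], ptw_dict["organism"])
# Signaling by EGFR 9606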
Example #10
def parser(databases_directory, download=True):
    directory = os.path.join(databases_directory, "ExposomeExplorer")
    builder_utils.checkDirectory(directory)
    config = builder_utils.get_config(config_name="exposomeConfig.yml", data_type='databases')
    database_urls = config['database_urls']
    relationships_header = config['relationships_header']
    mapping = mp.getMappingForEntity("Food")
    correlations = {}
    biomarkers = {}
    for url in database_urls:
        zipped_fileName = os.path.join(directory, url.split('/')[-1])
        file_name = '.'.join(url.split('/')[-1].split('.')[0:2])
        if download:
            builder_utils.downloadDB(url, directory)

        with zipfile.ZipFile(zipped_fileName) as z:
            if file_name == "biomarkers.csv":
                biomarkers = parseBiomarkersFile(z, file_name)
            elif file_name == "correlations.csv":
                correlations = parseCorrelationsFile(z, file_name, biomarkers, mapping)

    builder_utils.remove_directory(directory)

    return correlations, relationships_header
Example #11
def parser(databases_directory, download=True):
    config = builder_utils.get_config(config_name="hgncConfig.yml",
                                      data_type='databases')
    url = config['hgnc_url']
    entities = set()
    directory = os.path.join(databases_directory, "HGNC")
    builder_utils.checkDirectory(directory)
    fileName = os.path.join(directory, url.split('/')[-1])
    taxid = 9606
    entities_header = config['header']

    if download:
        builder_utils.downloadDB(url, directory)

    with open(fileName, 'r', encoding="utf-8") as df:
        first = True
        for line in df:
            if first:
                first = False
                continue
            data = line.rstrip("\r\n").split("\t")
            geneSymbol = data[1]
            geneName = data[2]
            status = data[5]
            geneFamily = data[12]
            synonyms = data[18:23]
            transcript = data[23]
            if status != "Approved":
                continue

            entities.add((geneSymbol, "Gene", geneName, geneFamily,
                          ",".join(synonyms), taxid))
            #relationships.add((geneSymbol, transcript, "TRANSCRIBED_INTO"))

    builder_utils.remove_directory(directory)

    return entities, entities_header
Example #12
def parser(databases_directory, download=True):
    config = builder_utils.get_config(config_name="hpaConfig.yml",
                                      data_type='databases')
    url = config['hpa_pathology_url']
    disease_mapping = mp.getMappingFromOntology(ontology="Disease",
                                                source=None)
    protein_mapping = mp.getMultipleMappingForEntity("Protein")
    directory = os.path.join(databases_directory, "HPA")
    builder_utils.checkDirectory(directory)
    compressed_fileName = os.path.join(directory, url.split('/')[-1])
    file_name = '.'.join(url.split('/')[-1].split('.')[0:2])
    relationships_headers = config['relationships_headers']

    if download:
        builder_utils.downloadDB(url, directory)

    pathology = {}
    with zipfile.ZipFile(compressed_fileName) as z:
        if file_name == "pathology.tsv":
            pathology = parsePathologyFile(config, z, file_name,
                                           protein_mapping, disease_mapping)

    builder_utils.remove_directory(directory)

    return (pathology, relationships_headers)
Example #13
def parser(databases_directory,
           import_directory,
           download=True,
           updated_on=None):
    config = builder_utils.get_config(config_name="pfamConfig.yml",
                                      data_type='databases')
    entity_header = config['entity_header']
    relationship_headers = config['relationship_headers']

    directory = os.path.join(databases_directory, 'Pfam')
    builder_utils.checkDirectory(directory)
    protein_mapping = mp.getMappingForEntity(entity="Protein")
    valid_proteins = list(set(protein_mapping.values()))

    ftp_url = config['ftp_url']
    filename = config['full_uniprot_file']
    # url = config['test']

    if not os.path.exists(os.path.join(directory, filename)):
        if download:
            builder_utils.downloadDB(ftp_url + filename, directory)

    stats = set()
    if os.path.exists(os.path.join(directory, filename)):
        fhandler = builder_utils.read_gzipped_file(
            os.path.join(directory, filename))
        identifier = None
        name = ''
        description = []
        lines = []
        missed = 0
        entities = set()
        relationships = defaultdict(set)
        is_first = True
        i = 0
        read_lines = 0
        num_entities = 0
        num_relationships = defaultdict(int)
        try:
            for line in fhandler:
                i += 1
                read_lines += 1
                if line.startswith("# STOCKHOLM"):
                    if identifier is not None:
                        entities.add((identifier, 'Functional_region', name,
                                      " ".join(description), "PFam"))
                        if len(entities) == 100:
                            print_files(entities,
                                        entity_header,
                                        outputfile=os.path.join(
                                            import_directory,
                                            'Functional_region.tsv'),
                                        is_first=is_first)
                            num_entities += len(entities)
                            if 'mentioned_in_publication' in relationships:
                                print_files(
                                    relationships['mentioned_in_publication'],
                                    relationship_headers[
                                        'mentioned_in_publication'],
                                    outputfile=os.path.join(
                                        import_directory,
                                        'Functional_region_mentioned_in_publication.tsv'
                                    ),
                                    is_first=is_first)
                                if 'mentioned_in_publication' not in num_relationships:
                                    num_relationships[
                                        'mentioned_in_publication'] = 0
                                num_relationships[
                                    'mentioned_in_publication'] += len(
                                        relationships[
                                            'mentioned_in_publication'])
                            if 'found_in_protein' in relationships:
                                print_files(
                                    relationships['found_in_protein'],
                                    relationship_headers['found_in_protein'],
                                    outputfile=os.path.join(
                                        import_directory,
                                        'Functional_region_found_in_protein.tsv'
                                    ),
                                    is_first=is_first,
                                    filter_for=('END_ID', valid_proteins))
                                if 'found_in_protein' not in num_relationships:
                                    num_relationships['found_in_protein'] = 0
                                num_relationships['found_in_protein'] += len(
                                    relationships['found_in_protein'])
                            entities = set()
                            relationships = defaultdict(set)
                            is_first = False
                        identifier = None
                        description = []
                elif line.startswith("#=GF"):
                    data = line.rstrip('\r\n').split()
                    if 'AC' in data:
                        identifier = data[2].split('.')[0]
                    elif 'DE' in data:
                        name = " ".join(data[2:])
                    elif 'RM' in data:
                        relationships['mentioned_in_publication'].add(
                            (identifier, data[2], "MENTIONED_IN_PUBLICATION",
                             "PFam"))
                    elif 'CC' in data:
                        description.append(" ".join(data[2:]))
                elif not line.startswith('//'):
                    data = line.rstrip('\r\n').split()
                    protein, positions = data[0].split('/')
                    protein = protein.replace('.', '-')
                    start, end = positions.split('-')
                    sequence = data[1]
                    relationships['found_in_protein'].add(
                        (identifier, protein, "FOUND_IN_PROTEIN", start, end,
                         sequence, "PFam"))
                    if protein.split('-')[0] != protein:
                        relationships['found_in_protein'].add(
                            (identifier, protein.split('-')[0],
                             "FOUND_IN_PROTEIN", start, end, sequence, "PFam"))
        except UnicodeDecodeError:
            lines.append(i)
            missed += 1

        fhandler.close()

        if len(entities) > 0:
            print_files(entities,
                        entity_header,
                        outputfile=os.path.join(import_directory,
                                                'Functional_region.tsv'),
                        is_first=is_first)
            num_entities += len(entities)
            print_files(relationships['mentioned_in_publication'],
                        relationship_headers['mentioned_in_publication'],
                        outputfile=os.path.join(
                            import_directory,
                            'Functional_region_mentioned_in_publication.tsv'),
                        is_first=is_first)
            num_relationships['mentioned_in_publication'] += len(
                relationships['mentioned_in_publication'])
            print_files(relationships['found_in_protein'],
                        relationship_headers['found_in_protein'],
                        outputfile=os.path.join(
                            import_directory,
                            'Functional_region_found_in_protein.tsv'),
                        is_first=is_first)
            num_relationships['found_in_protein'] += len(
                relationships['found_in_protein'])

        stats.add(
            builder_utils.buildStats(num_entities, "entity",
                                     "Functional_region", "Pfam",
                                     'Functional_region.tsv', updated_on))

        for rel in num_relationships:
            stats.add(
                builder_utils.buildStats(num_relationships[rel],
                                         "relationship", rel.upper(), "Pfam",
                                         'Functional_region_' + rel + '.tsv',
                                         updated_on))

    builder_utils.remove_directory(directory)

    return stats
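
The Pfam parser streams a Stockholm-format dump and flushes every 100 entities so the whole file never sits in memory; is_first controls whether the header row is written. The flush pattern in miniature, with a stand-in writer rather than CKG's print_files helper and invented records:

def flush(batch, is_first):
    # Stand-in for print_files: append the batch, header only on the first call.
    mode = 'w' if is_first else 'a'
    with open('Functional_region.tsv', mode, encoding='utf-8') as out:
        if is_first:
            out.write("ID\ttype\tname\tdescription\tsource\n")
        out.writelines("\t".join(map(str, row)) + "\n" for row in batch)

entities, is_first, total = set(), True, 0
for i in range(250):   # pretend these are parsed Stockholm records
    entities.add(("PF{:05d}".format(i), "Functional_region",
                  "domain_{}".format(i), "", "PFam"))
    if len(entities) == 100:
        flush(entities, is_first)
        total += len(entities)
        entities, is_first = set(), False
if entities:           # final partial batch
    flush(entities, is_first)
    total += len(entities)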
Example #14
def parser(databases_directory, download=True):
    config = builder_utils.get_config(config_name="oncokbConfig.yml",
                                      data_type='databases')
    url_actionable = config['OncoKB_actionable_url']
    url_annotation = config['OncoKB_annotated_url']
    amino_acids = config['amino_acids']
    entities_header = config['entities_header']
    relationships_headers = config['relationships_headers']
    mapping = mp.getMappingFromOntology(ontology="Disease", source=None)

    drug_mapping = mp.getMappingForEntity("Drug")
    protein_mapping = mp.getMultipleMappingForEntity("Protein")

    levels = config['OncoKB_levels']
    entities = set()
    relationships = defaultdict(set)
    directory = os.path.join(databases_directory, "OncoKB")
    builder_utils.checkDirectory(directory)
    acfileName = os.path.join(directory, url_actionable.split('/')[-1])
    anfileName = os.path.join(directory, url_annotation.split('/')[-1])
    if download:
        builder_utils.downloadDB(url_actionable, directory)
        builder_utils.downloadDB(url_annotation, directory)

    variant_regex = r"(\D\d+\D)$"
    with open(anfileName, 'r', errors='replace') as variants:
        first = True
        for line in variants:
            if first:
                first = False
                continue
            data = line.rstrip("\r\n").split("\t")
            gene = data[3]
            variant = data[4]
            oncogenicity = data[5]
            effect = data[6]
            if gene in protein_mapping:
                for protein in protein_mapping[gene]:
                    match = re.search(variant_regex, variant)
                    if match:
                        if variant[0] in amino_acids and variant[
                                -1] in amino_acids:
                            valid_variant = protein + '_p.' + amino_acids[
                                variant[0]] + ''.join(
                                    variant[1:-1]) + amino_acids[variant[-1]]
                            entities.add(
                                (valid_variant, "Clinically_relevant_variant",
                                 "", "", "", "", "", effect, oncogenicity))

    with open(acfileName, 'r', errors='replace') as associations:
        first = True
        for line in associations:
            if first:
                first = False
                continue
            data = line.rstrip("\r\n").split("\t")
            isoform = data[1]
            gene = data[3]
            variant = data[5]
            disease = data[6]
            level = data[7]
            drugs = data[8].split(', ')
            pubmed_ids = data[9].split(',')
            if level in levels:
                level = levels[level]

            valid_variants = []
            if gene in protein_mapping:
                for protein in protein_mapping[gene]:
                    match = re.search(variant_regex, variant)
                    if match:
                        if variant[0] in amino_acids and variant[
                                -1] in amino_acids:
                            valid_variants.append(protein + '_p.' +
                                                  amino_acids[variant[0]] +
                                                  ''.join(variant[1:-1]) +
                                                  amino_acids[variant[-1]])
            for drug in drugs:
                for d in drug.split(' + '):
                    if d.lower() in drug_mapping:
                        drug = drug_mapping[d.lower()]
                        relationships["targets"].add(
                            (drug, gene, "CURATED_TARGETS", "curated", "NA",
                             "NA", "curated", "OncoKB"))
                        for valid_variant in valid_variants:
                            relationships[
                                "targets_clinically_relevant_variant"].add(
                                    (drug, valid_variant,
                                     "TARGETS_KNOWN_VARIANT", level[0],
                                     level[1], disease, "curated", "OncoKB"))
            for valid_variant in valid_variants:
                if disease.lower() in mapping:
                    disease = mapping[disease.lower()]
                    relationships["associated_with"].add(
                        (valid_variant, disease, "ASSOCIATED_WITH", "curated",
                         "curated", "OncoKB", len(pubmed_ids)))
                relationships["known_variant_is_clinically_relevant"].add(
                    (valid_variant, valid_variant,
                     "KNOWN_VARIANT_IS_CLINICALLY_RELEVANT", "OncoKB"))

    builder_utils.remove_directory(directory)

    return (entities, relationships, entities_header, relationships_headers)
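
Both this parser and the CGI parser below validate a protein variant with r"(\D\d+\D)$" (a non-digit, a position, a non-digit at the end of the string) and then expand the one-letter amino-acid codes via the config's amino_acids table. A reduced version of that check; the two-entry table is an illustrative subset and the accession is for illustration only:

import re

amino_acids = {"V": "Val", "E": "Glu"}   # illustrative subset of the config table
variant_regex = r"(\D\d+\D)$"

protein, variant = "P15056", "V600E"     # identifiers for illustration only
match = re.search(variant_regex, variant)
if match and variant[0] in amino_acids and variant[-1] in amino_acids:
    valid_variant = protein + '_p.' + amino_acids[variant[0]] + variant[1:-1] + amino_acids[variant[-1]]
    print(valid_variant)
# P15056_p.Val600Glu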
Example #15
def run_processing(n_clicks, project_id):
    message = None
    style = {'display': 'none'}
    table = None
    if n_clicks > 0:
        session_cookie = flask.request.cookies.get('custom-auth-session')
        destDir = os.path.join(experimentDir, project_id)
        builder_utils.checkDirectory(destDir)
        temporaryDirectory = os.path.join(tmpDirectory,
                                          session_cookie + "upload")
        datasets = builder_utils.listDirectoryFoldersNotEmpty(
            temporaryDirectory)
        res_n = dataUpload.check_samples_in_project(driver, project_id)
        if 'experimental_design' in datasets:
            dataset = 'experimental_design'
            directory = os.path.join(temporaryDirectory, dataset)
            experimental_files = os.listdir(directory)
            if config['file_design'].replace('PROJECTID',
                                             project_id) in experimental_files:
                experimental_filename = config['file_design'].replace(
                    'PROJECTID', project_id)
                designData = builder_utils.readDataset(
                    os.path.join(directory, experimental_filename))
                designData = designData.astype(str)
                if 'subject external_id' in designData.columns and 'biological_sample external_id' in designData.columns and 'analytical_sample external_id' in designData.columns:
                    if (res_n > 0).any().values.sum() > 0:
                        res = dataUpload.remove_samples_nodes_db(
                            driver, project_id)
                        res_n = dataUpload.check_samples_in_project(
                            driver, project_id)
                        if (res_n > 0).any().values.sum() > 0:
                            message = 'ERROR: There is already an experimental design loaded into the database and there was an error when trying to delete it. Contact your administrator.'
                            return message, style, table

                    res_n = None
                    result = create_new_identifiers.apply_async(
                        args=[
                            project_id,
                            designData.to_json(), directory,
                            experimental_filename
                        ],
                        task_id='data_upload_' + session_cookie +
                        datetime.now().strftime('%Y%m-%d%H-%M%S-'))
                    result_output = result.wait(timeout=None,
                                                propagate=True,
                                                interval=0.2)
                    res_n = pd.DataFrame.from_dict(result_output['res_n'])
                else:
                    message = 'ERROR: The Experimental design file provided ({}) is missing some of the required fields: {}'.format(
                        experimental_filename, ','.join([
                            'subject external_id',
                            'biological_sample external_id',
                            'analytical_sample external_id'
                        ]))
                    builder_utils.remove_directory(directory)

                    return message, style, table

        if 'clinical' in datasets:
            dataset = 'clinical'
            directory = os.path.join(temporaryDirectory, dataset)
            clinical_files = os.listdir(directory)
            if config['file_clinical'].replace('PROJECTID',
                                               project_id) in clinical_files:
                clinical_filename = config['file_clinical'].replace(
                    'PROJECTID', project_id)
                data = builder_utils.readDataset(
                    os.path.join(directory, clinical_filename))
                external_ids = {}
                if 'subject external_id' in data and 'biological_sample external_id' in data and 'analytical_sample external_id' in data:
                    external_ids['subjects'] = data[
                        'subject external_id'].astype(str).unique().tolist()
                    external_ids['biological_samples'] = data[
                        'biological_sample external_id'].astype(
                            str).unique().tolist()
                    external_ids['analytical_samples'] = data[
                        'analytical_sample external_id'].astype(
                            str).unique().tolist()
                    dataUpload.create_mapping_cols_clinical(
                        driver,
                        data,
                        directory,
                        clinical_filename,
                        separator=separator)
                    if 0 in res_n.values:
                        samples = ', '.join([k for (k, v) in res_n if v == 0])
                        message = 'ERROR: No {} for project {} in the database. Please upload first the experimental design (ExperimentalDesign_{}.xlsx)'.format(
                            samples, project_id, project_id)
                        builder_utils.remove_directory(directory)

                        return message, style, table
                    else:
                        db_ids = dataUpload.check_external_ids_in_db(
                            driver, project_id).to_dict()
                        message = ''
                        intersections = {}
                        differences_in = {}
                        differences_out = {}
                        for col in external_ids:
                            intersect = list(
                                set(db_ids[col].values()).intersection(
                                    external_ids[col]))
                            difference_in = list(
                                set(db_ids[col].values()).difference(
                                    external_ids[col]))
                            difference_out = list(
                                set(external_ids[col]).difference(
                                    set(db_ids[col].values())))
                            if len(difference_in) > 0 or len(
                                    difference_out) > 0:
                                intersections[col] = intersect
                                differences_in[col] = difference_in
                                differences_out[col] = difference_out
                        for col in intersections:
                            message += 'WARNING: Some {} identifiers were not matched:\n Matching: {}\n No information provided: {} \n Non-existing in the database: {}\n'.format(
                                col, len(intersections[col]),
                                ','.join(differences_in[col]),
                                ','.join(differences_out[col]))
                else:
                    message = 'ERROR: Format of the Clinical Data file is not correct. Check template in the documentation. Check columns: subject external_id, biological_sample external_id and analytical_sample external_id'
                    builder_utils.remove_directory(directory)

                    return message, style, table
        try:
            for dataset in datasets:
                source = os.path.join(temporaryDirectory, dataset)
                destination = os.path.join(destDir, dataset)
                builder_utils.copytree(source, destination)
                datasetPath = os.path.join(
                    os.path.join(experimentsImportDir, project_id), dataset)
                if dataset != "experimental_design":
                    eh.generate_dataset_imports(project_id, dataset,
                                                datasetPath)

            loader.partialUpdate(imports=['project', 'experiment'],
                                 specific=[project_id])
            filename = os.path.join(tmpDirectory,
                                    'Uploaded_files_' + project_id)
            utils.compress_directory(filename,
                                     temporaryDirectory,
                                     compression_format='zip')
            style = {'display': 'block'}
            message = 'Files successfully uploaded.'
            table = dataUpload.get_project_information(driver, project_id)
        except Exception as err:
            style = {'display': 'block'}
            message = str(err)

    return message, style, table
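
The identifier check near the end of this callback reduces to three set operations per column: the overlap, what exists only in the database, and what exists only in the upload. In isolation (identifiers invented):

db_ids = {"S1", "S2", "S3"}
uploaded = {"S2", "S3", "S4"}

intersect = db_ids & uploaded        # matched on both sides
only_in_db = db_ids - uploaded       # in the database, missing from the file
only_in_file = uploaded - db_ids     # in the file, unknown to the database

print(sorted(intersect), sorted(only_in_db), sorted(only_in_file))
# ['S2', 'S3'] ['S1'] ['S4']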
Example #16
def parser(databases_directory, download=True):
    variant_regex = r"(\D\d+\D)$"
    regex = r"(chr\d+)\:g\.(\d+)(\w)>(\w)"
    config = builder_utils.get_config(
        config_name="cancerGenomeInterpreterConfig.yml", data_type='databases')
    url = config['cancerBiomarkers_url']
    entities_header = config['entities_header']
    relationships_headers = config['relationships_headers']
    amino_acids = config['amino_acids']
    mapping = mp.getMappingFromOntology(ontology="Disease", source=None)
    drugmapping = mp.getMappingForEntity("Drug")
    protein_mapping = mp.getMultipleMappingForEntity("Protein")

    fileName = config['cancerBiomarkers_variant_file']
    relationships = defaultdict(set)
    entities = set()
    directory = os.path.join(databases_directory, "CancerGenomeInterpreter")
    builder_utils.checkDirectory(directory)
    zipFile = os.path.join(directory, url.split('/')[-1])

    if download:
        builder_utils.downloadDB(url, directory)
    with zipfile.ZipFile(zipFile) as z:
        if fileName in z.namelist():
            with z.open(fileName, 'r') as responses:
                first = True
                for line in responses:
                    if first:
                        first = False
                        continue
                    data = line.decode('utf-8').rstrip("\r\n").split("\t")
                    gene_variant = data[0].split(':')
                    if len(gene_variant) < 2:
                        continue
                    gene = gene_variant[0]
                    variants = gene_variant[1].split(',')
                    #alterationType = data[1]
                    response = data[3]
                    drugs = data[10].split(';')
                    #status = data[11].split(';')
                    evidence = data[12]
                    tumors = data[16].split(';')
                    publications = data[17].split(';')
                    identifier = data[21]
                    prot_variant = data[22]
                    matches = re.match(regex, identifier)
                    alternative_names = [identifier]
                    chromosome = position = reference = alternative = ""
                    if matches is not None:
                        cpra = matches.groups()
                        chromosome, position, reference, alternative = cpra
                        variant = chromosome + ":g." + position + reference + ">" + alternative
                        if prot_variant != "":
                            prot_variant = prot_variant.split(':')[1]
                            alternative_names.append(prot_variant)

                    valid_variants = []
                    if gene in protein_mapping:
                        for protein in protein_mapping[gene]:
                            for variant in variants:
                                match = re.search(variant_regex, variant)
                                if match:
                                    if variant[0] in amino_acids and variant[
                                            -1] in amino_acids:
                                        valid_variant = protein + '_p.' + amino_acids[
                                            variant[0]] + ''.join(
                                                variant[1:-1]) + amino_acids[
                                                    variant[-1]]
                                        valid_variants.append(valid_variant)
                                        entities.add(
                                            (valid_variant,
                                             "Clinically_relevant_variant",
                                             ",".join(alternative_names),
                                             chromosome, position, reference,
                                             alternative, "", "", "CGI"))
                                        relationships[
                                            "known_variant_is_clinically_relevant"].add(
                                                (valid_variant, valid_variant,
                                                 "KNOWN_VARIANT_IS_CLINICALLY_RELEVANT",
                                                 "CGI"))

                    for drug in drugs:
                        if drug.lower() in drugmapping:
                            drug = drugmapping[drug.lower()]
                        elif drug.split(" ")[0].lower() in drugmapping:
                            drug = drugmapping[drug.split(" ")[0].lower()]
                        elif " ".join(
                                drug.split(" ")[1:]).lower() in drugmapping:
                            drug = drugmapping[" ".join(
                                drug.split(" ")[1:]).lower()]
                        relationships["targets"].add(
                            (drug, gene, "CURATED_TARGETS", evidence, response,
                             ",".join(tumors), "curated", "CGI"))

                        for valid_variant in valid_variants:
                            relationships[
                                "targets_clinically_relevant_variant"].add(
                                    (drug, valid_variant,
                                     "TARGETS_CLINICALLY_RELEVANT_VARIANT",
                                     evidence, response, "".join(tumors),
                                     "curated", "CGI"))

                    for tumor in tumors:
                        if tumor.lower() in mapping:
                            tumor = mapping[tumor.lower()]
                            for valid_variant in valid_variants:
                                relationships["associated_with"].add(
                                    (valid_variant, tumor, "ASSOCIATED_WITH",
                                     "curated", "curated", "CGI",
                                     len(publications)))

    builder_utils.remove_directory(directory)

    return (entities, relationships, entities_header, relationships_headers)
Example #17
def parser(databases_directory, download=True):
    entities = set()
    relationships = defaultdict(set)
    directory = os.path.join(databases_directory, "CORUM")
    builder_utils.checkDirectory(directory)

    try:
        config = builder_utils.get_config(config_name="corumConfig.yml",
                                          data_type='databases')
    except Exception as err:
        raise Exception("Reading configuration > {}.".format(err))

    database_url = config['database_url']
    entities_header = config['entities_header']
    relationships_headers = config['relationships_headers']
    zipped_fileName = os.path.join(directory, database_url.split('/')[-1])
    fileName = '.'.join(database_url.split('/')[-1].split('.')[0:2])
    if download:
        builder_utils.downloadDB(database_url, directory)
    names = set()
    first = True
    with zipfile.ZipFile(zipped_fileName) as z:
        with z.open(fileName) as f:
            for line in f:
                if first:
                    first = False
                    continue
                data = line.decode("utf-8").rstrip("\r\n").split("\t")
                identifier = data[0]
                name = data[1]
                organism = data[2]
                synonyms = data[3].split(';') if data[3] != "None" else [""]
                cell_lines = data[4].split(';')
                subunits = data[5].split(';')
                evidences = data[7].split(';')
                processes = data[8].split(';')
                pubmedid = data[14]

                if organism == "Human":
                    #ID name organism synonyms source
                    if name not in names:
                        entities.add((identifier, name, "9606",
                                      ",".join(synonyms), "CORUM"))
                        names.add(name)
                    for subunit in subunits:
                        #START_ID END_ID type cell_lines evidences publication source
                        relationships[("Protein", "is_subunit_of")].add(
                            (subunit, identifier, "IS_SUBUNIT_OF",
                             ",".join(cell_lines), ",".join(evidences),
                             pubmedid, "CORUM"))
                    for process in processes:
                        #START_ID END_ID type evidence_type score source
                        relationships["Biological_process",
                                      "associated_with"].add(
                                          (identifier, process,
                                           "ASSOCIATED_WITH", "CURATED", 5,
                                           "CORUM"))

    builder_utils.remove_directory(directory)

    return entities, relationships, entities_header, relationships_headers
Example #18
def parser(databases_directory, download=True):
    intact_dictionary = defaultdict()
    stored = set()
    relationships = set()
    config = builder_utils.get_config(config_name="intactConfig.yml",
                                      data_type='databases')
    header = config['header']
    outputfileName = "intact_interacts_with.tsv"
    regex = r"\((.*)\)"
    taxid_regex = r"\:(\d+)"
    url = config['intact_psimitab_url']
    directory = os.path.join(databases_directory, "Intact")
    builder_utils.checkDirectory(directory)
    fileName = os.path.join(directory, url.split('/')[-1])
    if download:
        builder_utils.downloadDB(url, directory)

    with open(fileName, 'r', encoding="utf-8") as idf:
        first = True
        for line in idf:
            if first:
                first = False
                continue
            data = line.rstrip("\r\n").split("\t")
            intA = data[0].split(":")[1]
            intB = data[1].split(':')
            if len(intB) > 1:
                intB = intB[1]
            else:
                continue
            methodMatch = re.search(regex, data[6])
            method = methodMatch.group(1) if methodMatch else "unknown"
            publications = data[8]
            tAmatch = re.search(taxid_regex, data[9])
            tBmatch = re.search(taxid_regex, data[10])
            taxidA = ""
            taxidB = ""
            if tAmatch and tBmatch:
                taxidA = tAmatch.group(1)
                taxidB = tBmatch.group(1)
            itypeMatch = re.search(regex, data[11])
            itype = itypeMatch.group(1) if itypeMatch else "unknown"
            sourceMatch = re.search(regex, data[12])
            source = sourceMatch.group(1) if sourceMatch else "unknown"
            score = data[14].split(":")[1]
            if builder_utils.is_number(score):
                score = float(score)
            else:
                continue
            if taxidA == "9606" and taxidB == "9606":
                if (intA, intB) in intact_dictionary:
                    intact_dictionary[(intA, intB)]['methods'].add(method)
                    intact_dictionary[(intA, intB)]['sources'].add(source)
                    intact_dictionary[(intA, intB)]['publications'].add(
                        publications.replace('|', ','))
                    intact_dictionary[(intA, intB)]['itype'].add(itype)
                else:
                    intact_dictionary[(intA, intB)] = {
                        'methods': set([method]),
                        'sources': set([source]),
                        'publications': set([publications]),
                        'itype': set([itype]),
                        'score': score
                    }
    for (intA, intB) in intact_dictionary:
        if (intA, intB, intact_dictionary[(intA,
                                           intB)]["score"]) not in stored:
            relationships.add(
                (intA, intB, "CURATED_INTERACTS_WITH",
                 intact_dictionary[(intA, intB)]['score'],
                 ",".join(intact_dictionary[(intA, intB)]['itype']),
                 ",".join(intact_dictionary[(intA, intB)]['methods']),
                 ",".join(intact_dictionary[(intA, intB)]['sources']),
                 ",".join(intact_dictionary[(intA, intB)]['publications'])))
            stored.add((intA, intB, intact_dictionary[(intA, intB)]["score"]))

    builder_utils.remove_directory(directory)

    return (relationships, header, outputfileName)
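
The IntAct parser accumulates one dict entry per (A, B) pair, merging methods, sources, publications and interaction types across duplicate rows before emitting a single relationship. The merge step on its own, with invented rows:

rows = [
    ("P04637", "Q00987", "two hybrid", "intact", "pubmed:111", "physical association", 0.7),
    ("P04637", "Q00987", "pull down", "intact", "pubmed:222", "direct interaction", 0.7),
]
pairs = {}
for intA, intB, method, source, pub, itype, score in rows:
    entry = pairs.setdefault((intA, intB), {
        'methods': set(), 'sources': set(), 'publications': set(),
        'itype': set(), 'score': score})
    entry['methods'].add(method)
    entry['sources'].add(source)
    entry['publications'].add(pub)
    entry['itype'].add(itype)

for (intA, intB), e in pairs.items():
    print(intA, intB, e['score'], ",".join(sorted(e['methods'])))
# P04637 Q00987 0.7 pull down,two hybrid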
Example #19
def parser(databases_directory, download=True):
    config = builder_utils.get_config(config_name="refseqConfig.yml",
                                      data_type='databases')
    url = config['refseq_url']
    ftp_dir = config['refseq_ftp_dir']
    entities = defaultdict(set)
    relationships = defaultdict(set)
    directory = os.path.join(databases_directory, "RefSeq")
    builder_utils.checkDirectory(directory)
    fileName = os.path.join(directory, url.split('/')[-1])
    headers = config['headerEntities']
    taxid = 9606

    if download:
        file_dir = builder_utils.list_ftp_directory(ftp_dir)[0]
        new_file = file_dir.split('/')[-1] + "_feature_table.txt.gz"
        url = ftp_dir + file_dir.split('/')[-1] + "/" + new_file
        builder_utils.downloadDB(url, directory)
        fileName = os.path.join(directory, new_file)

    if os.path.isfile(fileName):
        df = builder_utils.read_gzipped_file(fileName)
        first = True
        for line in df:
            if first:
                first = False
                continue
            data = line.rstrip("\r\n").split("\t")
            tclass = data[1]
            assembly = data[2]
            chrom = data[5]
            geneAcc = data[6]
            start = data[7]
            end = data[8]
            strand = data[9]
            protAcc = data[10]
            name = data[13]
            symbol = data[14]

            if protAcc != "":
                entities["Transcript"].add(
                    (protAcc, "Transcript", name, tclass, assembly, taxid))
                if chrom != "":
                    entities["Chromosome"].add(
                        (chrom, "Chromosome", chrom, taxid))
                    relationships["LOCATED_IN"].add(
                        (protAcc, chrom, "LOCATED_IN", start, end, strand,
                         "RefSeq"))
                if symbol != "":
                    relationships["TRANSCRIBED_INTO"].add(
                        (symbol, protAcc, "TRANSCRIBED_INTO", "RefSeq"))
            elif geneAcc != "":
                entities["Transcript"].add(
                    (geneAcc, "Transcript", name, tclass, assembly, taxid))
                if chrom != "":
                    entities["Chromosome"].add(
                        (chrom, "Chromosome", chrom, taxid))
                    relationships["LOCATED_IN"].add(
                        (protAcc, chrom, "LOCATED_IN", start, end, strand,
                         "RefSeq"))
        df.close()

    builder_utils.remove_directory(directory)

    return (entities, relationships, headers)