def _main(save: bool):
    """Run the processor and optionally cache its extracted statements.

    :param save: If true, pickle the extracted statements into the
        PyStow-managed ``indra`` directory for this processor.
    """
    # ``cls``, ``click``, ``pickle`` and ``print_stmt_summary`` are free
    # variables supplied by the enclosing scope/module.
    click.secho(cls.name, fg='green', bold=True)
    processor = cls()
    statements = processor.extract_statements()
    if save:
        import pystow

        cache_path = pystow.join("indra", cls.name, name="stmts.pkl")
        with cache_path.open("wb") as fh:
            pickle.dump(statements, fh, protocol=pickle.HIGHEST_PROTOCOL)
    print_stmt_summary(statements)
def test_get(self):
    """Test the :func:`get` function."""
    # Exercise joins of one, two, and three generated path parts.
    examples = [[n() for _ in range(size)] for size in (1, 2, 3)]
    with self.mock_directory():
        for parts in examples:
            with self.subTest(parts=parts):
                self.assertEqual(self.join(*parts), join(*parts))
def assert_io(self, obj, ext: str, dump, load):
    """Test that an object survives a dump/load round trip.

    :param obj: The object to dump
    :param ext: The extension to use
    :param dump: The dump function
    :param load: The load function
    """
    filename = f"test.{ext}"
    target = pystow.join("test", name=filename)
    # Start from a clean slate so it is the dump that creates the file.
    if target.is_file():
        target.unlink()
    self.assertFalse(target.is_file())
    dump("test", name=filename, obj=obj)
    self.assertTrue(target.is_file())
    self.assertEqual(obj, load("test", name=filename))
# -*- coding: utf-8 -*- """Constants for ComPath's resources.""" import os from pathlib import Path import pystow COMPATH_HOME = pystow.join('compath') HERE = os.path.abspath(os.path.dirname(__file__)) ROOT = Path(os.path.abspath(os.path.join(HERE, os.pardir, os.pardir))) RESOURCES = Path(HERE) / 'resources' DOCS_DIRECTORY = ROOT / 'docs' IMG_DIRECTORY = DOCS_DIRECTORY / 'img' DATA_DIRECTORY = DOCS_DIRECTORY / 'data' DATA_DIRECTORY.mkdir(parents=True, exist_ok=True) _BASE_URL = 'https://raw.githubusercontent.com/ComPath/resources/master/mappings' # Inter-database mappings URLs KEGG_WIKIPATHWAYS_URL = f'{_BASE_URL}/kegg_wikipathways.csv' KEGG_REACTOME_URL = f'{_BASE_URL}/kegg_reactome.csv' WIKIPATHWAYS_REACTOME_URL = f'{_BASE_URL}/wikipathways_reactome.csv' PATHBANK_KEGG_URL = f'{_BASE_URL}/pathbank_kegg.csv' PATHBANK_REACTOME_URL = f'{_BASE_URL}/pathbank_reactome.csv' PATHBANK_WIKIPATHWAYS_URL = f'{_BASE_URL}/pathbank_wikipathways.csv' # Intra-database mappings URLS
import os
import boto3
import pystow
import logging
import botocore
from gilda import __version__

logger = logging.getLogger(__name__)

#: Directory containing this module
HERE = os.path.abspath(os.path.dirname(__file__))
#: Bundled MeSH mappings shipped alongside this module
MESH_MAPPINGS_PATH = os.path.join(HERE, 'mesh_mappings.tsv')

#: Versioned PyStow-managed resource directory ($HOME/.data/gilda/<version>)
resource_dir = pystow.join('gilda', __version__)

GROUNDING_TERMS_BASE_NAME = 'grounding_terms.tsv'
GROUNDING_TERMS_PATH = os.path.join(resource_dir, GROUNDING_TERMS_BASE_NAME)

# Popular organisms per UniProt, see
# https://www.uniprot.org/help/filter_options
popular_organisms = ['9606', '10090', '10116', '9913', '7955', '7227', '6239',
                     '44689', '3702', '39947', '83333', '224308', '559292']


def _download_from_s3(path, base_name):
    """Download a versioned Gilda resource file from S3 anonymously.

    :param path: Directory in which to place the downloaded file
    :param base_name: Base name of the resource file to fetch
    """
    # Unsigned config allows anonymous (credential-free) S3 access
    config = botocore.client.Config(signature_version=botocore.UNSIGNED)
    s3 = boto3.client('s3', config=config)
    tc = boto3.s3.transfer.TransferConfig(use_threads=False)
    # Key is namespaced by the gilda version so resources track releases
    full_key = '%s/%s' % (__version__, base_name)
    out_file = os.path.join(path, base_name)
    # NOTE(review): this excerpt appears to end mid-function — the actual
    # s3.download_file(...) call is presumably below; confirm in the full file.
import botocore
from ftplib import FTP
from io import BytesIO, StringIO
from collections import namedtuple
from urllib.request import urlretrieve
from xml.etree import ElementTree as ET
from . import __version__

# NOTE(review): this excerpt uses ``logging``, ``pystow``, and ``boto3``
# without importing them here — presumably imported above this excerpt.
logger = logging.getLogger('protmapper.resources')
logger.setLevel(logging.INFO)

# If the protmapper resource directory does not exist, try to create it using PyStow
# Can be specified with PROTMAPPER_HOME environment variable, otherwise defaults
# to $HOME/.data/protmapper/<__version__>. The location of $HOME can be overridden with
# the PYSTOW_HOME environment variable
resource_dir_path = pystow.join('protmapper', __version__)
#: String form of the resource directory, used by os.path-based callers
resource_dir = resource_dir_path.as_posix()


def _download_from_s3(key, out_file):
    """Download a versioned protmapper resource file from S3 anonymously.

    :param key: Base name of the resource file within the versioned prefix
    :param out_file: Local path to which the file is downloaded
    """
    # Unsigned config allows anonymous (credential-free) S3 access
    s3 = boto3.client(
        's3',
        config=botocore.client.Config(signature_version=botocore.UNSIGNED))
    tc = boto3.s3.transfer.TransferConfig(use_threads=False)
    # Path to the versioned resource file
    full_key = 'protmapper/%s/%s' % (__version__, key)
    s3.download_file('bigmech', full_key, out_file, Config=tc)


def _download_ftp(ftp_host, ftp_path,
# -*- coding: utf-8 -*-

"""Download registry information from N2T."""

import itertools as itt
from operator import itemgetter

import click
import pystow
import yaml
from more_itertools import pairwise

URL = 'https://n2t.net/e/cdl_ebi_prefixes.yaml'
PATH = pystow.join('bioregistry', name='n2t.json')


def _parse_1(file):
    """Yield groups of key/value pairs parsed from the N2T prefix listing.

    Blank lines and ``#`` comment lines are skipped. Consecutive lines are
    grouped by whether they start with ``-``: a dash-prefixed run yields a
    single ``[(key, value)]`` pair taken from its first line, while every
    other run yields one ``[key, value]`` entry per line.
    """
    meaningful = (
        raw.strip()
        for raw in file
        if not raw.startswith('#') and raw.strip()
    )
    for dashed, run in itt.groupby(meaningful, lambda text: text.startswith('-')):
        if dashed:
            # Only the first line of a consecutive dash-run is kept.
            head = list(run)[0]
            key, value = [
                piece.strip()
                for piece in head.lstrip('-').lstrip().split(':', 1)
            ]
            yield [(key, value)]
        else:
            yield [
                [piece.strip() for piece in entry.split(':', 1)]
                for entry in run
            ]
# -*- coding: utf-8 -*- """Connection configuration for PyBEL.""" import logging import pystow __all__ = [ 'connection', 'PYBEL_MINIMUM_IMPORT_VERSION', 'PYBEL_HOME', ] logger = logging.getLogger(__name__) #: The last PyBEL version where the graph data definition changed PYBEL_MINIMUM_IMPORT_VERSION = 0, 14, 0 PYBEL_HOME = pystow.join('pybel') DEFAULT_CACHE_NAME = 'pybel_{}.{}.{}_cache.db'.format( *PYBEL_MINIMUM_IMPORT_VERSION) DEFAULT_CACHE_PATH = pystow.join('pybel', name=DEFAULT_CACHE_NAME) #: The default cache connection string uses sqlite. DEFAULT_CACHE_CONNECTION = 'sqlite:///' + DEFAULT_CACHE_PATH.as_posix() connection = pystow.get_config( 'pybel', 'connection', default=DEFAULT_CACHE_CONNECTION, )
""" Contains paths to locations on user's system where models and resources are to be stored. These all live in adeft's home folder which defaults to the hidden directory ".adeft" in the user's home directory but which can be specified by setting the environment variable ADEFT_HOME in the user's profile. """ import os import pystow from adeft import __version__ # If the adeft resource directory does not exist, try to create it using PyStow # Can be specified with ADEFT_HOME environment variable, otherwise defaults # to $HOME/.data/adeft/<__version__>. The location of $HOME can be overridden with # the PYSTOW_HOME environment variable ADEFT_HOME = pystow.join('adeft') ADEFT_PATH = os.path.join(ADEFT_HOME, __version__) ADEFT_MODELS_PATH = os.path.join(ADEFT_PATH, 'models') RESOURCES_PATH = os.path.join(ADEFT_PATH, 'resources') GROUNDINGS_FILE_PATH = os.path.join(RESOURCES_PATH, 'groundings.csv') TEST_RESOURCES_PATH = os.path.join(ADEFT_PATH, 'test_resources') S3_BUCKET_URL = f'https://adeft.s3.amazonaws.com/{__version__}'
import pystow # S3 bucket for raw data S3_BUCKET = 'bigmech' # Path to data within bucket S3_DATA_PATH = 'causal_precedence' # Path to where data is stored locally LOCAL_DATA_HOME = pystow.join('causal_precedence_training') # Path to SIGNOR pathways SIGNOR_PATHWAYS_DIRECTORY = pystow.join('causal_precedence_training', 'SIGNOR_pathways') # Path to triples used as input to generate causal precedence datasets TRIPLES_DIRECTORY = pystow.join('causal_precedence_training', 'causal_triples') TRAINING_DATA_EXPORT_DIRECTORY = pystow.join('causal_precedence_training', 'training_data')
import datetime
import enum
import ftplib
import os
from typing import Any, ClassVar, Mapping, Optional, Union

import bioregistry
import pydantic
import pystow
import requests
import requests_ftp
from bs4 import BeautifulSoup
from cachier import cachier

#: PyStow-managed home directory for bioversions
#: (defaults to $HOME/.data/bioversions)
BIOVERSIONS_HOME = pystow.join("bioversions")

#: Directory containing this module
HERE = os.path.abspath(os.path.dirname(__file__))
#: Repository docs/ directory, two levels up from this module
DOCS = os.path.abspath(os.path.join(HERE, os.pardir, os.pardir, "docs"))
IMG = os.path.join(DOCS, "img")

# Adds FTP support to requests sessions (import-time side effect)
requests_ftp.monkeypatch_session()


class VersionType(enum.Enum):
    """Different types of versions."""

    semver = "SemVer (X.Y.Z)"
    date = "CalVer (YYYY-MM-DD)"
    month = "CalVer (YYYY-MM)"
    year = "CalVer (YYYY)"
    year_minor = "CalVer (YYYY.X)"
    # NOTE(review): this excerpt may be truncated — additional enum members
    # could follow in the full file.