Example #1
def parse_config(parse_cli_args: bool = True,
                 config_class: Optional[Dict] = None) -> Dict:
    """
    Parse and validate MS²ReScore configuration files and arguments.

    Default configuration, user configuration files, and CLI/class arguments are parsed
    in cascading order.

    Parameters
    ----------
    parse_cli_args : bool
        Whether to parse command line arguments. Default is True.
    config_class : Dict, optional
        Dictionary with arguments from the Python class; required if `parse_cli_args`
        is False.
    """
    config_schema = pkg_resources.open_text(package_data, "config_schema.json")
    config_default = pkg_resources.open_text(package_data,
                                             "config_default.json")

    # MS²ReScore can be run from the CLI, or as a Python module
    if parse_cli_args:
        args = _parse_arguments()
        config_user = args.config_file
        if config_class:
            raise MS2RescoreConfigurationError(
                "If `parse_cli_args` is True, `config_class` must be None.")
    elif config_class:
        args = None
        config_user = config_class["general"]["config_file"]
    else:
        raise MS2RescoreConfigurationError(
            "If `parse_cli_args` is False, `config_class` arguments are required."
        )

    cascade_conf = CascadeConfig(validation_schema=json.load(config_schema))
    cascade_conf.add_dict(json.load(config_default))
    if config_user:
        cascade_conf.add_json(config_user)
    if parse_cli_args:
        cascade_conf.add_namespace(args, subkey="general")
    elif config_class:
        cascade_conf.add_dict(config_class)
    config = cascade_conf.parse()

    config = _validate_filenames(config)
    config = _validate_num_cpu(config)

    config["general"]["pipeline"] = config["general"]["pipeline"].lower()

    try:
        config["maxquant_to_rescore"]["mgf_title_pattern"] = re.compile(
            config["maxquant_to_rescore"]["mgf_title_pattern"])
    except re.error:
        raise MS2RescoreConfigurationError(
            "Invalid regex in `mgf_title_pattern`; please provide a valid pattern.")

    return config
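The cascading order described in the docstring (package defaults, then the user's configuration file, then CLI or class arguments) can be illustrated without the CascadeConfig helper. A minimal sketch of last-writer-wins merging over plain nested dictionaries; `deep_merge` is a hypothetical helper, not part of ms2rescore:

def deep_merge(base: dict, override: dict) -> dict:
    # Later layers override earlier ones, key by key.
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged

defaults = {"general": {"pipeline": "infer", "num_cpu": -1}}
user_file = {"general": {"pipeline": "maxquant"}}
cli_args = {"general": {"num_cpu": 4}}

config = deep_merge(deep_merge(defaults, user_file), cli_args)
# -> {'general': {'pipeline': 'maxquant', 'num_cpu': 4}}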
Example #2
def db_conn():
    db_conn = db.db_connection(':memory:')
    db_conn.row_factory = sqlite3.Row
    db.init(db_conn)
    with open_text('tests', 'test_ratings.json') as ratings:
        db.add_ratings(db_conn, ratings)
    with open_text('tests', 'test_restaurants.json') as restaurants:
        db.add_restaurants(db_conn, restaurants)
    with open_text('tests', 'test_teammates.json') as teammates:
        db.add_teammates(db_conn, teammates)
    return db_conn
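db_conn reads like the body of a pytest fixture: it builds an in-memory database, seeds it from packaged JSON files, and hands the connection to a test. A hedged sketch of how it could be wired up; the @pytest.fixture decorator and the sample query are assumptions, not part of the original code:

import pytest

@pytest.fixture
def db_conn():
    ...  # body as in the example above

def test_ratings_seeded(db_conn):
    # Hypothetical table name; the real schema lives in db.init().
    rows = db_conn.execute("SELECT * FROM ratings").fetchall()
    assert rows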
Example #3
 def test_open_text_with_errors(self):
     # Raises UnicodeError without the 'errors' argument.
     with resources.open_text(self.data, 'utf-16.file', 'utf-8',
                              'strict') as fp:
         self.assertRaises(UnicodeError, fp.read)
     with resources.open_text(self.data, 'utf-16.file', 'utf-8',
                              'ignore') as fp:
         result = fp.read()
     self.assertEqual(
         result, 'H\x00e\x00l\x00l\x00o\x00,\x00 '
         '\x00U\x00T\x00F\x00-\x001\x006\x00 '
         '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00')
Example #4
 def test_open_text_with_errors(self):
     # Raises UnicodeError without the 'errors' argument.
     with resources.open_text(
             self.data, 'utf-16.file', 'utf-8', 'strict') as fp:
         self.assertRaises(UnicodeError, fp.read)
     with resources.open_text(
             self.data, 'utf-16.file', 'utf-8', 'ignore') as fp:
         result = fp.read()
     self.assertEqual(
         result,
         'H\x00e\x00l\x00l\x00o\x00,\x00 '
         '\x00U\x00T\x00F\x00-\x001\x006\x00 '
         '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00')
Example #5
    def __init__(self, userDir=None):
        """
        read in the unit system information and set things up
        userDir is the path to the directory containing user data, if any
        """
        self.baseDataPath = None
        self.userDataPath = userDir

        # read in types
        self.types = {}
        # first base types
        # fileName = self.baseDataPath + os.sep + 'UnitType.txt'
        f = open_text('sim21.old.uom.data', 'UnitType.txt')
        while True:
            unitType = UnitType()
            unit_id = unitType.ReadFile(f)
            if unit_id is None:
                break
            self.types[unit_id] = unitType

        # check if user types exist
        # REMOVED

        # read in unit items
        self.units = {}
        # fileName = self.baseDataPath + os.sep + 'UnitItem.txt'
        f = open_text('sim21.old.uom.data', 'UnitItem.txt')
        while True:
            unit_type = UnitItem(self)
            unit_id = unit_type.ReadFile(f)
            if unit_id is None:
                break
            self.units[unit_id] = unit_type

        # check if user items exist

        # create cross reference for quick look up by name
        self.nameIndex = {}
        for unit_type in list(self.units.values()):
            self.nameIndex[unit_type.name] = unit_type.id

        # read standard unit sets
        self.unitSets = {}
        self.ReadSets()

        # see if there is a current default unit set
        self.defaultSet = self.unitSets['SI']
        self.sim42Set = self.unitSets.get('sim42', None)

        # fix up equivalent unit types after creation of nameIndex
        self.FixEquivalentTypes()
Example #6
    def init_resources(self, resource_dir: str, mode: str):
        if resource_dir:
            f_concepts = open(os.path.join(resource_dir, 'concepts.json'))
            f_relations = open(os.path.join(resource_dir, 'relations.json'))
        else:
            m = wiser if mode == 'wiser' else amr
            f_concepts = pkg_resources.open_text(m, 'concepts.json')
            f_relations = pkg_resources.open_text(m, 'relations.json')

        # concepts and relations
        self.concept_dict = json.load(f_concepts)
        self.concept_list = sorted(self.concept_dict.keys())
        self.relation_dict = json.load(f_relations)
        self.relation_list = sorted(self.relation_dict.keys())
        # close the resource handles opened above
        f_concepts.close()
        f_relations.close()
Example #7
    def __init__(self, update_tlds=False):
        """ Read the ETLD list provided by the user.
            Args:
                @update_tlds:   Boolean flag to allow the user to update the
                                TLD file from Mozilla source (DEFAULT_URL)
            Returns:
        """
        DEFAULT_URL = 'http://mxr.mozilla.org/mozilla-central/source/netwerk/dns/effective_tld_names.dat?raw=1'
        ETLD_FILENAME = 'mozilla_etlds.dat'

        etld_inf = open_text('dnspy.data', ETLD_FILENAME)
        self.etlds = {}

        if update_tlds:
            etld_inf.close()
            # Download the latest ETLD list
            urllib.request.urlretrieve(DEFAULT_URL, etld_inf.name)
            etld_inf = open_text('dnspy.data', ETLD_FILENAME)
            print('File updated...')

        for line in etld_inf:

            # open_text() yields str lines, so only whitespace needs stripping
            line = line.strip()
            # Ignore comments and whitespace lines
            if (line.startswith('//') or line == ''):
                continue

            if line[0] == '*':
                # Any hostname matches wildcard
                etld_ = line[2:].encode('idna').decode()
                if etld_ not in self.etlds:
                    self.etlds[etld_] = set()
                self.etlds[etld_].add('*')
            elif line[0] == '!':
                # Exceptions to the wildcard rule
                lbls = line.split('.')
                etld_ = '.'.join(lbls[1:]).encode('idna').decode()
                if etld_ not in self.etlds:
                    self.etlds[etld_] = set()
                self.etlds[etld_].add(lbls[0])
            else:
                # Else the normal case
                etld_ = line.encode('idna').decode()
                if etld_ not in self.etlds:
                    self.etlds[etld_] = set()

        etld_inf.close()
        return
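The resulting self.etlds table maps each idna-encoded suffix to a set of wildcard ('*') and exception labels. A hedged sketch of how such a table could be queried for the longest matching public suffix; longest_suffix_match is a hypothetical helper, not a method of this class, and wildcard/exception rules are omitted for brevity:

def longest_suffix_match(etlds, hostname):
    # Walk labels right-to-left, keeping the longest suffix found in the table.
    labels = hostname.encode('idna').decode().split('.')
    match = ''
    for i in range(len(labels) - 1, -1, -1):
        candidate = '.'.join(labels[i:])
        if candidate in etlds:
            match = candidate
    return match

# longest_suffix_match(self.etlds, 'www.example.co.uk') -> 'co.uk'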
Example #8
def _load_whp_names():
    _dtype_map = {"string": str, "decimal": float, "integer": int}
    whp_name = {}
    with open_text("hydro.data", "parameters.json") as f:
        for record in load(f):
            record["data_type"] = _dtype_map[record["data_type"]]
            param = WHPName(**record)
            whp_name[param.key] = param
    # load the aliases
    with open_text("hydro.data", "aliases.json") as f:
        for record in load(f):
            whp_name[(record["whp_name"], record["whp_unit"])] = whp_name[(
                record["canonical_name"], record["canonical_unit"])]

    return whp_name
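Because alias records are pointed at the same WHPName object as their canonical entry, lookups by alias or canonical key behave identically. A hedged usage sketch; the (name, unit) pair below is illustrative, not taken from parameters.json:

whp_names = _load_whp_names()
param = whp_names[("CTDPRS", "DBAR")]  # hypothetical (whp_name, whp_unit) key
print(param)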
Example #9
    def __init__(self):
        translator = VarXpathTranslator()
        with open_text(resources, CONVERSION_FILENAME_1) as translation_table:
            translator.read_translation_table(translation_table)
        super().__init__(translator)

        self.xml_unit_attribute = "unit"
Example #10
 def load_builtin_definitions(self) -> None:
     """Load the built-in key definitions from the ``defs`` directory"""
     for fname in resources.contents(_defs):
         if fname.endswith(".toml") and resources.is_resource(_defs, fname):
             logger.debug("Loading defs from %s", fname)
             with resources.open_text(_defs, fname) as file:
                 self.attach_defs(toml.load(file))
Example #11
def setup_logging(default_level=logging.INFO):
    with pkg_resources.open_text(config, "logging.yml") as ymlfile:
        logging_config = yaml.load(ymlfile, Loader=yaml.SafeLoader)
        if logging_config:
            logging.config.dictConfig(logging_config) 
        else:
            logging.basicConfig(level=default_level)
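For reference, logging.config.dictConfig expects a mapping with a "version" key. A minimal sketch of a payload that a logging.yml like this one might deserialize to, expressed as the equivalent Python dict; the contents are assumed, not the package's actual file:

import logging.config

logging_config = {
    "version": 1,
    "formatters": {
        "simple": {"format": "%(asctime)s %(levelname)s %(name)s: %(message)s"},
    },
    "handlers": {
        "console": {"class": "logging.StreamHandler", "formatter": "simple"},
    },
    "root": {"level": "INFO", "handlers": ["console"]},
}
logging.config.dictConfig(logging_config)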
Example #12
def load_dataset_list(dataset_list_path=DATASET_LIST_PATH):
    datasets = set()
    with pkg_resources.open_text("aimmx", dataset_list_path) as f:
        for line in f:
            datasets.add(line.strip())
    return datasets
Example #13
def process_results(economy):
    """
    Combine OSeMOSYS solution files and write the result as an Excel file where each result parameter is a tab in the Excel file.
    """
    print('\n-- Preparing results...')
    parent_directory = "./results/"
    child_directory = economy
    path = os.path.join(parent_directory,child_directory)
    try:
        os.mkdir(path)
    except OSError:
        #print ("Creation of the directory %s failed" % path)
        pass
    else:
        print ("Successfully created the directory %s " % path)

    with resources.open_text('aperc_osemosys','results_config.yml') as open_file:
        contents_var = yaml.load(open_file, Loader=yaml.FullLoader)

    results_df={}
    for key,value in contents_var.items():
        if contents_var[key]['type'] == 'var':
            fpath = './tmp/'+key+'.csv'
            #print(fpath)
            _df = pd.read_csv(fpath).reset_index(drop=True)
            results_df[key] = _df
    results_dfs = {}
    results_dfs = {k:v for (k,v) in results_df.items() if not v.empty}
    _result_tables = {}
    for key,value in results_dfs.items():
        indices = contents_var[key]['indices']
        _df = results_dfs[key]
        if 'TIMESLICE' in indices:
            unwanted_members = {'YEAR', 'VALUE'}
            _indices = [ele for ele in indices if ele not in unwanted_members]
            df = pd.pivot_table(_df,index=_indices,columns='YEAR',values='VALUE',aggfunc=np.sum)
            df = df.loc[(df != 0).any(1)] # remove rows if all are zero
            _result_tables[key] = df
        elif 'TIMESLICE' not in indices:
            if contents_var[key]['type'] == 'var':
                unwanted_members = {'YEAR', 'VALUE'}
                _indices = [ele for ele in indices if ele not in unwanted_members]
                df = pd.pivot_table(_df,index=_indices,columns='YEAR',values='VALUE')
                df = df.loc[(df != 0).any(1)] # remove rows if all are zero
                _result_tables[key] = df
            elif contents_var[key]['type'] == 'param':
                unwanted_members = {'YEAR', 'VALUE'}
                _indices = [ele for ele in indices if ele not in unwanted_members]
                df = pd.pivot_table(_df,index=_indices,columns='YEAR',values='VALUE')
                df = df.loc[(df != 0).any(1)] # remove rows if all are zero
                _result_tables[key] = df
            elif contents_var[key]['type'] == 'equ':
                unwanted_members = {'YEAR', 'VALUE'}
                _indices = [ele for ele in indices if ele not in unwanted_members]
                df = pd.pivot_table(_df,index=_indices,columns='YEAR',values='VALUE')
                #df = df.loc[(df != 0).any(1)] # remove rows if all are zero
                _result_tables[key] = df
        _result_tables[key]=_result_tables[key].fillna(0)
    results_tables = {k: v for k, v in _result_tables.items() if not v.empty}
    return results_tables
Example #14
def load_and_filter(keep_list,config_dict,economy,scenario):
    """
    Load data sets according to specified sectors.

    Filters data based on scenario, years, and economies.
    """
    subset_of_economies = economy
    print('Solving {} scenario...\n'.format(scenario))
    with resources.open_text('aperc_osemosys','model_config.yml') as open_file:
        contents = yaml.load(open_file, Loader=yaml.FullLoader)
    list_of_dicts = []
    for key,value in contents.items():
        if key in config_dict['sector']:
            _mypath = Path(value)
            if _mypath.exists():
                print(value)
                _path = value
                _dict = pd.read_excel(_path,sheet_name=None) # creates dict of dataframes
                __dict = {k: _dict[k] for k in keep_list}
                filtered_data = {}
                for key,value in __dict.items():
                    __df = __dict[key]
                    if 'SCENARIO' in __df.columns:
                        ___df = __df[__df['SCENARIO']==scenario].drop(['SCENARIO'],axis=1)
                        ____df = ___df.loc[(___df != 0).any(1)] # remove rows if all are zero
                        filtered_data[key] = ____df
                    else:
                        filtered_data[key] = __df
                for key,value in filtered_data.items():
                    __df = filtered_data[key]
                    if 'REGION' in __df.columns:
                        ___df = __df[__df['REGION']==subset_of_economies]
                        ____df = ___df.loc[(___df != 0).any(1)] # remove rows if all are zero
                        filtered_data[key] = ____df
                    else:
                        filtered_data[key] = __df
                for key,value in filtered_data.items():
                    __df = filtered_data[key]
                    if key == 'REGION':
                        ___df = __df[__df['VALUE']==subset_of_economies]
                        ____df = ___df.loc[(___df != 0).any(1)] # remove rows if all are zero
                        filtered_data[key] = ____df
                    else:
                        filtered_data[key] = __df
                for key,value in filtered_data.items():
                    __df = filtered_data[key]
                    if 'UNITS' in __df.columns:
                        ___df = __df.drop(['UNITS'],axis=1)
                        ____df = ___df.loc[(___df != 0).any(1)] # remove rows if all are zero
                        filtered_data[key] = ____df
                for key,value in filtered_data.items():
                    __df = filtered_data[key]
                    if 'NOTES' in __df.columns:
                        ___df = __df.drop(['NOTES'],axis=1)
                        ____df = ___df.loc[(___df != 0).any(1)] # remove rows if all are zero
                        filtered_data[key] = ____df
                __dict = {k: filtered_data[k] for k in keep_list}
                list_of_dicts.append(__dict)
    return list_of_dicts
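The five filter blocks above repeat a single "drop all-zero rows" pattern. A hedged refactoring sketch; the helper name is hypothetical:

import pandas as pd

def _drop_all_zero_rows(df: pd.DataFrame) -> pd.DataFrame:
    """Keep only rows where at least one entry is non-zero."""
    return df.loc[(df != 0).any(axis=1)]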
Example #15
def load_dr_data() -> Dict[int, Dict[str, np.ndarray]]:
    """
    Loads the available DR transition data from the resource directory.

    Returns
    -------
    dict of dicts
        A dictionary with the proton number as dict keys.
        Each value is another dictionary with the items
        "dr_e_res" (resonance energy),
        "dr_strength" (transition strength),
        and "dr_cs" (charge state).
        The values are linear numpy arrays holding corresponding data on the same rows.

    """
    out = {}
    empt = np.array([])
    empt.setflags(write=False)
    for z in range(1, 106):
        try:
            with open_text(_drdata, f"DR_{z}.csv") as f:
                dat = _parse_dr_file(f)
        except FileNotFoundError:
            dat = dict(dr_e_res=empt.copy(),
                       dr_strength=empt.copy(),
                       dr_cs=empt.copy())
        dat["dr_cs"] = dat["dr_cs"].astype(
            int)  # Need to assure int for indexing purposes
        out[z] = dat
    return out
Example #16
    def get_db(self):
        global conn_global
        if conn_global is None:
            conn_global = sqlite3.connect("foil_simulator.db")
            c = conn_global.cursor()
            result = c.execute(
                "SELECT name FROM sqlite_master WHERE type='table' AND name='foil'"
            ).fetchone()
            if result is None:
                # Create database tables
                logger.info("Creating Database for the first time")
                # fd = open('foil_simulator.sql', 'r')
                fd = pkg_resources.open_text("proply.sql",
                                             "foil_simulator.sql")
                sqlFile = fd.read()
                fd.close()

                # all SQL commands (split on ';')
                sqlCommands = sqlFile.split(";")

                # Execute every command from the input file
                for command in sqlCommands:
                    # This will skip and report errors
                    # For example, if the tables do not yet exist, this will skip over
                    # the DROP TABLE commands
                    try:
                        logger.info(command)
                        c.execute(command)
                    except sqlite3.OperationalError as msg:
                        print("Command skipped: ", msg)
            conn_global.commit()

        return conn_global
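The standard library can also run a multi-statement script in one call, which avoids splitting on ';' by hand (and the failure mode of a semicolon inside a statement body). A hedged alternative sketch using sqlite3's executescript; note that it aborts on the first error rather than skipping failing statements like the loop above, so it is not a drop-in replacement:

with pkg_resources.open_text("proply.sql", "foil_simulator.sql") as fd:
    conn_global.executescript(fd.read())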
Example #17
 def init(self, filename, typ):
     if typ == TYPE_LOCAL:
         path = 'levels/{}'.format(filename)
         stream = open(path, 'r')
     else:
         stream = pkg_resources.open_text('levels', filename)
     data = yaml.load(stream, Loader=yaml.FullLoader)
     self.number = data.get('level')
     self.name = data.get('name')
     self.intro = data.get('intro')
     self.outro = data.get('outro')
     self.money = data.get('money')
     self.tutorial = data.get('tutorial')
     diner.init(data.get('diner'))
     goals.init(data.get('goals'))
     time.init(data.get('calendar'))
     skills.init(data.get('skills', []))
     ingredients.init(data.get('ingredients', []))
     storage.init(data.get('storage', []))
     kitchen.init(data.get('kitchen', []))
     shopping.init(data.get('shopping', []))
     food.init(data.get('food'))
     guests.init(data.get('guests', []))
     social.init(data.get('social', []))
     activities.init(data.get('activities', []))
     stream.close()
Example #18
def load_crypt(apps, schema_editor):
    CryptCard = apps.get_model('api', 'CryptCard')
    with open_text(PACKAGE, 'vtescrypt.csv', encoding='utf8') as csv_crypt:
        next(csv_crypt)
        reader = csv.reader(csv_crypt, delimiter=',')
        crypt_cards = [
            CryptCard(id=row[0],
                      name=row[1],
                      aka=row[2] if row[2] else None,
                      alias=row[1] if not bool(row[5]) else row[1] + " (ADV)",
                      card_type=row[3],
                      clan=row[4],
                      advanced=bool(row[5]),
                      group_id=row[6],
                      capacity=int(row[7]),
                      disciplines=row[8],
                      card_text=row[9],
                      publish_set=row[10],
                      title=row[11] if row[11] else None,
                      banned=int(row[12]) if row[12] else None,
                      artist=row[13]
                      )
            for row in reader
        ]
        crypt_cards.sort(key=lambda card: card.id)
        used_aliases = set()
        for cc in crypt_cards:
            if not cc.advanced:
                if cc.alias in used_aliases:
                    cc.alias = f"{cc.alias} (G{cc.group_id})"
                else:
                    used_aliases.add(cc.alias)
            cc.save()
            load_card_expansions(cc.id, cc.publish_set)  # id - publish_set
Example #19
 def test_compatibility(self):
     """Compare results with Java results."""
     with open_text('myanmartools.resources', 'compatibility.tsv') as f:
         for row in csv.reader(f, delimiter='\t'):
             self.assertAlmostEqual(
                 self.detector.get_zawgyi_probability(row[1]),
                 float(row[0]))
Example #20
def get_profile(file_name: str = "BACJ.txt",
                chord_length=1.0,
                thickness_ratio=None) -> Profile:
    """
    Reads the profile from the indicated resource file and returns it after resizing.

    :param file_name: name of the resource file
    :param chord_length: set to None to keep the original chord length
    :param thickness_ratio: set to None to keep the original thickness ratio
    :return: the Profile instance
    """

    with open_text(resources, file_name) as source:
        x_z = np.genfromtxt(source,
                            skip_header=1,
                            delimiter="\t",
                            names="x, z")
    profile = Profile()
    profile.set_points(x_z["x"], x_z["z"])

    if thickness_ratio:
        profile.thickness_ratio = thickness_ratio

    if chord_length:
        profile.chord_length = chord_length

    return profile
Example #21
 def load(cls, version: Optional[str] = None) -> TableConfigurationModel:
     """Load the configuration from the packaged file."""
     with open_text(data, "metanetx.toml") as handle:
         obj = toml.load(handle)
     if version is None:
         version = obj["latest"]
     return cls(version=version, **obj[version])
Example #22
def xontrib_metadata():
    """Loads and returns the xontribs.json file."""
    impres = None
    pkg_resources = None

    # NOTE: Reduce all of these alternate implementations when the minimum Python
    #       is >=3.7
    try:
        # Python 3.7
        import importlib.resources as impres
    except ImportError:
        try:
            # Optional backport for <3.7
            import importlib_resources as impres
        except ImportError:
            try:
                # Try the slower and clunkier pkg_resources
                # This is only available if setuptools is part of the environment
                import pkg_resources
            except ImportError:
                pass

    if impres:
        with impres.open_text("xonsh", "xontribs.json") as f:
            md = json.load(f)
    elif pkg_resources:
        # Despite the name, this is a bytes
        bytesdata = pkg_resources.resource_string("xonsh", "xontribs.json")
        md = json.loads(bytesdata.decode("utf-8"))
    else:
        path = os.path.join(os.path.dirname(__file__), "xontribs.json")
        with open(path, "r") as f:
            md = json.load(f)

    return md
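On Python 3.9 and later, importlib.resources.files() collapses all three branches into one call. A hedged modernization sketch, not part of the original xonsh code:

from importlib.resources import files

md = json.loads(files("xonsh").joinpath("xontribs.json").read_text())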
Example #23
def scrape_recovered_from_wikipedia(c, restart='US-AL',
                                    output=WIKIPEDIA_RECOVERED):
    """Scrape time series of recovered cases from historic versions of Wikipedia pages.

    :param restart: Restart scraping at a state, given by its ISO code, e.g. US-VA for Virginia
    :param output: Write CSV files into this directory, defaults to 'build/scraping/wikipedia-recovered/YYYY-MM-DD_HHMM'

    Example: https://en.wikipedia.org/w/index.php?title=COVID-19_pandemic_in_Wisconsin
    """
    outdir = pathlib.Path(output)
    outdir.mkdir(parents=True, exist_ok=True)
    rundir = outdir / dt.datetime.now().strftime("%Y-%m-%d_%H%M")
    rundir.mkdir(parents=True, exist_ok=True)
    print("Writing output to {}".format(rundir))

    states = pd.read_csv(
        pkg_resources.open_text(data, "wikipedia_ISO_3166-2_US.csv"))
    all_states = []
    restart_index = list(states.Iso_3166_2).index(restart)
    for index, row in states.iloc[restart_index:].iterrows():
        if pd.isna(row['Wikipedia_Name']):
            continue
        time_series = time_series_recovered(row['Wikipedia_Name'],
            name=row['Name'],
            iso_code=row['Iso_3166_2'], limit=500)
        filename = 'time_servies_recovered_wikipedia_{}.csv'.format(
            row['Iso_3166_2'])
        time_series.to_csv(rundir / filename)
        all_states.append(time_series)
    pd.concat(all_states).to_csv(
        rundir / 'time_servies_recovered_wikipedia.csv')
Example #24
def create_formation_image(names_list, cache_dir, GK_name=None):
    """ Create soccer formation image using names of attending players. """
    with open_text("cktool", "formation.svg") as f:
        svg_data = f.read()
        svg_data = svg_data.replace("_z_", str(len(names_list)))
        if GK_name and GK_name in names_list:
            svg_data = svg_data.replace(f"_GK_", GK_name)
            names_list.remove(GK_name)

        for i, _ in enumerate(names_list.copy(), start=1):
            svg_data = svg_data.replace(f"_x{i}_", names_list.pop())

        for i in range(1, 30):
            svg_data = svg_data.replace(f"_x{i}_", "?")

    svg_root = etree.fromstring(svg_data,
                                parser=etree.XMLParser(remove_comments=True,
                                                       recover=True))
    svgRenderer = SvgRenderer(path="")
    drawing = svgRenderer.render(svg_root)
    filename = "formation_{}.png".format(
        dt.datetime.now().strftime("%Y%m%d_%Hh%Mm%Ss"))
    filepath = os.path.join(cache_dir, filename)
    renderPM.drawToFile(drawing, filepath, fmt="PNG")
    return filepath
Example #25
def get_config():
    try:
        with open(os.path.expanduser('~/.vest/config.json')) as f:
            return json.load(f)
    except Exception:
        with pkg_resources.open_text(data, 'config.json') as f:
            return json.load(f)
Example #26
def load_font(font_path, font_is_resource, page=0, ch1=EMPTY, ch2="#"):

    initial = page << 8
    last = initial + 0x100

    if font_is_resource and resources:
        data = list(resources.open_text("terminedia.data", font_path))

    elif font_is_resource and not resources:
        path = Path(__file__).parent / "data" / font_path
        data = list(open(path).readlines())
    else:
        # TODO: enable more font types, and
        # TODO: enable fallback to other fonts if glyphs not present in the requested one
        data = list(open(font_path).readlines())

    font = {}

    for i, line in enumerate(data[initial:last], initial):
        line = line.split(":")[1].strip()
        line = binascii.unhexlify(line)
        char = "\n".join(f"{bin(v).split('b')[1]}".zfill(8) for v in line)
        char = char.replace("0", ch1).replace("1", ch2)
        font[chr(i)] = char

    return font
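Each data line is parsed as "codepoint:hexbitmap" with one byte per 8-pixel row, a layout that resembles the GNU Unifont .hex format (an assumption about the bundled files). A hedged worked example with a made-up glyph line:

import binascii

line = "0041:183C66667E666600"        # hypothetical entry for 'A'
hexpart = line.split(":")[1].strip()  # "183C66667E666600"
rows = binascii.unhexlify(hexpart)    # 8 bytes, one per glyph row
glyph = "\n".join(bin(v)[2:].zfill(8) for v in rows)
print(glyph.replace("0", " ").replace("1", "#"))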
Example #27
def review_scraped_recoveries(c, csvdir=None):
    """Transform scraped data for Recovered into heatmaps in html and Excel format.

    :param csvdir The directory with CSV files of scraped wikipedia data for Recovered cases.
    """
    states = pd.read_csv(
        pkg_resources.open_text(data, "wikipedia_ISO_3166-2_US.csv"))
    assert os.path.exists(csvdir), "CSV directory not found"
    location = pathlib.Path(csvdir)
    filenames = [location / f"time_servies_recovered_wikipedia_{statecode}.csv"
                 for statecode in states.Iso_3166_2]
    filenames = [fname for fname in filenames if os.path.isfile(fname)]

    all_states = pd.concat([pd.read_csv(fname) for fname in filenames])
    all_states = all_states[['date', 'Name', 'Recovered']].sort_values('date',
        ascending=False)
    pivoted = all_states.pivot(index='Name', columns='date')
    pivoted = pivoted.round().fillna(-1)
    # Reverse column order: most recent days should be left.
    pivoted = pivoted.iloc[:, ::-1]
    # Heat map, see https://stackoverflow.com/questions/29432629/plot-correlation-matrix-using-pandas/50703596#50703596
    # Alternative: cmap='coolwarm'
    heatmap = pivoted.style.background_gradient(cmap='viridis',
        axis=1).set_na_rep('').set_precision(0)
    print("Writing heatmaps to {}".format(location))
    with open(location / 'heatmap.html', 'w') as file:
        file.write(heatmap.render().replace('>-1</td>', '></td>'))
    heatmap.to_excel(location / 'heatmap.xlsx')
Example #28
 def __init__(self, _case_name, _dir, **kwargs):
     '''
     Inputs:
         _case_name(str) - case name
         _dir(str) - directory of case data
         _kwargs:
             layout(dict):
                 layout of doc
             layout_json(str):
                 file name to read layout from
     '''
     self.case_name = _case_name
     self.idir = _dir
     self.media = {}
     # get layout
     try:
         _layout = kwargs['layout']
         my_assert(type(_layout) == dict, TypeError, "layout must be a dictionary")
         self.layout = _layout
     except KeyError:
         _layout_json = kwargs.get('layout_json', 'DocLayout.json')
         with resources.open_text(shilofue.json, _layout_json) as fin:
             _layout = json.load(fin)
         my_assert(type(_layout) == dict, TypeError, "layout must be a dictionary")
         self.layout = _layout
Example #29
def print_method(method):
    '''Print configuration and recipe of method'''

    methods = get_methods()
    if method in methods:
        # Print out method configuration
        print()
        print(method + ' configuration:')
        print()
        with pkg_resources.open_text(recipes, method + '.cfg') as f:
            for line in f:
                print(line[0:-1])

        # Print out method recipe
        print()
        print(method + ' recipe:')
        print()
        config = configparser.ConfigParser()
        with pkg_resources.path(recipes, method + '.cfg') as config_path:
            config.read(config_path)
        recipe = config[method]['recipe']
        with pkg_resources.path(recipes, recipe) as recipe_path:
            # Read inside the context: the path may point at a temporary file
            # that is cleaned up when the block exits.
            with open(recipe_path) as f:
                for line in f:
                    print(line[0:-1])
Example #30
 def parse(self):
     self.level = 0
     mod_name = '.'.join(__name__.split('.')[:-1])
     # module.parser --> module
     with open_text(mod_name, self.rpath) as fp:
         for self.count, self.line in enumerate(fp, 1):
             self.line = self.line.rstrip()
             if not self.line:
                 continue
             m = self.LEVEL_RE.match(self.line)
             self.level = len(m.group(1))
             self.line = self.line[self.level:]
             # '  xxx' --> 'xxx'
             if self.line.startswith('#'):
                 # comment
                 continue
             if self.skip_level and self.level > self.skip_level:
                 # arch: ...
                 #   ...
                 #   ...
                 continue
             self.skip_level = 0
             self.parse2()
         # last condition (or, and)
         self.create_cond()
Example #31
def load_font(font_path,
              font_is_resource,
              initial=0,
              last=256,
              ch1=" ",
              ch2="#"):

    if font_is_resource and resources:
        data = list(resources.open_text("terminedia.data", font_path))

    elif font_is_resource and not resources:
        path = Path(__file__).parent / "data" / font_path
        data = list(open(path).readlines())
    else:
        # TODO: enable more font types
        data = list(open(font_path).readlines())

    font = {}

    for i, line in enumerate(data[initial:last], initial):
        line = line.split(":")[1].strip()
        line = binascii.unhexlify(line)
        char = "\n".join(f"{bin(v).split('b')[1]}".zfill(8) for v in line)
        char = char.replace("0", ch1).replace("1", ch2)
        font[chr(i)] = char

    return font
Example #32
 def test_open_text_given_encoding(self):
     with resources.open_text(
             self.data, 'utf-16.file', 'utf-16', 'strict') as fp:
         result = fp.read()
     self.assertEqual(result, 'Hello, UTF-16 world!\n')
Example #33
 def execute(self, package, path):
     with resources.open_text(package, path):
         pass
Example #34
 def test_open_text_default_encoding(self):
     with resources.open_text(self.data, 'utf-8.file') as fp:
         result = fp.read()
         self.assertEqual(result, 'Hello, UTF-8 world!\n')