def get_pkg_resource_path(package: str, resource: str) -> Path:
    """Resolve *resource* inside *package* to a concrete filesystem path.

    The new package-resource API hands back a context manager rather than a
    plain path. Entering it on a long-lived ``ExitStack`` (closed via
    ``atexit``) keeps the returned path valid for the life of the process,
    even when the resource had to be extracted to a temporary file.
    """
    stack = ExitStack()
    # Defer cleanup (e.g. removal of any temporary extraction) to shutdown.
    atexit.register(stack.close)
    return stack.enter_context(resource_path(package, resource))
def _query_db(db: str, sql: str, *args, local_: bool = False) -> List:
    """Run *sql* against a database *db* bundled in the ``hentai.data`` namespace.

    Pass ``local_=True`` to target the in-repository copy under
    ``src.hentai.data`` instead of the installed package.
    """
    namespace = 'src.hentai.data' if local_ else 'hentai.data'
    # Resolve the packaged DB file, then open/close connection and cursor
    # deterministically via `closing`.
    with resource_path(namespace, db) as db_path, \
            closing(sqlite3.connect(db_path)) as connection, \
            closing(connection.cursor()) as cursor:
        return cursor.execute(sql, *args).fetchall()
def setup_ui(self):
    """Register application metadata with Qt and install the window icon."""
    # Application identity (used by Qt for settings paths, dialogs, etc.).
    self.setApplicationName(application_name)
    self.setApplicationVersion(application_version)
    self.setOrganizationName(organization_name)
    self.setOrganizationDomain(organization_domain)
    # Resolve the bundled .ico via the package-resource machinery;
    # QIcon wants a plain string, hence as_posix().
    with resource_path(icons, "application.ico") as icon_path:
        window_icon = QIcon(icon_path.as_posix())
        self.setWindowIcon(window_icon)
def get_package_data_name(name):
    """Return the filepath of a data file shipped with the pipeline.

    The choices are:

    * report_template.ipynb
    * scaling_factors.txt
    """
    # Enter the resource context just long enough to capture the path object,
    # then hand it back to the caller.
    with resource_path(resources, name) as data_path:
        resolved = data_path
    return resolved
def get_random_id(make_request: bool = True, handler=None) -> int:
    """Return a random gallery ID.

    :param make_request: when ``True``, hit the site's ``/random`` endpoint and
        parse the ID out of the redirect URL; when ``False``, randomly pick an
        already validated ID from the bundled ``ids.csv``.
    :param handler: optional pre-configured ``RequestHandler``. A fresh one is
        created per call when omitted. (Fix: the previous default
        ``handler=RequestHandler()`` was evaluated once at import time and the
        same handler/session was silently shared across every call.)
    """
    if make_request:
        if handler is None:
            handler = RequestHandler()
        response = handler.session.get(urljoin(Hentai.HOME, 'random'))
        # Redirect target looks like '/g/<id>/': strip the '/g/' prefix and
        # the trailing slash.
        return int(urlparse(response.url).path[3:-1])
    else:
        with resource_path('hentai.data', 'ids.csv') as data_path:
            with open(data_path, mode='r', encoding='utf-8') as file_handler:
                reader = csv.reader(file_handler)
                return random.choice([int(row[0]) for row in reader])
def _extract_with_docxtpl(self, tname, scenario, filepath):
    """Render template *tname* with docxtpl and save the document at *filepath*
    (suffix replaced by the template's configured one)."""
    from jinja2 import Environment
    from jinja2.ext import i18n, do, loopcontrols, with_
    from docxtpl import DocxTemplate

    info = self.templates[tname]
    env_attr = f'_extract_env_{tname}'
    # Lazily build one Jinja environment per template name and cache it
    # on the instance.
    if not hasattr(self, env_attr):
        env = Environment(extensions=[i18n, do, loopcontrols, with_],
                          **info['env_param'])
        for filter_name, filter_func in filters.items():
            env.filters[filter_name] = filter_func
        setattr(self, env_attr, env)
    # Resolve the packaged .docx template, render it with the scenario,
    # and write the result next to the requested filepath.
    with resource_path(templates, info['filename']) as template_path:
        document = DocxTemplate(str(template_path))
        document.render({'scenario': scenario}, getattr(self, env_attr))
        document.save(Path(filepath).with_suffix(f'.{info["suffix"]}'))
def exists(id: int, make_request: bool = True) -> bool:
    """Return ``True`` when *id* exists on ``nhentai.net``.

    With ``make_request=False`` the check is performed offline against the
    bundled ``ids.csv`` of previously validated IDs.
    """
    if make_request:
        try:
            return RequestHandler().get(urljoin(Hentai._URL, str(id))).ok
        except HTTPError:
            return False
    # Offline path: scan the validated-ID CSV for a match.
    with resource_path('hentai.data', 'ids.csv') as data_path:
        with open(data_path, mode='r', encoding='utf-8') as file_handler:
            return any(id == int(row[0]) for row in csv.reader(file_handler))
def _extract_with_jinja2(self, tname, scenario, filepath):
    """Render template *tname* through a per-template cached Jinja2
    environment and write the result to *filepath* (suffix replaced by the
    template's configured one).
    """
    from jinja2 import Environment, FileSystemLoader
    from jinja2.ext import i18n, do, loopcontrols, with_

    info = self.templates[tname]
    # Fix: removed leftover debug `print(info)` that spammed stdout on
    # every extraction.
    if not hasattr(self, f'_extract_env_{tname}'):
        # Load templates from the package's template directory.
        with resource_path(templates, '.') as path:
            e = Environment(loader=FileSystemLoader(str(path)),
                            extensions=[i18n, do, loopcontrols, with_],
                            **info['env_param'])
        for n, f in filters.items():
            e.filters[n] = f
        setattr(self, f'_extract_env_{tname}', e)
    template = getattr(self, f'_extract_env_{tname}').get_template(
        info['filename'])
    with open(Path(filepath).with_suffix(f'.{info["suffix"]}'), 'w',
              encoding='utf-8') as file:
        file.write(
            template.render({
                'scenario': scenario,
                'MacroProcessor': Processor
            }))
def prepare_export(
    evaluations: List[Evaluation],
    summary_evaluations: Dict[str, dict],
    operations: List[Operation],
) -> dict:
    """
    Structures data results of evaluations for use in an export.

    The intention of this method is to structure information in a way that
    makes it easy to use in reporting tools (i.e. exports); as a result there
    is lots of duplication and simplification of data types, for example.

    Note: The structure and contents of this data have not yet been discussed
    or agreed.

    :type evaluations: List[Evaluation]
    :param evaluations: list of evaluations
    :type summary_evaluations: Dict
    :param summary_evaluations: summarised evaluations
    :type operations: List[Operation]
    :param operations: list of operations
    :rtype dict
    :return: processed data ready for use in exports
    """
    # Version/schema the generated structure is validated against below.
    export_format_version: int = 1
    export_format_schema_name: str = "export_format_v1_schema.json"

    # Operations indexed three ways: as a list, by ID, and as an
    # ISO-3166 alpha-3 -> country-name lookup.
    _operations: List[dict] = list()
    _operations_by_id: Dict[str, dict] = dict()
    _countries: Dict[str, str] = dict()
    for operation in operations:
        _operations.append(operation.export())
        _operations_by_id[operation.operation_id] = operation.export()
        _countries[operation.affected_country.alpha_3] = operation.affected_country.name

    # Evaluation results grouped by operation, by layer, by result type,
    # and ungrouped — deliberate duplication for reporting convenience.
    _results_by_operation: Dict[str, dict] = dict()
    _results_by_layer: Dict[str, dict] = dict()
    _results_by_result: Dict[str, List[Dict[str, str]]] = {
        EvaluationResult.NOT_EVALUATED.name: [],
        EvaluationResult.PASS.name: [],
        EvaluationResult.PASS_WITH_WARNINGS.name: [],
        EvaluationResult.FAIL.name: [],
        EvaluationResult.ERROR.name: [],
    }
    _ungrouped_results: List[Dict[str, str]] = list()
    for evaluation in evaluations:
        if evaluation.operation_id not in _results_by_operation.keys():
            _results_by_operation[evaluation.operation_id] = dict()
        _results_by_operation[evaluation.operation_id][
            evaluation.layer.layer_id] = evaluation.result.name
        if evaluation.layer.layer_id not in _results_by_layer.keys():
            _results_by_layer[evaluation.layer.layer_id] = dict()
        _results_by_layer[evaluation.layer.layer_id][
            evaluation.operation_id] = evaluation.result.name
        _results_by_result[evaluation.result.name].append({
            "operation_id": evaluation.operation_id,
            "layer_id": evaluation.layer.layer_id,
        })
        _ungrouped_results.append({
            "operation_id": evaluation.operation_id,
            "layer_id": evaluation.layer.layer_id,
            "result": evaluation.result.name,
        })

    # Final export payload: metadata (including display labels for result
    # types and layer categories) plus all grouped data views.
    export_data: Dict = {
        "meta": {
            "app_version": __version__,
            "export_version": export_format_version,
            "export_datetime": datetime.utcnow().isoformat(timespec="milliseconds"),
            "display_labels": {
                "result_types": {
                    EvaluationResult.NOT_EVALUATED.name: "Not Evaluated",
                    EvaluationResult.PASS.name: "Pass",
                    EvaluationResult.PASS_WITH_WARNINGS.name: "Warning",
                    EvaluationResult.FAIL.name: "Fail",
                    EvaluationResult.ERROR.name: "Error",
                },
                "layer_aggregation_categories": {
                    "admn": "Admin",
                    "carto": "Cartographic",
                    "elev": "Elevation",
                    "phys": "Physical features",
                    "stle": "Settlements",
                    "tran": "Transport",
                },
            },
        },
        "data": {
            "operations": _operations,
            "operations_by_id": _operations_by_id,
            "countries": _countries,
            "results_by_operation": _results_by_operation,
            "results_by_layer": _results_by_layer,
            "results_by_result": _results_by_result,
            "ungrouped_results": _ungrouped_results,
            "summary_statistics": summary_evaluations,
        },
    }

    # Validate the payload against the bundled JSON Schema before returning;
    # jsonschema_validate raises on mismatch.
    with resource_path(
            package="mapy_rds_dashboard",
            resource=export_format_schema_name) as export_format_schema_path:
        with open(str(export_format_schema_path), mode="r") as export_format_schema_file:
            export_format_schema: Dict = json.load(
                fp=export_format_schema_file)
    jsonschema_validate(instance=export_data, schema=export_format_schema)
    return export_data
from contextlib import asynccontextmanager

# aiohttp application wired to Jinja templates shipped inside the
# `nbconvert_http` package.
app = web.Application()
aiohttp_jinja2.setup(app, loader=jinja2.PackageLoader('nbconvert_http', 'jinja_templates'))
routes = web.RouteTableDef()
# Process/executor pool; populated elsewhere at startup.
pool = None
# Target notebook format version for conversions.
TO_VERSION = 4
# Allowed values for the Content-Disposition response field.
DISPOSITION_FIELDS = {'inline', 'attachment'}
DEFAULT_HOST = "0.0.0.0"
DEFAULT_PORT = 8000
# Factory (not a path!) — each call returns a fresh context manager that
# yields the on-disk path of the packaged LaTeX bibliography template.
TEMPLATE_PATH_FACTORY = lambda: resource_path(
    'nbconvert_http.nbconvert_templates', 'latex_bib_template.tplx')


@asynccontextmanager
async def render_execution_context(exporter_type: str, config: dict):
    """Async context that prepares *config* for an export of *exporter_type*.

    For LaTeX exporters, we want to use the bibliography template, which
    needs to be written to a file; the resource context is held open for the
    duration of the `yield` so the file stays available.
    """
    if issubclass(nbconvert.get_exporter(exporter_type), nbconvert.LatexExporter):
        with TEMPLATE_PATH_FACTORY() as path:
            config['Exporter']['template_file'] = str(path)
            yield
    else:
        yield


def get_exporter_names() -> Set[str]:
def get_resource_path(package: str, resource: str) -> Path:
    """Get the path to a `resource` located in `package`.

    Fix: the previous implementation returned the path from inside the
    ``with resource_path(...)`` block, so the context manager had already
    exited by the time the caller used the path — for resources extracted to
    a temporary file (e.g. zipped packages) the file may no longer exist.
    Keep the context open on an ``ExitStack`` closed at interpreter exit,
    which is the pattern recommended by the importlib.resources docs.
    """
    import atexit
    from contextlib import ExitStack

    stack = ExitStack()
    atexit.register(stack.close)
    return Path(stack.enter_context(resource_path(package, resource)))
def get_pkg_resource_path(package: str, resource: str) -> Path:
    """Normalize the old string-returning resource API to a ``Path``."""
    # noinspection PyTypeChecker
    raw_path = resource_path(package, resource)
    return Path(raw_path)
def query_db(db: str, sql: str, *args, local_: bool = False) -> List:
    """Execute *sql* on the packaged SQLite database *db* and return all rows.

    ``local_=True`` selects the in-repository ``src.lolicon.data`` copy
    rather than the installed ``lolicon.data`` package.
    """
    package = 'src.lolicon.data' if local_ else 'lolicon.data'
    # `closing` guarantees both connection and cursor are released even if
    # the query raises.
    with resource_path(package, db) as db_file, \
            closing(sqlite3.connect(db_file)) as connection, \
            closing(connection.cursor()) as cursor:
        return cursor.execute(sql, *args).fetchall()
def load_resource(resource: str, package: str) -> List[dict]:
    """Deserialize a packaged JSON file and return its contents.

    NOTE(review): the arguments are forwarded to ``resource_path`` in the
    order ``(resource, package)`` — the reverse of that function's usual
    ``(package, resource)`` signature. Existing callers appear to depend on
    this order; confirm against call sites before "fixing" it.
    """
    with resource_path(resource, package) as json_path:
        with open(json_path, mode='r', encoding='utf-8') as fp:
            return json.load(fp)
# CONSIDER: combine CommButton and CommModeDropDown in one button # (use .setMouseTracking() to control what subwidget to activate) # ———————————————————————————————————————————————————————————————————————————————————————————————————————————————————— # # Default layout spacing = 5 # Default ContentsMargins = 12 log = Logger("CommPanel") log.setLevel('DEBUG') # True ––► test button and test methods are added DEBUG_MODE = False REFRESH_ICON_RES = resource_path('PyQt5Utils.res', 'refresh.gif') class QDataAction(QAction): def __init__(self, *args, widget: QWidget): super().__init__(*args) self.widget = widget def triggerString(self, data: str): self.setData(data) self.trigger() self.widget.setText(data) class WidgetActions(dict): def __init__(self, owner: QWidget):