def run(self, options):
    """
    Define the code to be run by this plugin app.
    """
    print(Gstr_title)
    print('Version: %s' % self.get_version())

    # fetch input data
    with open('{}/prediction-default.json'.format(options.inputdir)) as f:
        classification_data = json.load(f)
    try:
        with open('{}/severity.json'.format(options.inputdir)) as f:
            severityScores = json.load(f)
    except FileNotFoundError:
        # severity scores are optional; without them we fall back to the
        # negative template below
        severityScores = None

    template_file = "pdf-covid-positive-template.html"
    if classification_data['prediction'] != "COVID-19" or severityScores is None:
        template_file = "pdf-covid-negative-template.html"

    txt = files('pdfgeneration').joinpath('template').joinpath(template_file).read_text()

    # replace the values
    txt = txt.replace("${PATIENT_TOKEN}", options.patientId)
    txt = txt.replace("${PREDICTION_CLASSIFICATION}", classification_data['prediction'])
    txt = txt.replace("${COVID-19}", f"{float(classification_data['COVID-19'])*100:.2f}%")
    txt = txt.replace("${NORMAL}", f"{float(classification_data['Normal'])*100:.2f}%")
    txt = txt.replace("${PNEUMONIA}", f"{float(classification_data['Pneumonia'])*100:.2f}%")
    txt = txt.replace("${X-RAY-IMAGE}", options.imagefile)

    time = datetime.datetime.now()
    txt = txt.replace("${month-date}", time.strftime("%c"))
    txt = txt.replace("${year}", time.strftime("%Y"))

    # add the severity values if the prediction is COVID-19
    if template_file == "pdf-covid-positive-template.html":
        txt = txt.replace("${GEO_SEVERITY}", severityScores["Geographic severity"])
        txt = txt.replace("${GEO_EXTENT_SCORE}", severityScores["Geographic extent score"])
        txt = txt.replace("${OPC_SEVERITY}", severityScores["Opacity severity"])
        txt = txt.replace("${OPC_EXTENT_SCORE}", severityScores["Opacity extent score"])

    # pdfkit's wkhtmltopdf is hard-coded to look in /tmp for assets
    # when the input is a string
    for asset_file in files('pdfgeneration').joinpath('template/assets').iterdir():
        os.symlink(asset_file, path.join('/tmp', asset_file.name))
    os.symlink(path.join(options.inputdir, options.imagefile),
               path.join('/tmp', options.imagefile))
    pdfkit.from_string(txt, path.join(options.outputdir, 'patient_analysis.pdf'))

def _set_environ_for_briefcase():
    package = sys.modules["__main__"].__package__
    if package and "Briefcase-Version" in metadata.metadata(package):
        if sys.platform == "linux":
            # Use bundled tesseract binary
            with resources.as_file(resources.files("normcap")) as normcap_path:
                tesseract_path = normcap_path.parent.parent / "bin" / "tesseract"
                os.environ["TESSERACT_CMD"] = str(tesseract_path.resolve())
        elif sys.platform == "darwin":
            # Use bundled tesseract binary
            with resources.as_file(resources.files("normcap")) as normcap_path:
                tesseract_path = (
                    normcap_path.parent.parent / "app_packages" / "tesseract"
                )
                os.environ["TESSERACT_CMD"] = str(tesseract_path.resolve())
        elif sys.platform == "win32":
            with resources.as_file(
                resources.files("normcap.resources")
            ) as resource_path:
                # Add openssl shipped with briefcase package to path
                openssl_path = resource_path / "openssl"
                os.environ["PATH"] += os.pathsep + str(openssl_path.resolve())
                # Use bundled tesseract binary
                tesseract_path = resource_path / "tesseract" / "tesseract.exe"
                os.environ["TESSERACT_CMD"] = str(tesseract_path.resolve())
                os.environ["TESSERACT_VERSION"] = "5.0.0"

def path(package, module, *args, **kw):
    """Wrap around importlib.resources.path.

    importlib_resources.path (the PyPI package we use for compatibility in
    Python < 3.7) has now diverged in behavior from importlib.resources.path
    (in Python >= 3.7), especially in terms of supporting directories.

    Even though we can just jump to the new version of the library, many
    distributions packaging Mailman do not package importlib_resources at
    all and instead patch the source code to simply replace
    importlib_resources with importlib.resources. This utility method is
    meant to keep that patching ability without any complicated patches to
    make Mailman work with the standard library importlib.resources.

    This is only supposed to be used where the divergent behavior causes
    problems for us.
    """
    # Note to packaging teams: This function will handle both the standard
    # library and the 3rd party importlib_resources package. Please do not
    # patch it.
    if module:
        module_package = '{}.{}'.format(package, module)
    else:
        module_package = package
    try:
        if sys.version_info < (3, 9):
            from importlib.resources import path
            return path(package, module, *args, **kw)
        else:
            from importlib.resources import files    # pragma: nocover
            return files(module_package, *args, **kw)    # pragma: nocover
    except ImportError:                                  # pragma: nocover
        from importlib_resources import files            # pragma: nocover
        return files(module_package, *args, **kw)        # pragma: nocover

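# Usage sketch for the wrapper above, not taken from Mailman itself; the
# package/module names are illustrative. It shows the divergence the
# docstring describes: on Python >= 3.9 the wrapper returns a Traversable
# from importlib.resources.files(), so directory listing works, while on
# older interpreters importlib.resources.path() yields a context manager
# and this listing would fail.
templates_dir = path('mailman', 'templates')
for entry in templates_dir.iterdir():    # relies on the files() behavior
    print(entry.name)
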
def create_refgene(
    ref: Path = typer.Option(
        ..., help="Isoform reference file, either in GTF or UCSC format"),
    formatFile: reference_format = typer.Option(
        ..., "--format", help="Format of the reference file."),
    out: Path = typer.Option(
        ..., help="Where to write the processed reference file"),
) -> None:
    ucsc_preprocess = resources.files("liqa").joinpath("scripts", "PreProcess.pl")
    gtf_preprocess = resources.files("liqa").joinpath("scripts", "PreProcess_gtf.pl")
    if formatFile == "ucsc":
        try:
            check_call([PERL, str(ucsc_preprocess), "-r", ref, "-o", out])
        except CalledProcessError as cp:
            print(cp)
    elif formatFile == "gtf":
        try:
            check_call([PERL, str(gtf_preprocess), "-r", ref, "-o", out])
        except CalledProcessError as cp:
            print(cp)
    else:
        print("Please specify reference file format: gtf/ucsc")

def test_unrelated_contents(self):
    """
    Test that a zip with two unrelated subpackages returns
    distinct resources. Ref python/importlib_resources#44.
    """
    self.assertEqual(
        names(resources.files('ziptestdata.one')),
        {'__init__.py', 'resource1.txt'},
    )
    self.assertEqual(
        names(resources.files('ziptestdata.two')),
        {'__init__.py', 'resource2.txt'},
    )

def test_natural_path(self):
    # Guarantee the internal implementation detail that
    # file-system-backed resources do not get the tempdir
    # treatment.
    target = resources.files(self.data) / 'utf-8.file'
    with resources.as_file(target) as path:
        assert 'data' in str(path)

def sso_to_source_spec(self, tax_type):
    """Returns a SourceSpectrum for the passed <tax_type> if it is found in
    the Bus-DeMeo taxonomy, otherwise None is returned.
    The spectrum is produced by multiplying the reflectance spectra by a
    Kurucz model for the Sun, so it will need to be normalized."""

    source_spec = None
    if not tax_type.lower().startswith('sso::'):
        tax_type = 'sso::' + tax_type.strip()
    config_item = conf.source_mapping.get(tax_type, None)
    if config_item is not None:
        filename = config_item()
        file_path = os.path.expandvars(filename)
        if not os.path.exists(file_path):
            file_path = str(pkg_resources.files('etc.data').joinpath(filename))
        # Flux unit for LSST throughputs (*almost* FLAM, but nm not Angstroms)
        lsst_funit = u.erg / u.cm**2 / u.s / u.nm
        source_spec = SourceSpectrum.from_file(file_path,
                                               wave_unit=u.nm,
                                               flux_unit=lsst_funit,
                                               header_start=1)
        source_spec.meta['header']['source'] = config_item.description
        source_spec.meta['header']['filename'] = filename

    return source_spec

def setUp(self):
    """ Prepares the test fixture before each test method is called. """
    self.preferences = set_default_preferences(Preferences())

    # The filename of the example preferences file.
    self.example = os.fspath(files(PKG) / "example.ini")

def test_training_from_simtel(tmp_path):
    """
    check we can write both dl1 and dl2 info (e.g. for training input)
    """
    config = files("ctapipe.tools.tests.resources").joinpath("training_config.json")
    output = tmp_path / "test_training.DL1DL2.h5"

    assert (
        run_tool(
            ProcessorTool(),
            argv=[
                f"--config={config}",
                f"--input={GAMMA_TEST_LARGE}",
                f"--output={output}",
                "--max-events=5",
                "--overwrite",
            ],
            cwd=tmp_path,
        )
        == 0
    )

    # check tables were written
    with tables.open_file(output, mode="r") as testfile:
        assert testfile.root.dl1.event.telescope.parameters.tel_002
        assert testfile.root.dl2.event.subarray.geometry.HillasReconstructor

def _get_package_data(module, rel_path):
    """
    Return package data in bytes for the given module and resource path.

    Parameters
    ----------
    module : ModuleType
        A module from which package data will be discovered.
        If the module name does not conform to the package requirement,
        then its "__file__" attribute is used for locating the directory
        to search for resource files.
    rel_path : str
        "/"-separated path for loading data file.

    Returns
    -------
    data : bytes
        Loaded data in bytes.

    Raises
    ------
    OSError
        If the path referenced does not resolve to an existing file or the
        file cannot be read.
    """
    if (module.__spec__ is None
            or module.__spec__.submodule_search_locations is None):
        module_dir_path = os.path.dirname(module.__file__)
        path = os.path.join(module_dir_path, *rel_path.split("/"))
        with open(path, "rb") as fp:
            return fp.read()
    return files(module).joinpath(rel_path).read_bytes()

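# Minimal usage sketch, not from the original module: the stdlib `json`
# package (which ships tool.py) is used purely as an example of a regular
# package whose data can be read through the files() branch above.
import json as _json_pkg

data = _get_package_data(_json_pkg, "tool.py")
print(f"read {len(data)} bytes")
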
def main(confdir="/etc/cslbot") -> None: config = configparser.ConfigParser( interpolation=configparser.ExtendedInterpolation()) with open(path.join(confdir, 'config.cfg')) as f: config.read_file(f) parser = argparse.ArgumentParser() parser.add_argument('outdir', help='The output dir.') cmdargs = parser.parse_args() session = get_session(config)() time = strftime('Last Updated at %I:%M %p on %a, %b %d, %Y') if not path.exists(cmdargs.outdir): makedirs(cmdargs.outdir) lockfile = open(path.join(cmdargs.outdir, '.lock'), 'w') fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) template_path = resources.files('cslbot') / 'templates' # Copy the js shutil.copy(path.join(template_path, 'sorttable.js'), cmdargs.outdir) env = Environment(loader=FileSystemLoader(template_path)) output_quotes(env, session, cmdargs.outdir, time) output_scores(env, session, cmdargs.outdir, time) output_polls(env, session, cmdargs.outdir, time) output_urls(env, session, cmdargs.outdir, time) fcntl.lockf(lockfile, fcntl.LOCK_UN) lockfile.close()
def available(self) -> bool:
    try:
        files = resources.files(self.path)  # type: ignore
    except (ValueError, ModuleNotFoundError, TypeError):
        return False
    return any(f.name == "__init__.py" and f.is_file() for f in files.iterdir())

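# Standalone sketch of the same availability check (assumption: `resources`
# above is importlib.resources; the stdlib `email` package is used here only
# because it is a regular package containing an __init__.py):
from importlib import resources

pkg = resources.files("email")
print(any(f.name == "__init__.py" and f.is_file() for f in pkg.iterdir()))
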
def get_provider_dict(config):
    handler_scripts = [
        *pkg_resources.files(download_providers).iterdir(),  # built-in handlers
        *(Path(p).resolve() for p in config['download_handlers']),  # custom handlers
    ]

    # generate provider information
    provider_dict = {}
    for entry in handler_scripts:
        if entry.name.startswith('__'):
            continue
        host_list = get_hosts_from_provider_script(entry)
        if len(host_list) == 0:
            logger.warning(f'[{entry}] No hosts specified, skipping')
        for host in host_list:
            if host in provider_dict:
                logger.warning(f'[{entry}] Overriding downloader for {host}')
            provider_dict[host] = {'name': host, 'function': sh.Command(entry)}
    return provider_dict

def test_entered_path_does_not_keep_open(self):
    # This is what certifi does on import to make its bundle
    # available for the process duration.
    c = resources.as_file(resources.files('ziptestdata') / 'binary.file').__enter__()
    self.zip_path.unlink()
    del c

def test_init_tessdata_copies_files(tmp_path, monkeypatch):
    # Create placeholders for traineddata files, if they don't exist
    resource_path = Path(resources.files("normcap.resources"))
    traineddata_files = list((resource_path / "tessdata").glob("*.traineddata"))
    if not traineddata_files:
        (resource_path / "tessdata" / "placeholder_1.traineddata").touch()
        (resource_path / "tessdata" / "placeholder_2.traineddata").touch()
    try:
        monkeypatch.setattr(utils.system_info, "config_directory", lambda: tmp_path)
        tessdata_path = tmp_path / "tessdata"

        traineddatas = list(tessdata_path.glob("*.traineddata"))
        txts = list(tessdata_path.glob("*.txt"))
        assert not traineddatas
        assert not txts

        for _ in range(3):
            utils.copy_tessdata_files_to_config_dir()

        traineddatas = list(tessdata_path.glob("*.traineddata"))
        txts = list(tessdata_path.glob("*.txt"))
        assert traineddatas
        assert len(txts) == 1
    finally:
        # Make sure to delete possible placeholder files
        for f in (resource_path / "tessdata").glob("placeholder_?.traineddata"):
            f.unlink()

def read_file(filename: str, binary: bool = False) -> Any:
    """Get the contents of a file contained with qutebrowser.

    Args:
        filename: The filename to open as string.
        binary: Whether to return a binary string.
                If False, the data is UTF-8-decoded.

    Return:
        The file contents as string.
    """
    assert not posixpath.isabs(filename), filename
    assert os.path.pardir not in filename.split(posixpath.sep), filename

    if not binary and filename in _resource_cache:
        return _resource_cache[filename]

    if hasattr(sys, 'frozen'):
        # PyInstaller doesn't support pkg_resources :(
        # https://github.com/pyinstaller/pyinstaller/wiki/FAQ#misc
        fn = os.path.join(os.path.dirname(sys.executable), filename)
        if binary:
            f: IO
            with open(fn, 'rb') as f:
                return f.read()
        else:
            with open(fn, 'r', encoding='utf-8') as f:
                return f.read()
    else:
        p = importlib_resources.files(qutebrowser) / filename
        if binary:
            return p.read_bytes()
        return p.read_text()

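# Usage sketch (the resource names here are illustrative, not necessarily
# files qutebrowser actually ships): text reads consult the module-level
# _resource_cache first, while binary reads always hit the resource.
stylesheet = read_file('html/error.html')
icon_bytes = read_file('icons/qutebrowser.svg', binary=True)
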
def test_read_text_given_encoding(self):
    result = (
        resources.files(self.data)
        .joinpath('utf-16.file')
        .read_text(encoding='utf-16')
    )
    self.assertEqual(result, 'Hello, UTF-16 world!\n')

def test_read_submodule_resource_by_name(self):
    result = (
        resources.files('ziptestdata.subdirectory')
        .joinpath('binary.file')
        .read_bytes()
    )
    self.assertEqual(result, b'\0\1\2\3')

def load_config(
    self,
    config_path: str,
    is_primary_config: bool,
    package_override: Optional[str] = None,
) -> ConfigResult:
    normalized_config_path = self._normalize_file_name(config_path)
    res = resources.files(self.path).joinpath(
        normalized_config_path
    )  # type: ignore
    if not res.exists():
        raise ConfigLoadError(f"Config not found : {normalized_config_path}")

    with res.open(encoding="utf-8") as f:
        header_text = f.read(512)
        header = ConfigSource._get_header_dict(header_text)
        self._update_package_in_header(
            header=header,
            normalized_config_path=normalized_config_path,
            is_primary_config=is_primary_config,
            package_override=package_override,
        )
        f.seek(0)
        cfg = OmegaConf.load(f)
        defaults_list = self._extract_defaults_list(
            config_path=config_path, cfg=cfg)
        return ConfigResult(
            config=self._embed_config(cfg, header["package"]),
            path=f"{self.scheme()}://{self.path}",
            provider=self.provider,
            header=header,
            defaults_list=defaults_list,
        )

def test_submodule_contents_by_name(self):
    contents = names(resources.files('namespacedata01'))
    try:
        contents.remove('__pycache__')
    except KeyError:
        pass
    self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'})

def load_schema(name):
    """
    Load a schema from ./schemas/``name``.json and return it.
    """
    path = resources.files(__package__).joinpath(f"schemas/{name}.json")
    data = path.read_text(encoding="utf-8")
    return json.loads(data)

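# Usage sketch (assumes a schemas/<name>.json is bundled next to this
# module; the schema name "config" is hypothetical):
schema = load_schema("config")
assert isinstance(schema, dict)
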
def test_quickstart_templates(filename):
    """
    ensure template configs have an appropriate placeholder for the contact info
    """
    config = files("ctapipe.tools.tests.resources").joinpath(filename)
    text = config.read_text()

    assert "YOUR-NAME-HERE" in text, "Missing expected name placeholder"
    assert "*****@*****.**" in text, "Missing expected email placeholder"
    assert "YOUR-ORGANIZATION" in text, "Missing expected org placeholder"

def up_all_kolombo_services() -> None:
    project_name = "kolombo_services"
    docker_folder = files("kolombo") / "docker"
    file_path = str(docker_folder / "services" / "docker-compose.yml")
    compose_command = [
        "docker-compose", "-p", project_name, "-f", file_path, "up"
    ]
    run([*compose_command, "--force-recreate", "-d"])

def read_text_from_resource(package: str, resource: str) -> str:
    try:
        content = pkg_resources.files(package).joinpath(
            resource).read_text()  # type: ignore
        return content
    except AttributeError:
        # Python < 3.9
        return pkg_resources.read_text(package, resource)

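# Usage sketch (package and resource names are illustrative): on
# Python >= 3.9 this resolves through files()/joinpath(); older
# interpreters have no importlib.resources.files, so the attribute access
# raises AttributeError and the read_text() fallback is used instead.
changelog = read_text_from_resource("mypkg", "CHANGELOG.txt")
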
def stop_kolombo_service(service: str) -> None:
    project_name = "kolombo_services"
    docker_folder = files("kolombo") / "docker"
    file_path = str(docker_folder / "services" / "docker-compose.yml")
    compose_command = [
        "docker-compose", "-p", project_name, "-f", file_path, "rm"
    ]
    run([*compose_command, "--stop", "--force", f"kolombo-{service}"])

def __init__(self, name=None, altitude=None, latitude=None, longitude=None, **kwargs):
    self.name = name if name is not None else "Undefined"
    self.altitude = altitude * u.m if altitude is not None else altitude
    self.latitude = latitude * u.deg if latitude is not None else latitude
    self.longitude = longitude * u.deg if longitude is not None else longitude

    if 'transmission' in kwargs:
        modelclass = Empirical1D
        try:
            transmission = float(kwargs['transmission'])
            wavelengths = np.arange(300, 1501, 1) * u.nm
            throughput = len(wavelengths) * [transmission, ]
            header = {}
            self.transmission = BaseUnitlessSpectrum(modelclass,
                                                     points=wavelengths,
                                                     lookup_table=throughput,
                                                     keep_neg=False,
                                                     meta={'header': header})
        except ValueError:
            self.transmission = read_element(kwargs['transmission'])
            # sky_file = str(pkg_resources.files('etc.data').joinpath(os.path.expandvars(kwargs['transmission'])))

    if 'sky_mag' in kwargs:
        self.sky_mags = kwargs['sky_mag']
    else:
        file_path = pkg_resources.files('etc.data').joinpath(
            os.path.expandvars(conf.sky_brightness_file))
        self.sky_mags_table = self._read_skybrightness_file(file_path)
        self.sky_mags = []
        if self.sky_mags_table:
            self.sky_mags = dict(
                zip(self.sky_mags_table.colnames, self.sky_mags_table[0]))

    self.radiance = None
    if 'radiance' in kwargs:
        # Assume this is from ESO skycalc so flux_units are photons/s/m**2/micron
        # unless given by the user
        radiance_unit_str = kwargs.get("radiance_units", "photon/s/m**2/um")
        try:
            warnings.simplefilter('ignore', category=u.UnitsWarning)
            radiance_unit = u.Unit(radiance_unit_str)
        except ValueError:
            radiance_unit = u.photon / u.s / u.m**2 / u.um
            print("Warning: invalid units: {}. Assuming {}".format(
                radiance_unit_str, radiance_unit))
        self.radiance = read_element(kwargs['radiance'],
                                     element_type='spectrum',
                                     flux_units=radiance_unit)

def load_config(self, config_path: str) -> ConfigResult:
    normalized_config_path = self._normalize_file_name(config_path)
    res = resources.files(self.path).joinpath(
        normalized_config_path
    )  # type: ignore
    if not res.exists():
        raise ConfigLoadError(f"Config not found : {normalized_config_path}")

    return self._read_config(res)

def resource_path(package: Union[str, types.ModuleType]) -> ReadOnlyPath:
    """Returns `importlib.resources.files`."""
    path = importlib_resources.files(package)  # pytype: disable=module-attr
    if isinstance(path, pathlib.Path):
        return _Path(path)
    elif isinstance(path, zipfile.Path):
        return ResourcePath(path.root, path.at)
    else:
        raise TypeError(f'Unknown resource path: {type(path)}: {path}')

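# Usage sketch (the package name is hypothetical): the same call works
# whether the package is installed as a plain directory (pathlib.Path) or
# bundled in a zip/wheel (zipfile.Path), since both branches wrap the
# result in a read-only path type.
data_dir = resource_path("my_dataset_pkg") / "data"
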
def test_stage1_datalevels(tmp_path):
    """test the dl1 tool on a file not providing r1, dl0 or dl1a"""

    class DummyEventSource(EventSource):
        """for testing"""

        @staticmethod
        def is_compatible(file_path):
            with open(file_path, "rb") as infile:
                dummy = infile.read(5)
                return dummy == b"dummy"

        @property
        def datalevels(self):
            return (DataLevel.R0,)

        @property
        def is_simulation(self):
            return True

        @property
        def obs_ids(self):
            return [1]

        @property
        def subarray(self):
            return None

        def _generator(self):
            return None

    dummy_file = tmp_path / "datalevels_dummy.h5"
    out_file = tmp_path / "datalevels_dummy_stage1_output.h5"
    with open(dummy_file, "wb") as infile:
        infile.write(b"dummy")
        infile.flush()

    config = files("ctapipe.tools.tests.resources").joinpath("stage1_config.json")
    tool = ProcessorTool()

    assert (
        run_tool(
            tool,
            argv=[
                f"--config={config}",
                f"--input={dummy_file}",
                f"--output={out_file}",
                "--write-images",
                "--overwrite",
            ],
            cwd=tmp_path,
        )
        == 1
    )
    # make sure the dummy event source was really used
    assert isinstance(tool.event_source, DummyEventSource)

def create_tar(filelst, tarname=DEFAULT_TAR_NAME, workdir="."): print(f"Creating tar '{tarname}'") with tarfile.open(tarname, "w:gz", format=tarfile.GNU_FORMAT) as tar: for f in filelst: tar.add(f) if "setup" not in filelst: print("Adding default 'setup' program") setup = files(MODULE) / DEFAULT_SETUP_NAME tar.add(str(setup), "setup")