def check_versions_short_circuit(
        pkgs_with_versions: Dict[Package, Tuple[Tuple[Remote, str], ...]]) -> Exit:
    """Check whether any package has at least one (remote, version) entry
    and return the corresponding exit status."""
    if any(len(versions) > 0 for versions in pkgs_with_versions.values()):
        return Exit(100)
    return Exit(0)
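# Usage sketch (hypothetical, not from the original module): `Package` and
# `Remote` stand in for the real types, which this excerpt does not define,
# and `Exit` is assumed to be typer.Exit, whose `exit_code` attribute carries
# the status. The contract: 100 when any package has a remote version, else 0.
from typing import Dict, Tuple

Package = str  # hypothetical stand-in
Remote = str   # hypothetical stand-in

def _demo_short_circuit() -> None:
    no_updates: Dict[Package, Tuple[Tuple[Remote, str], ...]] = {"pkg-a": ()}
    updates: Dict[Package, Tuple[Tuple[Remote, str], ...]] = {
        "pkg-a": (("pypi", "2.0.1"),),
    }
    assert check_versions_short_circuit(no_updates).exit_code == 0
    assert check_versions_short_circuit(updates).exit_code == 100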
def doi_process(ids: List[str] = Argument(..., help="Valid DOI/arXivID(s) or file (*.bib, *.txt)"),
                dir: Path = Option(getcwd, '--dir', '-d', help="Directory to download"),
                scihub: str = Option(config.__scihub__, '--scihub', '-s', help="Valid Sci-Hub URL")):
    global operator
    try:
        assert not scihub.startswith(
            "http"), 'Argument Error: Invalid URL, example: sci-hub.tf'
        scihub = "https://" + scihub
        assert path.exists(dir), 'Argument Error: Invalid path.'
        if ids[0].lower().endswith('.bib'):
            assert path.exists(ids[0]), 'Argument Error: Invalid file path.'
            ids_list = operator.parseBibTex(ids[0])
        elif ids[0].lower().endswith('.txt'):
            assert path.exists(ids[0]), 'Argument Error: Invalid file path.'
            ids_list = operator.parseTxt(ids[0])
        else:
            ids_list = ids
    except AssertionError as e:
        echo(e.args[0], err=True)
        raise Exit()
    if not ids_list:
        echo("There is no valid DOI.", err=True)
        raise Exit()
    articles = []
    for d in ids_list:
        if d.startswith('arXiv'):
            articles.append({
                # Trailing slash keeps the 'abs' path segment; without it,
                # urljoin() would replace 'abs' with the arXiv identifier.
                "article_url": urljoin('https://arxiv.org/abs/', d.split(':')[1]),
                "file_name": f"{d.replace(':', '-')}.pdf",
                "warning_str": d
            })
        else:
            articles.append({
                "article_url": urljoin(scihub, d),
                "file_name": f"{d.replace('/', '-')}.pdf",
                "warning_str": d
            })
    task = "ID"
    missing, log_file = operator.download(task, articles, dir)
    echo(f" {' '*len(task)} | {missing} missing: {log_file}")
def cli_upload(
    path: Path = Argument(..., exists=True, dir_okay=True, file_okay=True,
                          readable=True, resolve_path=True),
    auth_key: Optional[str] = Option(None, envvar='SMOKESHOW_AUTH_KEY'),
    root_url: str = Option(ROOT_URL, envvar='SMOKESHOW_ROOT_URL'),
    github_status_description: Optional[str] = Option(
        None, envvar='SMOKESHOW_GITHUB_STATUS_DESCRIPTION'),
    github_coverage_threshold: Optional[float] = Option(
        None, envvar='SMOKESHOW_GITHUB_COVERAGE_THRESHOLD'),
) -> None:
    try:
        asyncio.run(
            upload(
                path,
                auth_key=auth_key,
                github_status_description=github_status_description,
                github_coverage_threshold=github_coverage_threshold,
                root_url=root_url,
            ))
    except ValueError as e:
        print(e, file=sys.stderr)
        raise Exit(1)
def version_callback(value: bool):  # pragma: no cover
    if not value:
        return
    package = "PyPMS"
    echo(f"{package} version {metadata.version(package)}")
    raise Exit()
def evaluate(
    adapter: str = AudioAdapterOption,
    output_path: Path = AudioOutputOption,
    stft_backend: STFTBackend = AudioSTFTBackendOption,
    params_filename: str = ModelParametersOption,
    mus_dir: Path = MUSDBDirectoryOption,
    mwf: bool = MWFOption,
    verbose: bool = VerboseOption,
) -> Dict:
    """
    Evaluate a model on the musDB test dataset
    """
    import numpy as np

    configure_logger(verbose)
    try:
        import musdb
        import museval
    except ImportError:
        logger.error("Extra dependencies musdb and museval not found")
        logger.error("Please install musdb and museval first, abort")
        raise Exit(10)
    # Separate musdb sources.
    songs = glob(join(mus_dir, EVALUATION_SPLIT, "*/"))
    mixtures = [join(song, EVALUATION_MIXTURE) for song in songs]
    audio_output_directory = join(output_path, EVALUATION_AUDIO_DIRECTORY)
    separate(
        deprecated_files=None,
        files=mixtures,
        adapter=adapter,
        bitrate="128k",
        codec=Codec.WAV,
        duration=600.0,
        offset=0,
        output_path=join(audio_output_directory, EVALUATION_SPLIT),
        stft_backend=stft_backend,
        filename_format="{foldername}/{instrument}.{codec}",
        params_filename=params_filename,
        mwf=mwf,
        verbose=verbose,
    )
    # Compute metrics with musdb.
    metrics_output_directory = join(output_path, EVALUATION_METRICS_DIRECTORY)
    logger.info("Starting musdb evaluation (this could be long) ...")
    dataset = musdb.DB(root=mus_dir, is_wav=True, subsets=[EVALUATION_SPLIT])
    museval.eval_mus_dir(
        dataset=dataset,
        estimates_dir=audio_output_directory,
        output_dir=metrics_output_directory,
    )
    logger.info("musdb evaluation done")
    # Compute and pretty print median metrics.
    metrics = _compile_metrics(metrics_output_directory)
    for instrument, instrument_metrics in metrics.items():
        logger.info(f"{instrument}:")
        for metric, value in instrument_metrics.items():
            logger.info(f"{metric}: {np.median(value):.3f}")
    return metrics
def version_callback(value: bool):
    if value:
        try:
            from importlib.metadata import version
        except ImportError:
            from importlib_metadata import version
        echo(f"Spleeter Version: {version('spleeter')}")
        raise Exit()
def version_callback(version: Optional[bool] = Option(
        None, '--version', '-V', is_eager=True, help="Show version")):
    global console
    if version:
        console.print(
            Panel(config.__banner__,
                  title=f'[i b #fcec0c on #58482c]{" "*2} ===== W E L C O M E ! ===== {" "*2}[/]',
                  subtitle=f'[#fcec0c on #58482c]{" "*4}[i]Ver. {config.__version__} [/i]| [link={config.__url__}]Github: f10w3r/sci-clone[/link]{" "*4}',
                  width=70))
        raise Exit()
def issn_process(issn: str = Argument(..., help="Journal ISSN (e.g.: 0002-9602)"),
                 year: List[datetime] = Argument(
                     ..., formats=['%Y'], help="From year to year (e.g.: 2011 2012)"),
                 dir: Path = Option(getcwd, '--dir', '-d', help="Directory to download"),
                 scihub: str = Option(config.__scihub__, '--scihub', '-s',
                                      help="Valid Sci-Hub URL")):
    try:
        assert len(year) in (1, 2), "Argument Error: 'year' takes 1 or 2 values."
        if len(year) == 1:
            year = [year[0], year[0]]
        assert datetime.strptime("1665", "%Y") < year[0] <= year[1] <= datetime.now(), \
            "Argument Error: Invalid 'year', not a time machine."
        assert not scihub.startswith(
            "http"), 'Argument Error: Invalid URL, example: sci-hub.tf'
        scihub = "https://" + scihub
        assert path.exists(dir), 'Argument Error: Invalid path.'
    except AssertionError as e:
        echo(e.args[0], err=True)
        raise Exit()
    global operator, console
    for idx, y in enumerate(range(year[0].year, year[1].year + 1)):
        doi_list = operator.get_doi_list(y, issn)
        if not idx:
            console.print(f" {doi_list[0]['container-title'][0]} ".upper(),
                          style="bold white italic on blue")
        articles = [{
            "article_url": urljoin(scihub, article['DOI']),
            "file_name": f"VOL{article['volume']}_{article['DOI'].replace('/', '-')}.pdf",
            "warning_str": f"{article['DOI']} | {issn} | {y}_VOL{article['volume']}"
        } for article in doi_list]
        folder = path.join(dir, issn + '_' + str(y))
        if not path.exists(folder):
            mkdir(folder)
        missing, log_file = operator.download(y, articles, folder)
        echo(f" {' '*4} | {missing} missing: {log_file}")
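# Sketch (assumption, not from this excerpt): the `operator` object above is
# never defined here. issn_process only needs get_doi_list to yield items
# carrying 'DOI', 'volume', and 'container-title', which matches what
# Crossref's journal works endpoint returns. A minimal version using httpx,
# with cursor pagination omitted for brevity (Crossref caps rows at 1000):
import httpx

def get_doi_list_sketch(year: int, issn: str) -> list:
    resp = httpx.get(
        f"https://api.crossref.org/journals/{issn}/works",
        params={
            "filter": f"from-pub-date:{year}-01-01,until-pub-date:{year}-12-31",
            "rows": 1000,
        },
    )
    resp.raise_for_status()
    return resp.json()["message"]["items"]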
def separate(
    deprecated_files: Optional[str] = AudioInputOption,
    files: List[Path] = AudioInputArgument,
    adapter: str = AudioAdapterOption,
    bitrate: str = AudioBitrateOption,
    codec: Codec = AudioCodecOption,
    duration: float = AudioDurationOption,
    offset: float = AudioOffsetOption,
    output_path: Path = AudioOutputOption,
    stft_backend: STFTBackend = AudioSTFTBackendOption,
    filename_format: str = FilenameFormatOption,
    params_filename: str = ModelParametersOption,
    mwf: bool = MWFOption,
    verbose: bool = VerboseOption,
) -> None:
    """
    Separate audio file(s)
    """
    from .audio.adapter import AudioAdapter
    from .separator import Separator

    configure_logger(verbose)
    if deprecated_files is not None:
        logger.error(
            "⚠️ -i option is not supported anymore, audio files must be supplied "
            "using input argument instead (see spleeter separate --help)"
        )
        raise Exit(20)
    audio_adapter: AudioAdapter = AudioAdapter.get(adapter)
    separator: Separator = Separator(
        params_filename, MWF=mwf, stft_backend=stft_backend
    )
    for filename in files:
        separator.separate_to_file(
            str(filename),
            str(output_path),
            audio_adapter=audio_adapter,
            offset=offset,
            duration=duration,
            codec=codec,
            bitrate=bitrate,
            filename_format=filename_format,
            synchronous=False,
        )
    separator.join()
def login(
    email: str = Option(..., prompt=True),
    password: str = Option(..., prompt=True, hide_input=True),
):
    """
    Login to Papero and get an access_token to use with the API/CLI.
    """
    echo(f"Attempting login for {email}.")
    token_req = httpx.post(api_url("login/access-token"),
                           data={
                               "username": email,
                               "password": password,
                           })
    if token_req.status_code != 200:
        echo("Invalid password")
        raise Exit(code=1)
    echo("Login successful, use the following environment variable:")
    echo(f"export PAPERO_API_TOKEN={token_req.json().get('access_token')}")
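# Usage sketch (assumption, not from this excerpt): the endpoint name follows
# the common FastAPI OAuth2 password-flow layout, so the token is presumably
# sent as a standard Bearer header. api_url() is the same helper used above;
# the "users/me" path is hypothetical, chosen only for illustration.
import os

def whoami_sketch() -> dict:
    token = os.environ["PAPERO_API_TOKEN"]
    resp = httpx.get(api_url("users/me"),
                     headers={"Authorization": f"Bearer {token}"})
    resp.raise_for_status()
    return resp.json()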
def main(voting_phase_name: str,
         voting_module_name: str,
         update_existing: Optional[bool] = False,
         out: Optional[str] = None,
         config: Optional[str] = None):
    from ekklesia_portal.app import make_wsgi_app
    app = make_wsgi_app(config)
    session = Session()
    voting_phase = session.query(VotingPhase).filter_by(name=voting_phase_name).one()
    department = voting_phase.department
    module_config = prepare_module_config(app, department, voting_module_name)
    election_config = voting_phase_to_vvvote_election_config(
        module_config, voting_phase).to_json()

    if out:
        with open(out, "w") as wf:
            wf.write(election_config)
    elif voting_module_name in voting_phase.voting_module_data and not update_existing:
        config_url = voting_phase.voting_module_data[voting_module_name]["config_url"]
        echo(f"VVVote voting already exists for this voting phase at {config_url}, "
             "doing nothing. Add --update-existing to create a new voting.")
    elif voting_phase.status != VotingStatus.SCHEDULED:
        echo("Voting can only be created for scheduled voting phases, "
             f"this voting phase is in state '{voting_phase.status}'!")
        raise Exit(1)
    else:
        config_url = create_election_in_vvvote(module_config, election_config)
        voting_phase.voting_module_data[voting_module_name] = {"config_url": config_url}
        transaction.commit()
        echo(f"Created election config with id {config_url}.")
def version_callback(value: bool):  # pragma: no cover
    if value:
        name = __name__.split(".")[0]
        echo(f"{name} version {__version__}")
        raise Exit()
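# Wiring sketch (not from any of the excerpted projects): the standard Typer
# pattern for attaching the version_callback functions above to a --version
# flag. `is_eager=True` makes Typer evaluate the flag before any command runs,
# and the callback raises Exit so the program stops after printing the version.
from typing import Optional
import typer

app = typer.Typer()

@app.callback()
def _root(version: Optional[bool] = typer.Option(
        None, "--version", callback=version_callback, is_eager=True,
        help="Show version and exit.")):
    pass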