To view/run the 'nightly' commands, you need the testing dependencies.
Run `pip install [--editable] .[tests]`.
"""

# Override the class used by the ixmp CLI to load Scenario objects
ixmp.cli.ScenarioClass = message_ix.Scenario


@main.command("copy-model")
@click.option(
    "--set-default",
    is_flag=True,
    help="Set the copy to be the default used when running MESSAGE.",
)
@click.option("--overwrite", is_flag=True, help="Overwrite existing files.")
@click.argument("path", type=click.Path(file_okay=False))
def copy_model(path, overwrite, set_default):
    """Copy the MESSAGE GAMS files to a new PATH.

    To use an existing set of GAMS files, you can also call:

        $ message-ix config set "message model dir" PATH
    """
    path = Path(path).resolve()
    src_dir = Path(__file__).parent / "model"
    for src in src_dir.rglob("*"):
        # Skip certain files
        if src.suffix in (".gdx", ".log", ".lst"):
            continue
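# Hedged sketch (not the actual message_ix implementation): one way the copy
# step implied by the loop above could be completed for each remaining `src`.
# The helper name, destination layout, and --overwrite handling below are
# assumptions for illustration only.
import shutil
from pathlib import Path


def _copy_one(src: Path, src_dir: Path, dest_root: Path, overwrite: bool) -> None:
    """Mirror one model file from src_dir into dest_root."""
    dst = dest_root / src.relative_to(src_dir)
    if src.is_dir():
        dst.mkdir(parents=True, exist_ok=True)
        return
    if dst.exists() and not overwrite:
        print(f"{dst} exists; use --overwrite to replace it")
        return
    dst.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy2(src, dst)

# If --set-default is given, the copied PATH would presumably then be recorded
# in the ixmp configuration (cf. `message-ix config set "message model dir" PATH`).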
import os, sys

import click

import dentate
from dentate import plot, utils
from dentate.env import Env

script_name = os.path.basename(__file__)


@click.command()
@click.option("--config", required=True, type=str)
@click.option("--config-prefix", required=True,
              type=click.Path(exists=True, file_okay=False, dir_okay=True),
              default='config')
@click.option("--features-path", '-p', required=True, type=click.Path())
@click.option("--features-namespace", '-n', type=str)
@click.option("--arena-id", '-a', type=str, required=True)
@click.option("--trajectory-id", '-t', type=str, required=True)
@click.option("--coords-path", '-c', required=True, type=click.Path())
@click.option("--distances-namespace", '-d', type=str, default='Arc Distances')
@click.option("--include", '-i', type=str, multiple=True)
@click.option("--bin-size", type=float, default=100.)
@click.option("--from-spikes", type=bool, default=True)
@click.option("--normed", type=bool, default=False, is_flag=True)
@click.option("--font-size", type=float, default=14)
@click.option("--verbose", "-v", is_flag=True)
@click.option("--save-fig", is_flag=True)
def main(config, config_prefix, features_path, coords_path, features_namespace,
         arena_id, trajectory_id, distances_namespace, include, bin_size,
         from_spikes, normed, font_size, verbose, save_fig):
@ops.command()
@click.option(*OPTIONS_PROJECT["args"], **OPTIONS_PROJECT["kwargs"])
@click.option(*OPTIONS_RUN_UID["args"], **OPTIONS_RUN_UID["kwargs"])
@click.option(
    "--copy",
    "-c",
    is_flag=True,
    default=False,
    help="Copy the run before restarting.",
)
@click.option(
    "-f",
    "--file",
    "polyaxonfile",
    multiple=True,
    type=click.Path(exists=True),
    help="The polyaxonfiles to update with; they should be operation presets.",
)
@click.pass_context
@clean_outputs
def restart(ctx, project, uid, copy, polyaxonfile):
    """Restart run.

    Uses /docs/core/cli/#caching

    Examples:

    \b
    $ polyaxon ops restart --uid=8aac02e3a62a4f0aaa257c59da5eab80
    """
import click

from histogram import Histogram
from optimal_alignment_finder import OptimalAlignmentFinder


@click.command()
@click.option('--target-sequence', '-t',
              help='The target DNA sequence as a string.',
              type=str,
              required=True)
@click.option('--input-fasta-file', '-i',
              help='Input FASTA file; its sequence will be searched for the target sequence.',
              type=click.Path(exists=True),
              required=True)
def parse_input(target_sequence: str, input_fasta_file: str) -> None:
    """
    parse_input
    :param target_sequence: The target sequence as a string.
    :param input_fasta_file: The path to a FASTA file.
    """
    if not Helpers.valid_dna_sequence(target_sequence):
        raise click.UsageError(
            'The target sequence is not a valid DNA sequence.')

    input_sequence = Helpers.fasta_file_to_sequence(input_fasta_file)
    if not input_sequence:
patients do not fit in the scout field of view.

Returns image orientation and estimated distance in centimeters. These
measurements can be converted into equivalent water volumes using
AAPM-published tables.

\b
$ diana-plus ssde tests/resources/scouts ct_scout_01.dcm ct_scout_02.dcm
Measuring scout images
------------------------
ct_scout_01.dcm (AP): 28.0cm
ct_scout_02.dcm (LATERAL): 43.0cm
"""


@click.command(short_help="Estimate patient size from localizer", epilog=epilog)
@click.argument('path', type=click.Path(exists=True))
@click.argument('images', nargs=-1)
def ssde(path, images):
    """Estimate patient dimensions from CT-localizer IMAGES for size-specific dose estimation."""

    click.echo(click.style('Measuring scout images', underline=True, bold=True))
    D = DcmDir(path=path)
    for image in images:
        d = D.get(image, view=DixelView.PIXELS)
        result = d.measure_scout()
        click.echo("{} ({}): {}cm".format(image, result[0], round(result[1])))
pass_conf = click.make_pass_decorator(Config)


@click.group()
@click.option('--host', '-h', type=str, help='Hostname of MatterMost server')
@click.option('--token', '-t', type=str, help='Your personal access token')
@click.option('--port', '-p', type=int, default=443,
              help='Which port to use. Default 443')
@click.option('--config', '-c', type=click.Path(),
              help='Path to config file for host, port and token')
@click.version_option()
@click.pass_context
def cli(ctx, host, token, port, config):
    if config:
        settings = configparser.ConfigParser()
        settings.read(config)
        if not host and 'host' in settings['Default']:
            host = settings['Default']['host']
        if not token and 'token' in settings['Default']:
            token = settings['Default']['token']
        if not port and 'port' in settings['Default']:
            port = int(settings['Default']['port'])
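# Hedged example (not from the original source) of a config file the --config
# option above would read; the hostname and token values are illustrative only.
# configparser exposes the section below as settings['Default'].
#
#     [Default]
#     host = mattermost.example.com
#     port = 443
#     token = your-personal-access-token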
    output = format_output(title, cur, headers, None, table_format)

    for line in output:
        click.echo(line, nl=new_line)


@click.command()
@click.option('-h', '--host', envvar='MYSQL_HOST', help='Host address of the database.')
@click.option('-P', '--port', envvar='MYSQL_TCP_PORT', type=int,
              help='Port number to use for connection. Honors '
              '$MYSQL_TCP_PORT')
@click.option('-u', '--user', help='User name to connect to the database.')
@click.option('-S', '--socket', envvar='MYSQL_UNIX_PORT',
              help='The socket file to use for connection.')
@click.option('-p', '--password', 'password', envvar='MYSQL_PWD', type=str,
              help='Password to connect to the database')
@click.option('--pass', 'password', envvar='MYSQL_PWD', type=str,
              help='Password to connect to the database')
@click.option('--ssl-ca', help='CA file in PEM format',
              type=click.Path(exists=True))
@click.option('--ssl-capath', help='CA directory')
@click.option('--ssl-cert', help='X509 cert in PEM format',
              type=click.Path(exists=True))
@click.option('--ssl-key', help='X509 key in PEM format',
              type=click.Path(exists=True))
@click.option('--ssl-cipher', help='SSL cipher to use')
@click.option('--ssl-verify-server-cert', is_flag=True,
              help=('Verify server\'s "Common Name" in its cert against '
                    'hostname used when connecting. This option is disabled '
                    'by default'))
# as of 2016-02-15 revocation list is not supported by underlying PyMySQL
# library (--ssl-crl and --ssl-crlpath options in vanilla mysql client)
@click.option('-v', '--version', is_flag=True, help='Version of mycli.')
@click.option('-D', '--database', 'dbname', help='Database to use.')
@click.option('-R', '--prompt', 'prompt',
        if simulation is True:
            # TODO: Public API for mirroring existing registry
            self.blockchain.interface._registry._swap_registry(filepath=self.sim_registry_filepath)

        self.token_agent = NucypherTokenAgent(blockchain=self.blockchain)
        self.miner_agent = MinerAgent(token_agent=self.token_agent)
        self.policy_agent = PolicyAgent(miner_agent=self.miner_agent)


uses_config = click.make_pass_decorator(NucypherClickConfig, ensure=True)


@click.group()
@click.option('--version', help="Prints the installed version.", is_flag=True)
@click.option('--verbose', help="Enable verbose mode.", is_flag=True)
@click.option('--config-file', help="Specify a custom config filepath.", type=click.Path(), default="cool winnebago")
@uses_config
def cli(config, verbose, version, config_file):
    """Configure and manage nucypher nodes"""

    # validate_nucypher_ini_config(filepath=config_file)

    click.echo(BANNER)

    # Store config data
    config.verbose = verbose
    config.config_filepath = config_file

    if config.verbose:
        click.echo("Running in verbose mode...")

    if version:
    next(reader)  # skip headers
    for company_id, company_name, count in reader:
        company_id, count = int(company_id), int(count)
        company_name_norm = cg.preprocessor(company_name)
        if not company_name_norm:
            logger.info(
                f"Skipping {company_name} - it appears in stoplist")
            continue
        else:
            rows.append(
                NameEdge(source=company_name, target=company_id, n=count))
    return rows


@click.command()
@click.argument("input-file", type=click.Path(exists=True))
@click.argument("output-file", type=click.Path())
@click.option("--min-count", default=10)
def append_data(input_file, output_file, min_count):
    cg = CompanyGraph()
    new_rows = read_rows(input_file, cg)
    g = cg.graph
    for row in new_rows:
        if g.has_edge(row.source, row.target):
            g[row.source][row.target]["n"] += row.n
        else:
            if row.n >= min_count:
                g.add_edge(row.source, row.target, n=row.n)
    # Names should have 1 possible id candidate
# IMX Image: Base options
@click.group(context_settings=dict(help_option_names=['-?', '--help']), help=DESCRIP)
@click.version_option(VERSION, '-v', '--version')
def cli():
    click.echo()


# IMX Image: List IMX boot img content
@cli.command(short_help="List i.MX boot image content")
@click.option('-t', '--type', type=click.Choice(['auto', '67RT', '8M', '8QXP_A0', '8QM_A0', '8X']),
              default='auto', show_default=True, help="Image type")
@click.option('-o', '--offset', type=UINT, default=0, show_default=True, help="File Offset")
@click.option('-s', '--step', type=UINT, default=0x100, show_default=True, help="Parsing step")
@click.argument('file', nargs=1, type=click.Path(exists=True))
def info(offset, type, step, file):
    """ List i.MX boot image content """
    try:
        with open(file, 'rb') as stream:
            stream.seek(offset)
            if type == "auto":
                boot_image = parse(stream, step)
            else:
                img_type = {'67RT': BootImg2,
                            '8M': BootImg2,
                            '8QXP_A0': BootImg3a,
                            '8QM_A0': BootImg3b,
                            '8X': BootImg4}
                boot_image = img_type[type].parse(stream, step)
    def visit_class(self, cls: ClassDefinition) -> bool:
        if cls.mixin or cls.abstract or not cls.slots:
            return False
        if cls.description:
            for dline in cls.description.split('\n'):
                print(f'// {dline}')
        print(f'message {camelcase(cls.name)}')
        print(" {")
        self.relative_slot_num = 0
        return True

    def end_class(self, cls: ClassDefinition) -> None:
        print(" }")

    def visit_class_slot(self, cls: ClassDefinition, aliased_slot_name: str, slot: SlotDefinition) -> None:
        qual = 'repeated ' if slot.multivalued else 'optional ' if not slot.required or slot.primary_key else ''
        slotname = lcamelcase(aliased_slot_name)
        slot_range = self.obj_name(slot.range)
        self.relative_slot_num += 1
        # Proto fields are declared as "[qualifier] type name = number"
        print(f" {qual}{slot_range} {slotname} = {self.relative_slot_num}")


@click.command()
@click.argument("yamlfile", type=click.Path(exists=True, dir_okay=False))
@click.option("--format", "-f", default='proto', type=click.Choice(['proto']), help="Output format")
def cli(yamlfile, format):
    """ Generate proto representation of biolink model """
    print(ProtoGenerator(yamlfile, format).serialize())
import sys

import click
import pylint.lint

import getaddons
import travis_helpers
from getaddons import get_modules_changed
from git_run import GitRun

try:
    import ConfigParser
except ImportError:
    import configparser as ConfigParser


CLICK_DIR = click.Path(exists=True, dir_okay=True, resolve_path=True)


def get_extra_params(odoo_version):
    """Get extra pylint params by odoo version.
    Transform a pseudo-pylint-conf into params that overwrite the
    base-pylint-conf values.
    Use a pseudo-inheritance of configuration files to avoid having
    two config files (stable and pr-conf) for each odoo version.
    Example:
        pylint_master.conf
        pylint_master_pr.conf
        pylint_90.conf
        pylint_90_pr.conf
        pylint_80.conf
             for infileName in tqdm(self.currentfiles, total=len(self.currentfiles)))
        yecho("Done converting csv to hdf5.")

    def mergetelescopefiles(self):
        yecho("Merging telescope files.")
        self.get_currentfiles(DIRECTORIES[0]+M12+CSV)
        files1, files2 = self.split_currentfiles()
        self.currentfiles = Parallel(n_jobs=20)\
            (delayed(mt.mc_append_and_merge)
             ([m1], [m2], self.rootdir+DIRECTORIES[-3], easy_merge=True)
             for m1, m2 in tqdm(zip(files1, files2), total=len(files1)))
        yecho("Done merging telescope files.")


@click.command()
@click.option('--rootdir', "-rd", default="./", type=click.Path(file_okay=False, writable=True),
              help='RootDir from which to read the files.')
@click.option('--subdir', "-sd", default="", type=click.Path(file_okay=False),
              help='Subdir from which to read the files in each processing subdir.')
@click.option('--root2csv', "-rtc", default="False", type=bool,
              help='Convert root to csv or not. Also filters the events. Needs processed star and superstar files.')
@click.option('--csv2hdf5', "-cth", default="True", type=bool,
              help='Convert csv to hdf5 or not.')
@click.option('--mergehdf5', "-mh", default="False", type=bool,
              help='Merge the hdf5 files or not.')
@click.option('--mergetelescopes', "-mt", default="True", type=bool,
              help='Merge the telescope files or not.')
@click.option('--normalize', "-no", default="True", type=click.Choice(['True', 'False', 'pixelwise', 'camerawise']),
              help='How to normalize the data. Defaults to pixelwise; `True` also defaults to pixelwise.')
@click.option('--normfile', "-nf", default=None, type=click.Path(dir_okay=False, resolve_path=True, exists=True),
              help='Location of the normfile.')
        for symbol in unique_symbols:
            if symbol in result:
                dataset[-1].append(1)
            else:
                dataset[-1].append(0)

        if result[-1] == "SPAM":
            dataset[-1].append(SPAM_LABEL)
        else:
            dataset[-1].append(HAM_LABEL)

    return dataset


@click.command()
@click.option('--ham', type=click.Path(exists=True))
@click.option('--spam', type=click.Path(exists=True))
@click.option('--out', type=click.Path())
def main(ham, spam, out):
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).

        python make_dataset.py --ham ../../data/processed/ham.scan_results.txt
            --spam ../../data/processed/spam.scan_results.txt
            --out ../../data/processed/dataset.txt
    """
    logger = logging.getLogger(__name__)
    logger.info('making final data set from raw data')

    ham_scan_results = []
    spam_scan_results = []

    if ham:
from warnings import warn
import os
import shutil

import click

"""
Script assumes figures are exactly one directory deep.
E.g. under 'figures', not 'figures/eps'
"""


@click.command()
@click.argument('flatdir', type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument('figdir', type=click.Path(exists=True, file_okay=False, dir_okay=True))
def main(flatdir, figdir):
    figprefix = os.path.basename(figdir)
    for filename in os.listdir(flatdir):
        if filename.startswith(figprefix + '_'):
            targetfilename = os.path.splitext(filename)[0] + '.eps'
            sourcefilename = targetfilename[len(figprefix) + 1:]
            sourcepathname = os.path.join(figdir, sourcefilename)
            targetpathname = os.path.join(flatdir, targetfilename)
            if os.path.exists(sourcepathname):
                shutil.copy(sourcepathname, targetpathname, follow_symlinks=True)
            else:
                warn("Missing eps file '{}'".format(sourcepathname))
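# Hedged worked example (file names are illustrative only) of the name mapping
# performed by main() above, assuming figdir='figures':
#
#     >>> import os
#     >>> figprefix = 'figures'
#     >>> filename = 'figures_spectrum.pdf'
#     >>> targetfilename = os.path.splitext(filename)[0] + '.eps'
#     >>> targetfilename
#     'figures_spectrum.eps'
#     >>> targetfilename[len(figprefix) + 1:]  # source file looked up in figdir
#     'spectrum.eps'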
# -*- coding: utf-8 -*-
import logging
from pathlib import Path

import click
from dotenv import find_dotenv, load_dotenv


@click.command()
@click.argument("source_path", type=click.Path(exists=True))
@click.argument("external_path", type=click.Path(exists=True))
@click.argument("processed_path", type=click.Path())
def main(source_path, external_path, processed_path):
    """Runs data processing scripts to turn source data into
    processed data ready to be analyzed.
    """
    logger = logging.getLogger(__name__)
    logger.info("making final data set from raw data")


if __name__ == "__main__":
    log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_fmt)

    project_dir = Path(__file__).resolve().parents[2]

    load_dotenv(find_dotenv())

    main()
import parglare.termui as t


@click.group()
@click.option('--debug/--no-debug', default=False, help="Debug/trace output")
@click.option('--colors/--no-colors', default=True, help="Output coloring")
@click.pass_context
def pglr(ctx, debug, colors):
    """
    Command line interface for working with parglare grammars.
    """
    ctx.obj = {'debug': debug, 'colors': colors}


@pglr.command()
@click.argument('grammar_file', type=click.Path())
@click.pass_context
def check(ctx, grammar_file):
    debug = ctx.obj['debug']
    colors = ctx.obj['colors']
    check_get_grammar_table(grammar_file, debug, colors)


@pglr.command()
@click.argument('grammar_file', type=click.Path())
@click.pass_context
def viz(ctx, grammar_file):
    debug = ctx.obj['debug']
    colors = ctx.obj['colors']
    t.colors = colors
    grammar, table = check_get_grammar_table(grammar_file, debug, colors)
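# Hedged usage sketch (the grammar file name is illustrative only): assuming
# the group above is installed as the `pglr` console entry point, the
# subcommands defined here would be invoked like
#
#     pglr --debug check calc.pg
#     pglr viz calc.pg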
                      is_flag=True,
                      callback=set_ctx_obj_option,
                      is_eager=True,
                      expose_value=False,
                      help="Enable safe mode; disables all third party plugins.")
    ]
    return bulk_options(options)


#~~ helper for settings legacy options we still have to support on "octoprint"

legacy_options = bulk_options([
    hidden_option("--host", type=click.STRING, callback=set_ctx_obj_option),
    hidden_option("--port", type=click.INT, callback=set_ctx_obj_option),
    hidden_option("--logging", type=click.Path(), callback=set_ctx_obj_option),
    hidden_option("--debug", "-d", is_flag=True, callback=set_ctx_obj_option),
    hidden_option("--daemon", type=click.Choice(["start", "stop", "restart"]),
                  callback=set_ctx_obj_option),
    hidden_option("--pid", type=click.Path(), default="/tmp/octoprint.pid",
                  callback=set_ctx_obj_option),
    hidden_option("--iknowwhatimdoing", "allow_root", is_flag=True,
                  callback=set_ctx_obj_option),
    hidden_option("--ignore-blacklist", "ignore_blacklist", is_flag=True,
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------

import click
import os
from pkg_resources import resource_filename
import yaml

from ..workflow import integrate, summarize


@click.command()
@click.option('-i', '--in-seq',
              type=click.Path(exists=True, dir_okay=False),
              required=True,
              help='Input sequence file (can be a gzip file).')
@click.option('-o', '--out-file',
              type=click.Path(exists=False, dir_okay=False),
              required=True,
              help='Output annotation file.')
@click.option('-d', '--annot-dir',
              type=click.Path(file_okay=False),
              required=True,
              help='Directory that has the outputs of annotation tools.')
@click.option('--out-fmt',
              type=click.Choice(['gff3', 'genbank']),
              default='gff3',
import click
import pandas as pd


@click.command()
@click.argument('exp_table', type=click.Path(dir_okay=False, exists=True), required=True)
@click.argument('cutoff', type=click.FLOAT, default=1)
def main(exp_table, cutoff):
    exp_df = pd.read_table(exp_table, index_col=0)
    ubi_genes = exp_df.T.min() > cutoff
    ubi_genes = ubi_genes[ubi_genes].index
    for each in ubi_genes:
        print(each)


if __name__ == '__main__':
    main()
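# Hedged usage sketch (the file and script names, and the values, are
# illustrative only): exp_table is read with the first column as the gene
# index and one expression column per sample, e.g.
#
#     gene_id  sampleA  sampleB  sampleC
#     geneX    5.2      4.8      6.1
#     geneY    0.3      2.1      0.0
#
# `python ubi_genes.py expression.tsv 1` would then print only geneX, since
# its minimum expression across samples (4.8) exceeds the cutoff of 1.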
@click.option("-q", "--psm-fdr-threshold", default=0.05, type=float, help='Minimum FDR Threshold to use for filtering GPSMs when selecting identified glycopeptides') @click.option("-s", "--tandem-scoring-model", default='coverage_weighted_binomial', type=click.Choice( glycopeptide_tandem_scoring_functions.keys()), help="Select a scoring function to use for evaluating glycopeptide-spectrum matches") @click.option("-x", "--oxonium-threshold", default=0.05, type=float, help=('Minimum HexNAc-derived oxonium ion abundance ' 'ratio to filter MS/MS scans. Defaults to 0.05.')) @click.option("-a", "--adduct", 'adducts', multiple=True, nargs=2, help=("Adducts to consider. Specify name or formula, and a" " multiplicity.")) @processes_option @click.option("--export", type=click.Choice( ['csv', 'html']), multiple=True, help="export command to after search is complete") @click.option("-o", "--output-path", default=None, type=click.Path(writable=True), help=( "Path to write resulting analysis to.")) @click.option("-w", "--workload-size", default=500, type=int, help="Number of spectra to process at once") @click.option("--save-intermediate-results", default=None, type=click.Path(), required=False, help='Save intermediate spectrum matches to a file', cls=HiddenOption) def search_glycopeptide(context, database_connection, sample_path, hypothesis_identifier, analysis_name, output_path=None, grouping_error_tolerance=1.5e-5, mass_error_tolerance=1e-5, msn_mass_error_tolerance=2e-5, psm_fdr_threshold=0.05, peak_shape_scoring_model=None, tandem_scoring_model=None, oxonium_threshold=0.15, save_intermediate_results=None, processes=4, workload_size=500, adducts=None, export=None): """Identify glycopeptide sequences from processed LC-MS/MS data """ if output_path is None: output_path = make_analysis_output_path("glycopeptide") if peak_shape_scoring_model is None:
@click.command()
@click.option(
    "--clusters",
    type=CommaSeparatedValues(),
    help="Comma separated list of Kubernetes API server URLs (default: {})".format(
        DEFAULT_CLUSTERS),
    envvar="CLUSTERS",
)
@click.option(
    "--cluster-registry",
    metavar="URL",
    help="URL of Cluster Registry to discover clusters to report on",
)
@click.option("--kubeconfig-path", type=click.Path(exists=True),
              help="Path to kubeconfig file")
@click.option(
    "--kubeconfig-contexts",
    type=CommaSeparatedValues(),
    help="List of kubeconfig contexts to use (default: use all defined contexts)",
    envvar="KUBECONFIG_CONTEXTS",
)
@click.option(
    "--application-registry",
    metavar="URL",
    help="URL of Application Registry to look up team by application ID",
)
@click.option("--use-cache", is_flag=True,
)


@rnax.command()
@pass_pipe
@arggist
def show(pipe: Pipe):
    """Prints current selection to stdout, in tsv format."""
    # TODO allow storing plain matrix, i.e. without multiindex
    data = getattr(pipe, pipe.selection, pipe.matrix)
    data.to_csv(sys.stdout, sep='\t')


@rnax.command()
@click.argument('out', type=click.Path(writable=True, dir_okay=False, file_okay=True), default='-')
@click.option("--mode", "-m", type=click.Choice(['csv', 'pickle', 'auto']), default='auto')
@pass_pipe
@arggist
def store(pipe: Pipe, out: str, mode: str):
    """Save current selection in given file; in tsv format."""
    # TODO allow storing plain matrix, i.e. without multiindex
    data = getattr(pipe, pipe.selection, pipe.matrix)
    compression = 'infer'
    if mode == 'auto':
        if out.endswith('.gz'):
            # str.rstrip('.gz') would strip characters, not the suffix
            f = out[:-len('.gz')]
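# Hedged sketch (an assumption, not the original continuation): one way the
# 'auto' branch above could resolve the output mode from the file name before
# writing. The helper below is illustrative only.
def _resolve_mode(out: str) -> str:
    """Guess 'csv' or 'pickle' from the file name, ignoring a trailing .gz."""
    name = out[:-len('.gz')] if out.endswith('.gz') else out
    return 'pickle' if name.endswith(('.pkl', '.pickle')) else 'csv'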
import click
import netifaces
import os

from mixer.config import ConfigCheck
from rtmidi import midiutil


def print_midiports(ctx, param, value):
    if not value or ctx.resilient_parsing:
        return
    click.echo(midiutil.list_output_ports())
    ctx.exit()


@click.command()
@click.argument('config', type=click.Path(exists=True))
@click.option('--gui', is_flag=True)
@click.option('--restapi', is_flag=True)
@click.option('--port', default=5000, type=int)
@click.option('--debug', is_flag=True)
@click.option('--listmidi', is_flag=True, is_eager=True, expose_value=False, callback=print_midiports)
def start(config, gui, restapi, port, debug):
    """ Read config and start the app """
    c = ConfigCheck(config)
    Log the name of the file being processed and check the input file size
    against identify.FILE_TOO_LARGE_THRESHOLD for too large input.
    """
    for filename in utils.walk(file_or_dirs):
        log(f'* {filename:}', nl=False)
        if os.path.getsize(filename) > identify.FILE_TOO_LARGE_THRESHOLD:
            log(' ... SKIP')
            continue
        yield filename


@click.command('extract')
@click.argument('src',
                nargs=-1,
                type=click.Path(exists=True, resolve_path=True))
@click.option('--output', '-o',
              type=click.File('w'),
              default='-',
              help='Output file.')
@click.option('--existing', '-e',
              type=click.Path(exists=True),
              help='Existing Beancount ledger for de-duplication.')
@click.option('--reverse', '-r',
              is_flag=True,
              help='Sort entries in reverse order.')
@click.option('--failfast', '-x',
import logging

import click

from strephit.commons.classification import reverse_gazetteer
from sklearn.externals import joblib
from sklearn.svm import LinearSVC

from strephit.classification.feature_extractors import FactExtractorFeatureExtractor

logger = logging.getLogger(__name__)


@click.command()
@click.argument('training-set', type=click.File('r'))
@click.argument('language')
@click.option('-o', '--outfile', type=click.Path(dir_okay=False, writable=True),
              default='output/classifier_model.pkl', help='Where to save the model')
@click.option('-c', default=1.0, help='Penalty parameter C of the error term.')
@click.option('--loss', default='squared_hinge', help='Specifies the loss function.',
              type=click.Choice(['hinge', 'squared_hinge']))
@click.option('--penalty', default='l2', help='Specifies the norm used in the penalization.',
              type=click.Choice(['l1', 'l2']))
@click.option(
    '--dual',
    is_flag=True,
    help=
Command-line interface for the retrieval of a reactions<->kcat mapping of a model.
"""
# IMPORTS
# External modules
import click
# Internal modules
from submodules.get_reactions_kcat_mapping import get_reactions_kcat_mapping


# Set up console arguments using click decorators
@click.command()
@click.option("--sbml_path",
              required=True,
              type=click.Path(exists=True, file_okay=True, dir_okay=True),
              prompt="Analyzed SBML",
              help="Full path to the SBML of the metabolic model that shall be sMOMENT-enhanced.")
@click.option("--project_folder",
              required=True,
              type=click.Path(exists=True, dir_okay=True),
              prompt="Project folder",
              help="Path to the project folder in which the reactions<->kcat mapping JSON will be created.")
@click.option("--project_name",
              required=True,
              type=str,
              prompt="Project name",
              help="Name of the current project. The generated reactions<->kcat mapping JSON will have this name as prefix.")
@click.option("--organism",
              required=True,
              type=str,
        covariate=conditions,
        condition_type=condition_type,
        output_dir=output_dir,
        model_name=model_name)


# Group analysis - PyBASC
@group.command()
@click.argument('group_config')
def basc(group_config):
    import CPAC.pipeline.cpac_group_runner as cpac_group_runner
    cpac_group_runner.run_basc(group_config)


@group.command(name="mdmr")
@click.argument("group_config", type=click.Path(exists=True))
def group_mdmr(group_config):
    from CPAC.pipeline.cpac_group_runner import run_cwas
    run_cwas(group_config)


@group.command(name="isc")
@click.argument("group_config", type=click.Path(exists=True))
def group_isc(group_config):
    from CPAC.pipeline.cpac_group_runner import run_isc
    run_isc(group_config)


# Utilities
@main.group()
def utils():
def app_group(parent):
    """App CLI group"""
    formatter = cli.make_formatter(cli.AppPrettyFormatter)

    @parent.group(name='app')
    def app():
        """Manage app configuration"""
        pass

    @app.command()
    @cli.admin.ON_EXCEPTIONS
    def list():  # pylint: disable=W0622
        """List apps"""
        for appname in master.list_scheduled_apps(context.GLOBAL.zk.conn):
            print(appname)

    @app.command()
    @click.option('-m', '--manifest', type=click.Path(exists=True, readable=True),
                  required=True)
    @click.option('--env', help='Proid environment.', required=True,
                  type=click.Choice(['dev', 'qa', 'uat', 'prod']))
    @click.option('--proid', help='Proid.', required=True)
    @click.option('-n', '--count', type=int, default=1)
    @click.argument('app')
    @cli.admin.ON_EXCEPTIONS
    def schedule(app, manifest, count, env, proid):
        """Schedule app(s) on the cell master"""
        with open(manifest, 'rb') as fd:
            data = yaml.load(fd.read())

        # TODO: should we delete all potential attributes starting
        #       with _ ?
        if '_id' in data:
            del data['_id']

        data['environment'] = env

        if 'affinity' not in data:
            # TODO: allow custom affinity formats.
            data['affinity'] = '{0}.{1}'.format(*app.split('.'))

        data['proid'] = proid

        scheduled = master.create_apps(context.GLOBAL.zk.conn, app, data, count)
        for app_id in scheduled:
            print(app_id)

    @app.command()
    @click.argument('instance')
    @cli.admin.ON_EXCEPTIONS
    def configure(instance):
        """View app instance configuration"""
        scheduled = master.get_app(context.GLOBAL.zk.conn, instance)
        cli.out(formatter(scheduled))

    @app.command()
    @click.argument('apps', nargs=-1)
    @cli.admin.ON_EXCEPTIONS
    def delete(apps):
        """Deletes (unschedules) the app by pattern"""
        master.delete_apps(context.GLOBAL.zk.conn, apps)

    del list
    del schedule
    del delete
    del configure
def test_data_option():
    return planemo_option(
        "--test_data",
        type=click.Path(exists=True, file_okay=False, resolve_path=True),
        help="test-data directory for the specified tool(s).",
    )