def main():
    """CLI entry point: parse vocab-pie arguments and render the chart.

    Catches VocabPieError and shows it to the user instead of a traceback.
    """
    def _str_to_bool(value):
        # BUG FIX: argparse's ``type=bool`` calls bool() on the raw string,
        # so any non-empty value -- including "False" -- parses as True.
        # Interpret the usual textual spellings explicitly instead.
        if isinstance(value, bool):
            return value
        lowered = value.strip().lower()
        if lowered in ("1", "true", "yes", "on"):
            return True
        if lowered in ("0", "false", "no", "off"):
            return False
        raise ValueError("invalid boolean value: %r" % (value,))

    parser = ArgumentParser()
    parser.add_argument("--file", type=str, required=True)
    # was ``type=bool`` -- see _str_to_bool above
    parser.add_argument("--ignore-case", type=_str_to_bool,
                        default=DEFAULT_IGNORE_CASE)
    parser.add_argument("--output-file", type=str, default=DEFAULT_OUTPUT_FILE)
    parser.add_argument("--prefix", type=str, default=None)
    parser.add_argument("--title", type=str, default=DEFAULT_TITLE)
    parser.add_argument("--max-layers", type=int, default=DEFAULT_MAX_LAYERS)
    parser.add_argument("--min-display-percentage", type=float,
                        default=DEFAULT_MIN_DISPLAY_PERCENTAGE)
    parser.add_argument("--min-label-percentage", type=float,
                        default=DEFAULT_MIN_LABEL_PERCENTAGE)
    parser.add_argument("--label-font-size", type=int,
                        default=DEFAULT_LABEL_FONT_SIZE)
    args = parser.parse_args()

    try:
        create_from_file(
            args.file, args.ignore_case, args.output_file, args.prefix,
            args.title, args.max_layers, args.min_display_percentage,
            args.min_label_percentage, args.label_font_size)
    except VocabPieError as e:
        e.display()
def parse_arguments():
    """Parse CLI flags (and GTS_* environment variables) for the extractor.

    Returns the parsed argument namespace.
    """
    description = ("Lee un dataset versionado (fichero CSV en GIT) y genera "
                   "un CSV con los últimos valores de cada datos")
    parser = ArgumentParser(description=description)

    parser.add('-v', dest='verbose', action="count", env_var='GTS_VERBOSE',
               required=False, help='', default=0)
    parser.add('-d', dest='debug', action="store_true", env_var='GTS_DEBUG',
               required=False, help='', default=False)
    parser.add('-i', dest='infile', type=str, env_var='GTS_INFILE',
               help='Fichero de entrada', required=False)
    parser.add('-o', dest='outfile', type=str, env_var='GTS_OUTFILE',
               help='Fichero de salida', required=True)
    parser.add('-r', dest='repoPath', type=str, env_var='GTS_REPOPATH',
               help='Directorio base del repositorio GIT', required=True)
    parser.add('-f', dest='csvPath', type=str, env_var='GTS_CSVPATH',
               help='Ubicación del fichero de dataset dentro del repositorio GIT',
               required=True)
    parser.add('-t', dest='colIndice', type=str, env_var='GTS_INDEXCOL',
               choices=['zona_basica_salud', 'municipio_distrito'],
               help='Columnas se van a usar como indice del dataframe',
               required=True)
    parser.add('-c', dest='create', action="store_true", env_var='GTS_CREATE',
               required=False, help='Inicializa el fichero si no existe ya',
               default=False)

    return parser.parse_args()
def dev_server():
    """Run the development web server, or initialize the database.

    The positional ``command`` selects the action: ``run`` (default) starts
    the app; ``initdb`` creates the database schema (requires the "db" extra
    and is incompatible with ``--no-db``).
    """
    parser = ArgumentParser(
        auto_env_var_prefix='WOL_',
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--bind', '-b', default='127.0.0.1',
                        help="ip address to listen")
    # BUG FIX: without type=int a port supplied on the command line (or via
    # WOL_PORT) would stay a string.
    parser.add_argument('--port', '-p', type=int, default=5000,
                        help="port to listen")
    parser.add_argument('--debug', '-d', action='store_true', default=False,
                        help="run in debug mode")
    parser.add_argument('--no-db', action='store_true', default=False,
                        help="do not use database and disable CRUD api")
    parser.add_argument('command', choices=('run', 'initdb'), nargs='?',
                        default='run')
    args = parser.parse_args()

    app = create_app(no_db=args.no_db)
    if args.command == 'run':
        app.run(host=args.bind, port=args.port, debug=args.debug)
    elif args.command == 'initdb':
        if args.no_db:
            print("incompatible command and \"--no-db\" argument")
            sys.exit(1)
        elif not models:
            # BUG FIX: message previously lacked the closing parenthesis.
            print("database deps is not installed (extra \"db\")")
            sys.exit(1)
        else:
            models.init_db()
            print("db initialized")
def parse_arguments():
    """Collect and return the edge2vec command-line options."""
    parser = ArgumentParser(description='Arguments For edge2vec')
    base_group = parser.add_argument_group('Base Configs')
    base_group.add_argument('-i', '--input', type=str, required=True,
                            help='path to the input graph file')
    base_group.add_argument('-m', '--model', type=str, required=True,
                            help='the output directory of model files')
    base_group.add_argument('-n', '--num', type=int, required=True,
                            help='the maximum num of the node')
    base_group.add_argument('-s', '--sample', type=int, required=True,
                            help='the num of negative samples')
    return parser.parse_args()
def parse_testplan_args(args):
    """Parse the arguments for running a single test plan.

    ``args`` is the raw argument list (e.g. ``sys.argv[1:]``).
    """
    parser = ArgumentParser(
        description="Run a scenario",
        default_config_files=["config.ini"],
        args_for_setting_config_path=["-c", "--config"],
    )
    # shared options first, then the test-plan positional
    parse_common_args(parser)
    parser.add_argument("test_plan")
    parsed = parser.parse_args(args)
    return parsed
def procesaArgumentos():
    """Parse CLI flags (and SM_* environment variables) for the run.

    Returns the parsed argument namespace.
    """
    parser = ArgumentParser()

    parser.add('-i', dest='infile', type=str, env_var='SM_INFILE',
               required=True)
    parser.add('-t', dest='temporada', type=str, env_var='SM_TEMPORADA',
               required=True)
    parser.add('-j', dest='jornada', type=int, required=True)
    parser.add('-l', '--lista-socios', dest='listaSocios',
               action="store_true", default=False)
    parser.add("-o", "--output-dir", dest="outputdir", type=str,
               default=LOCATIONCACHE)
    # parallelism / joblib knobs
    parser.add('--nproc', dest='nproc', type=int, default=NJOBS)
    parser.add('--memworker', dest='memworker', default=MEMWORKER)
    parser.add('--joblibmode', dest='joblibmode', choices=JOBLIBCHOICES,
               default='threads')
    # logging / diagnostics
    parser.add('-v', dest='verbose', action="count", env_var='SM_VERBOSE',
               required=False, default=0)
    parser.add('-d', dest='debug', action="store_true", env_var='SM_DEBUG',
               required=False, default=False)
    parser.add('--logdir', dest='logdir', type=str, env_var='SM_LOGDIR',
               required=False)

    return parser.parse_args()
def config():
    """Get ConfigargParse based configuration.

    The config file in /etc gets overriden by the one in $HOME which gets
    overriden by the one in the current directory. Everything can also be
    set from environment variables.
    """
    conf_name = basename(__file__).replace(".py", ".conf")
    search_path = [
        "/etc/" + conf_name,
        expanduser("~") + "/" + conf_name,
        conf_name,
    ]
    parser = ArgumentParser(
        default_config_files=search_path,
        description="RaBe Intranet Landing Page",
    )
    parser.add_argument("--title", env_var="PAGE_TITLE",
                        default="RaBe Intranet")
    parser.add_argument(
        "--background-image",
        env_var="PAGE_BACKGROUND",
        default="https://rabe.ch/wp-content/uploads/2016/07/Header.gif",
    )
    parser.add_argument("--links", env_var="PAGE_LINKS", action="append",
                        default=[])
    parser.add_argument("--address", env_var="PAGE_ADDRESS",
                        default="0.0.0.0")
    parser.add_argument("--port", env_var="PAGE_PORT", default=5000)
    parser.add_argument("--thread-pool", env_var="PAGE_THREADPOOL",
                        default=30)
    parser.add_argument("--dev", env_var="PAGE_DEVSERVER", default=False)

    args = parser.parse_args()
    # dump where each value came from (file / env / default) for debugging
    logger.error(parser.format_values())

    # fall back to the built-in link list when none was configured
    if not args.links:
        args.links = [
            "Studiomail;//studiomail.int.example.org",
            "Homepage;https://www.rabe.ch",
            "Intranet;//wiki.int.example.org/",
        ]

    if args.links:
        def _as_link(raw):
            # entries look like "name;target"
            parts = raw.split(";")
            return {"name": parts[0], "target": parts[1]}

        args.links = [_as_link(entry) for entry in args.links]

    # PAGE_PORT arrives as a string when set via environment/CLI
    if args.port:
        args.port = int(args.port)
    return args
def args():
    """Parse the run configuration (federated-learning / data / model / optim).

    Scales ``n_epochs`` by 2.5 for split methods other than ``b_min-cut``
    and ``random_choice``.
    """
    parser = ArgumentParser(default_config_files=['./config.yml'])
    fl_args(parser)
    data_args(parser)
    model_args(parser)
    optim_args(parser)
    args = parser.parse_args()

    # BUG FIX: the original condition was
    #     args.split_method == 'b_min-cut' or 'random_choice'
    # which is always truthy ('random_choice' is a non-empty string), so the
    # epoch scaling below was unreachable.
    if args.split_method not in ('b_min-cut', 'random_choice'):
        args.n_epochs = int(args.n_epochs * 2.5)
    return args
def parse_scenario_args(args):
    """Parse the arguments for running a scenario.

    ``args`` is the raw argument list (e.g. ``sys.argv[1:]``).
    """
    parser = ArgumentParser(
        description="Run a scenario",
        default_config_files=["config.ini"],
        args_for_setting_config_path=["-c", "--config"],
    )
    # shared options, then the scenario name plus its free-form arguments
    parse_common_args(parser)
    parser.add_argument("scenario")
    parser.add_argument("scenario_args", nargs="*",
                        help="Arguments for the specific scenario")
    parsed = parser.parse_args(args)
    return parsed
# Build the evaluation-run parameter set as an AttrDict.
#
# Two-phase parse: parse_known_args() is called first so that --model_type
# can be read before the full parse, letting the selected wrapper class
# (TYPE2WRAPPER[model_type]) register its model-specific arguments on the
# same parser.  After the full parse the GPU count is recorded and the
# output directories (out_dir / out_dir/model_type) are created.
# NOTE(review): --config is declared is_config_file=True and required, so a
# YAML config file path must always be supplied.
def load_eval_params(): parser = ArgumentParser(config_file_parser_class=YAMLConfigFileParser) parser.add_argument('-c', '--config', is_config_file=True, required=True, type=str, help='config file path') parser.add_argument('--out_dir', type=str, default='results', help='path to save results') parser.add_argument('--log_dir', type=str, default='logs') parser.add_argument('--num_workers', type=int, default=4) parser.add_argument('--val_workers', type=int, default=2) parser.add_argument('--batch_size', type=int, default=64) parser.add_argument('--num_samples', type=int, default=100000, help='num/samples for p-val permutation test') parser.add_argument( '--test2features_path', type=str, required=True, help='path to JSON file of features, stored by model and test') parser.add_argument('--tests', nargs='+', required=True, help='paths to tests to run') parser.add_argument('--model_type', type=str, required=True, choices=['lxmert', 'visualbert', 'vilbert', 'vlbert']) parser.add_argument('--model_archive', type=str, required=True, help='path to saved model to load') parser.add_argument('--max_seq_length', type=int, default=36) # add model-specific arguments model_type = parser.parse_known_args()[0].model_type TYPE2WRAPPER[model_type].add_model_args(parser) args = parser.parse_args() # additional arguments, check dirs args.num_gpus = torch.cuda.device_count() params = AttrDict({k: getattr(args, k) for k in vars(args)}) # make model directories makedirs(params.out_dir, exist_ok=True) makedirs(path.join(params.out_dir, params.model_type), exist_ok=True) return params
def generate_targets():
    """Entry point for the pwngeth target-generation CLI.

    Reads the Shodan API key from the config/CLI and hands it to the
    generator main routine.
    """
    parser = ArgumentParser('pwngeth generate targets cli',
                            config_file_parser_class=YAMLConfigFileParser)
    parser.add_argument('-c', '--config', required=True, is_config_file=True,
                        help='config file path')
    parser.add_argument('--shodan_key', required=True,
                        help='Shodan API key')
    opts = parser.parse_args()
    gen_targets_main(opts.shodan_key)
def get_args():
    """Parse the API-server settings (overridable via TMANAGER_* env vars)."""
    parser = ArgumentParser(auto_env_var_prefix="TMANAGER_",
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--api-address', default='0.0.0.0',
                        help='IPv4/IPv6 address API server would listen on')
    parser.add_argument('--api-port', default=8081,
                        help='TCP port API server would listen on')
    parser.add_argument('--log-level', default="DEBUG",
                        help='default logging level')
    return parser.parse_args()
def test_save(tmp_path):
    """io.save followed by io.load_mat round-trips the U matrix."""
    parser = get_parser_discrete(ArgumentParser())
    options = parser.parse_args("")
    options.data_dir = tmp_path

    step = 1
    U = [[1, 2]]
    xs, ys = [1], [2]
    F = [[1]]
    layout_pos_list = [1, 2]
    io.save(options, step, U, xs, ys, F, layout_pos_list)

    saved_path = tmp_path / f"{options.prefix}{step}.{options.file_format}"
    loaded = io.load_mat(saved_path)
    assert loaded["u"] == pytest.approx(np.array(U))
def pwn_targets():
    """Entry point for the pwngeth pwn-targets CLI.

    Reads the destination wallet address from the config/CLI and hands it to
    the fund-securing main routine.
    """
    parser = ArgumentParser('pwngeth pwn targets cli',
                            config_file_parser_class=YAMLConfigFileParser)
    parser.add_argument('-c', '--config', required=True, is_config_file=True,
                        help='config file path')
    parser.add_argument('--secure_address', required=True,
                        help='Secure eth wallet address')
    opts = parser.parse_args()
    secure_funds_main(opts.secure_address)
def gen_args(args: Optional[str] = None) -> Namespace:
    """Assemble the full argument namespace for a run.

    Registers every argument group on one parser, parses ``args`` (or
    sys.argv when None), then post-processes the objective/cleanup fields.
    """
    parser = ArgumentParser()
    for register in (add_general_args, add_encoder_args, add_pool_args,
                     add_acquisition_args, add_objective_args,
                     add_model_args, add_stopping_args):
        register(parser)

    namespace = parser.parse_args(args)
    modify_objective_args(namespace)
    cleanup_args(namespace)
    return namespace
# Set up a beta-VAE training run on the IDR0017 zarr dataset: parse
# CLI/config options plus the model-specific arguments, seed all RNGs for
# reproducibility, dump the hyper-parameters to vae_config.yml, build the
# datasets and construct the model.
# NOTE(review): train_ds and val_ds are built with identical arguments
# (both training=True) -- confirm the validation set is meant to reuse the
# training split.
# NOTE(review): model paths (/home/gvisona/..., /media/gvisona/...) are
# hard-coded; tb_logger and model are created but no trainer.fit() call is
# visible here, and the trailing print looks like leftover debug output.
def train_idr0017(): parser = ArgumentParser() parser.add('-c', '--config', is_config_file=True, help='config file path') parser.add_argument("--seed", type=int, default=0) parser.add_argument("--experiment_name", type=str, default="betaVAE_MNIST") parser.add_argument("--model_dir", type=str, default="/home/gvisona/Desktop/runs") parser.add_argument("--log_dir", type=str, default="/home/gvisona/Desktop/runs") parser.add_argument("--gpus", type=int, default=0) parser.add_argument("--max_epochs", type=int, default=2) parser = ConvolutionalBetaVAE.add_model_specific_args(parser) hparams = parser.parse_args() # set seeds torch.manual_seed(hparams.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(hparams.seed) tb_logger = loggers.TensorBoardLogger(hparams.log_dir) with open(os.path.join(hparams.log_dir, 'vae_config.yml'), 'w') as outfile: yaml.dump(hparams.__dict__, outfile, default_flow_style=False) transform = lambda im: RandomCropTransform(im, 128) train_ds = IDR0017_FullImgs_ZarrDataset( "/media/gvisona/GioData/idr0017/data/idr0017.zarr", training=True, transform=transform) val_ds = IDR0017_FullImgs_ZarrDataset( "/media/gvisona/GioData/idr0017/data/idr0017.zarr", training=True, transform=transform) model = ConvolutionalBetaVAE(hparams, train_ds, val_ds) print("HELLO")
def _get_config():
    """Read raw2jpg settings from the CLI and the XDG config file."""
    config_home = os.getenv('XDG_CONFIG_HOME', '~/.config')
    parser = ArgumentParser(
        default_config_files=[config_home + '/raw2jpg.conf'])
    parser.add_argument('directories', metavar='DIRECTORY', nargs='*',
                        help='Directory to process')
    parser.add_argument('-s', '--size', type=int, default=1920,
                        help='Size to generate the jpgs (max edge)')
    parser.add_argument('--overwrite', type=_str_to_bool, default=False,
                        help='Overwrite generated image if it already exists')
    parser.add_argument('--clean', action='store_true', default=False,
                        help='Remove RAW files for which the related JPG file '
                             'has already been removed')
    return parser.parse_args()
def procesaArgumentos():
    """Parse CLI flags (and SM_* environment variables) for the solver run.

    Returns the parsed argument namespace.
    """
    parser = ArgumentParser()

    parser.add('-i', dest='infile', type=str, env_var='SM_INFILE',
               required=True)
    parser.add('-t', dest='temporada', type=str, env_var='SM_TEMPORADA',
               required=True)
    parser.add('-j', dest='jornada', type=int, required=True)
    # repeatable include/exclude filters
    parser.add('-s', '--include-socio', dest='socioIn', type=str,
               action="append")
    parser.add('-e', '--exclude-socio', dest='socioOut', type=str,
               action="append")
    parser.add('-l', '--lista-socios', dest='listaSocios',
               action="store_true", default=False)
    parser.add("--data-dir", dest="datadir", type=str, default=LOCATIONCACHE)
    parser.add("--solution-file", dest="solfile", type=str)
    # logging / diagnostics
    parser.add('-v', dest='verbose', action="count", env_var='SM_VERBOSE',
               required=False, default=0)
    parser.add('-d', dest='debug', action="store_true", env_var='SM_DEBUG',
               required=False, default=False)
    parser.add('--logdir', dest='logdir', type=str, env_var='SM_LOGDIR',
               required=False)
    # anything left over on the command line
    parser.add('otherthings', nargs='*')

    return parser.parse_args()
def gen_args(argv: Optional[str] = None) -> Namespace:
    """Build the virtual-screening argument namespace.

    Derives a default run name from the receptor and ligand file stems, and
    rewrites the negated CLI flags (no_title_line, dont_enclose_spheres)
    into their positive counterparts.
    """
    parser = ArgumentParser(
        description='Automate virtual screening of compound libraries.')
    add_general_args(parser)
    add_preprocessing_args(parser)
    add_preparation_args(parser)
    add_screening_args(parser)
    add_docking_args(parser)
    add_postprocessing_args(parser)

    args = parser.parse_args(argv)

    if args.name is None:
        receptor_stem = Path(args.receptor).stem
        ligands_stem = Path(args.ligands).stem
        args.name = f'{receptor_stem}_{ligands_stem}'

    # flip the negative flags into positive attributes and drop the originals
    args.title_line = not args.no_title_line
    del args.no_title_line
    args.enclose_spheres = not args.dont_enclose_spheres
    del args.dont_enclose_spheres

    return args
def execute(argv: Sequence[str] = None):
    """Execute the help command."""
    from . import available_commands, load_command, PROG

    parser = ArgumentParser(prog=PROG)
    parser.add_argument(
        "-v",
        "--version",
        action="store_true",
        help="print application version and exit",
    )

    # Register every other command so its options appear in the help output.
    subparsers = parser.add_subparsers()
    for cmd in available_commands():
        name = cmd["name"]
        if name == "help":
            continue
        command_module = load_command(name)
        sub = subparsers.add_parser(name, help=cmd["summary"])
        command_module.init_argument_parser(sub)

    args = parser.parse_args(argv)
    if args.version:
        print(__version__)
    else:
        parser.print_help()
def parse_configuration():
    """Return the configuration for what evolution to run and how."""
    parser = ArgumentParser(description=__doc__,
                            default_config_files=['system.params'],
                            formatter_class=DefaultsFormatter,
                            ignore_unknown_config_file_keys=False)
    parser.add_argument('--config', '-c', is_config_file=True,
                        help='Config file to use instead of default.')
    add_binary_config(parser)
    add_evolution_config(parser)

    # Plot specs are repeatable; each occurrence appends one spec tuple.
    parser.add_argument(
        '--plot',
        nargs=2,
        action='append',
        default=[],
        metavar=('X_EXPR', 'Y_EXPR'),
        help='Add another plot to create. Each quantity can be a mathematical '
             'expression involving evolution quantities.')
    parser.add_argument(
        '--plot-with-tangents',
        nargs=4,
        action='append',
        default=[],
        metavar=('X_EXPR', 'Y_EXPR', 'DYDX_EXPR', 'NUM_TANGENTS'),
        help='Add another plot that will also show tangent lines calculated '
             'assuming `DYDX_EXPR` evaluates to the slope at a given point. '
             'Tangent lines are drawn at the tabulated evolution points '
             'closest to `NUM_TANGENTS` evenly spaced values of `X_EXPR` '
             'covering the full range.')
    return parser.parse_args()
def main():
    """Start the Discord bot: parse config, load extensions, run it."""
    parser = ArgumentParser()
    parser.add_argument('-c', '--config', required=True, is_config_file=True,
                        help='Config file path.')
    parser.add_argument('-t', '--token', metavar='DISCORD_BOT_TOKEN',
                        required=True, env_var='DISCORD_BOT_TOKEN',
                        help='Discord bot token.')
    parser.add_argument('-p', '--prefix', metavar='COMMAND_PREFIX',
                        default='!', env_var='COMMAND_PREFIX',
                        help='Command prefix.')

    # Let every extension register its own options before parsing.
    loaded_extensions = [import_module(name) for name in extensions]
    for extension in loaded_extensions:
        extension.setup_args(parser)

    args = parser.parse_args()
    bot = Bot(command_prefix=args.prefix,
              formatter=Formatter(),
              description=description.format(version__, args.prefix,
                                             source_url))

    # NOTE(review): aiohttp.ClientSession is normally an *async* context
    # manager; the synchronous ``with`` form only works on old aiohttp
    # releases -- confirm the pinned aiohttp version supports it.
    with aiohttp.ClientSession() as session:
        bot.aiohttp = session
        for extension in loaded_extensions:
            extension.setup(bot, args)
        bot.run(args.token)
# Build the parameter set for an (evaluation-time) query run as an AttrDict:
# YAML-config-aware parser with general I/O options, dataset paths, and the
# model selection/loading options; records the GPU count, forces
# is_train=False, and creates the output directories.
# NOTE(review): the --bert_model default 'bert-case-uncased' looks like a
# typo for 'bert-base-uncased' -- confirm against the model hub before
# changing it.
def load_query_params(): parser = ArgumentParser(config_file_parser_class=YAMLConfigFileParser) # general parser.add_argument('-c', '--config', is_config_file=True, required=True, type=str, help='config file path') parser.add_argument('--out_dir', type=str, default='results', help='path to save results') parser.add_argument('--log_dir', type=str, default='logs') parser.add_argument('--num_workers', type=int, default=4) parser.add_argument('--val_workers', type=int, default=2) parser.add_argument('--batch_size', type=int, default=64) # bias tests and image/text data parser.add_argument('--coco_ontology', type=str, help='only required for COCO dataset') parser.add_argument('--path_to_obj_list', type=str, help='path to list of objects by idx; only needed for ViLBERT') # model parser.add_argument('--model_type', type=str, required=True, choices=['lxmert', 'visualbert', 'vilbert', 'vlbert']) parser.add_argument('--model_archive', type=str, required=True, help='path to saved model to load') parser.add_argument('--model_config', type=str, help='path to additional, model-specific configs') parser.add_argument('--bert_model', type=str, default='bert-case-uncased') parser.add_argument('--do_lower_case', action='store_true') parser.add_argument('--max_seq_length', type=int, default=36) parser.add_argument('--bert_cache', type=str, default='.pytorch_pretrained_bert') args = parser.parse_args() # additional arguments, check dirs args.num_gpus = torch.cuda.device_count() args.is_train = False makedirs(args.out_dir, exist_ok=True) makedirs(path.join(args.out_dir, args.model_type), exist_ok=True) params = AttrDict({k:getattr(args, k) for k in vars(args)}) return params
'epochs': 230, 'height': height, 'width': width, 'ssd_config': '[train:{}, val:{}]'.format(ssd_config_path, ssd_config_path_val) } util.write_config(config, config_path) # write annotation pickle if annot_save is not None: pickle.dump(data, open(annot_save, 'w')) if __name__ == '__main__': parser = ArgumentParser() parser.add_argument('--data_dir', required=True, help='path to directory with vocdevkit data') parser.add_argument('--overwrite', action='store_true', help='overwrite files') parser.add_argument('--height', type=int, default=512, help='height of reshaped image') parser.add_argument('--width', type=int, default=512, help='width of reshape image') parser.add_argument('--train_fraction', type=float, default=0.9, help='width of reshape image') parser.add_argument('--annot_save', type=str, default=None, help='separately save annotations to this file.') args = parser.parse_args() cities = ['AOI_1_Rio', 'AOI_2_Vegas_Train', 'AOI_3_Paris_Train', 'AOI_4_Shanghai_Train', 'AOI_5_Khartoum_Train'] ingest_spacenet(cities=cities, data_dir=args.data_dir, height=args.height, width=args.width, overwrite=args.overwrite, annot_save=args.annot_save)
# Build the PowerfulSeal top-level CLI: a YAML-config-aware parser with
# general flags (-c config, -v/-s logging, -V version) plus one subparser
# per mode of operation (interactive, autonomous, label, validate), each
# assembled from the shared add_*_options helpers.  The autonomous mode also
# gets a "Web UI settings" group (host/port/headless/proxy headers).
# Returns parser.parse_args(args).
# NOTE(review): several subparser help strings are long adjacent-literal
# concatenations; a couple are missing separating spaces at the join points
# (e.g. "...returns.You can use this...") -- cosmetic only.
def parse_args(args): parser = ArgumentParser( config_file_parser_class=YAMLConfigFileParser, formatter_class=argparse.RawDescriptionHelpFormatter, default_config_files=['~/.config/seal', '~/.seal'], description=textwrap.dedent("""\ PowerfulSeal The Chaos Engineering tool for Kubernetes """), ) # General settings parser.add_argument( '-c', '--config', is_config_file=True, env_var="CONFIG", help='Config file path', ) parser.add_argument('-v', '--verbose', action='count', help='Verbose logging.') parser.add_argument('-s', '--silent', action='count', help='Silent logging.') parser.add_argument( '-V', '--version', action='version', version='%(prog)s {version}'.format( version=powerfulseal.version.__version__), help='Version.', ) # subparsers subparsers = parser.add_subparsers( title='MODES OF OPERATION', description=( 'Pick one of the following options to start the Seal in the ' 'specified mode. Learn more at ' 'https://github.com/bloomberg/powerfulseal#introduction'), dest='mode') ########################################################################## # INTERACTIVE MODE ########################################################################## parser_interactive = subparsers.add_parser( 'interactive', help=( 'Starts an interactive CLI, which allows to manually issue ' 'commands on pods and nodes and provides a sweet autocomplete. ' 'If you\'re reading this for the first time, you should probably ' 'start here. '), ) add_common_options(parser_interactive) ########################################################################## # AUTONOMOUS MODE ########################################################################## parser_autonomous = subparsers.add_parser( 'autonomous', help= ('This is the main mode of operation. It reads the policy file and executes it.' 
), ) add_common_options(parser_autonomous) add_policy_options(parser_autonomous) add_metrics_options(parser_autonomous) # web ui settings web_args = parser_autonomous.add_argument_group(title='Web UI settings') web_args.add_argument('--headless', help='Doesn\'t start the UI, just runs the policy', action='store_true') web_args.add_argument('--host', help='Specify host for the PowerfulSeal web server', default=os.environ.get('HOST', '0.0.0.0')) web_args.add_argument('--port', help='Specify port for the PowerfulSeal web server', default=int(os.environ.get('PORT', '8000')), type=check_valid_port) web_args.add_argument( '--accept-proxy-headers', help='Set this flag for the webserver to accept X-Forwarded-* headers', action='store_true') ########################################################################## # LABEL MODE ########################################################################## parser_label = subparsers.add_parser( 'label', help=('Starts in label mode. ' 'It reads Kubernetes pods in a specified namespace, and checks ' ' their \'seal/*\' labels to decide which ones to kill.' 'There is no policy needed in this mode. ' 'To learn about supported labels, read more at ' 'https://github.com/bloomberg/powerfulseal/ '), ) add_common_options(parser_label) add_namespace_options(parser_label) add_run_options(parser_label) add_metrics_options(parser_label) ########################################################################## # VALIDATE POLICY MODE ########################################################################## parser_validate_policy = subparsers.add_parser( 'validate', help=('Validates any file against the policy schema, returns.' 'You can use this to check that your policy is correct, ' 'before using it in autonomous mode.')) add_policy_options(parser_validate_policy) return parser.parse_args(args=args)
type=int, choices=[0, 1], help= 'If 0, the model to load stores word information. If 1, the model to load stores ' 'subword (ngrams) information; note that subword information is relevant only to ' 'fasttext models.') arg_parser.add_argument('--topn', default=500, type=int, action=check_size(min_size=1), help='maximal number of expanded terms to return') arg_parser.add_argument('--grouping', action='store_true', default=False, help='grouping mode') args = arg_parser.parse_args() se = SetExpand(np2vec_model_file=args.np2vec_model_file, binary=args.binary, word_ngrams=args.word_ngrams, grouping=args.grouping) enter_seed_str = 'Enter the seed (comma-separated seed terms):' logger.info(enter_seed_str) for seed_str in sys.stdin: seed_list = seed_str.strip().split(',') exp = se.expand(seed_list, args.topn) logger.info('Expanded results:') logger.info(exp) logger.info(enter_seed_str)
# The main function to invoke the powerfulseal cli (older, flat-flag
# variant): builds one parser with mutually-exclusive inventory, cloud and
# policy option groups; configures logging verbosity from -v counts; picks
# a cloud driver (OpenStack / AWS / none); builds the Kubernetes client and
# node inventory; then dispatches to interactive mode, policy validation,
# or policy execution.
# NOTE(review): logger.debug("Creating kubernetes client with config %d",
# kube_config) uses %d although kube_config is a path string or None -- it
# would raise TypeError when DEBUG logging is enabled; should be %s.
# NOTE(review): the "Attempting to read the inventory from kubernetes" log
# string appears split across lines by the paste; confirm against the
# original file.
def main(argv): """ The main function to invoke the powerfulseal cli """ # Describe our configuration. prog = ArgumentParser( config_file_parser_class=YAMLConfigFileParser, formatter_class=argparse.RawDescriptionHelpFormatter, default_config_files=['~/.config/seal', '~/.seal'], description=textwrap.dedent("""\ PowerfulSeal """), ) # general settings prog.add_argument( '-c', '--config', is_config_file=True, env_var="CONFIG", help='Config file path', ) prog.add_argument('-v', '--verbose', action='count', help='Verbose logging.' ) # inventory related config inventory_options = prog.add_mutually_exclusive_group(required=True) inventory_options.add_argument('-i', '--inventory-file', default=os.environ.get("INVENTORY_FILE"), help='the inventory file of group of hosts to test' ) inventory_options.add_argument('--inventory-kubernetes', default=os.environ.get("INVENTORY_KUBERNETES"), help='will read all cluster nodes as inventory', action='store_true', ) # ssh related options args_ssh = prog.add_argument_group('SSH settings') args_ssh.add_argument( '--remote-user', default=os.environ.get("PS_REMOTE_USER", "cloud-user"), help="the of the user for the ssh connections", ) args_ssh.add_argument( '--ssh-allow-missing-host-keys', default=False, action='store_true', help='Allow connection to hosts not present in known_hosts', ) args_ssh.add_argument( '--ssh-path-to-private-key', default=os.environ.get("PS_PRIVATE_KEY"), help='Path to ssh private key', ) # cloud driver related config cloud_options = prog.add_mutually_exclusive_group(required=True) cloud_options.add_argument('--open-stack-cloud', default=os.environ.get("OPENSTACK_CLOUD"), action='store_true', help="use OpenStack cloud provider", ) cloud_options.add_argument('--aws-cloud', default=os.environ.get("AWS_CLOUD"), action='store_true', help="use AWS cloud provider", ) cloud_options.add_argument('--no-cloud', default=os.environ.get("NO_CLOUD"), action='store_true', help="don't use cloud provider", ) 
prog.add_argument('--open-stack-cloud-name', default=os.environ.get("OPENSTACK_CLOUD_NAME"), help="the name of the open stack cloud from your config file to use (if using config file)", ) # KUBERNETES CONFIG args_kubernetes = prog.add_argument_group('Kubernetes settings') args_kubernetes.add_argument( '--kube-config', default=None, help='Location of kube-config file', ) # policy-related settings policy_options = prog.add_mutually_exclusive_group(required=True) policy_options.add_argument('--validate-policy-file', help='reads the policy file, validates the schema, returns' ) policy_options.add_argument('--run-policy-file', default=os.environ.get("POLICY_FILE"), help='location of the policy file to read', ) policy_options.add_argument('--interactive', help='will start the seal in interactive mode', action='store_true', ) args = prog.parse_args(args=argv) # Configure logging if not args.verbose: log_level = logging.ERROR elif args.verbose == 1: log_level = logging.WARNING elif args.verbose == 2: log_level = logging.INFO else: log_level = logging.DEBUG logging.basicConfig( stream=sys.stdout, level=log_level ) logger = logging.getLogger(__name__) logger.setLevel(log_level) # build cloud provider driver logger.debug("Building the driver") if args.open_stack_cloud: logger.info("Building OpenStack driver") driver = OpenStackDriver( cloud=args.open_stack_cloud_name, ) elif args.aws_cloud: logger.info("Building AWS driver") driver = AWSDriver() else: logger.info("No driver - some functionality disabled") driver = NoCloudDriver() # build a k8s client kube_config = args.kube_config logger.debug("Creating kubernetes client with config %d", kube_config) k8s_client = K8sClient(kube_config=kube_config) k8s_inventory = K8sInventory(k8s_client=k8s_client) # read the local inventory logger.debug("Fetching the inventory") if args.inventory_file: groups_to_restrict_to = read_inventory_file_to_dict( args.inventory_file ) else: logger.info("Attempting to read the inventory from 
kubernetes") groups_to_restrict_to = k8s_client.get_nodes_groups() logger.debug("Restricting inventory to %s" % groups_to_restrict_to) inventory = NodeInventory( driver=driver, restrict_to_groups=groups_to_restrict_to, ) inventory.sync() # create an executor executor = RemoteExecutor( user=args.remote_user, ssh_allow_missing_host_keys=args.ssh_allow_missing_host_keys, ssh_path_to_private_key=args.ssh_path_to_private_key, ) if args.interactive: # create a command parser cmd = PSCmd( inventory=inventory, driver=driver, executor=executor, k8s_inventory=k8s_inventory, ) while True: try: cmd.cmdloop() except KeyboardInterrupt: print() print("Ctrl-c again to quit") try: input() except KeyboardInterrupt: sys.exit(0) elif args.validate_policy_file: PolicyRunner.validate_file(args.validate_policy_file) print("All good, captain") elif args.run_policy_file: policy = PolicyRunner.validate_file(args.run_policy_file) PolicyRunner.run(policy, inventory, k8s_inventory, driver, executor)
# Evaluate the independant-12-layers facade classifier over a list of .npy
# files: for each file, run the network strip-wise, compare predicted pixel
# labels against the stored ground truth per facade label, cache per-file
# metrics as YAML, and print cumulative precision/recall/F1.
# NOTE(review): the function name shadows the builtin ``eval``.
# NOTE(review): yaml.load(open(cached_file)) leaves the file handle open and
# uses yaml.load without an explicit Loader (unsafe/deprecated in PyYAML>=5);
# open(args.data) is likewise never closed -- consider ``with`` +
# yaml.safe_load once the dumped format is confirmed compatible.
def eval(): p = ArgumentParser(default_config_files=[join(dirname(__file__), '.eval.cfg')]) p.add('--config', '-c', is_config_file=True, help="config file path") p.add('--weights', type=str, help="path to the classifier weights") p.add('--data', type=str, help="test data, a file with each line as an .npy filename") p.add('--output', '-o', type=str, help="folder to hold outputs", default='.') p.add('--path', type=str, action='append', help='search paths for files') args = p.parse_args() try: os.makedirs(args.output) except OSError: pass # If weights is None, this will default to the weights in the models folder, # or the ones indicated by the environment variable net = i12.net(args.weights) files = [complete(p, args.path) for p in open(args.data).readlines()] summary = OrderedDict() for label in i12.LABELS: summary[label] = OrderedDict() summary[label].update(TP=0) summary[label].update(TN=0) summary[label].update(FP=0) summary[label].update(FN=0) summary[label].update(targets=0) summary[label].update(candidates=0) summary[label].update(hits=0) for file_index, file in enumerate(files): print(file_index+1, "of", len(files), ":", file) cached_file = os.path.join(args.output, "metrics-{:04}.yml".format(file_index)) data = np.load(file) rgb = channels_last(data[:3].astype(np.uint8)) if not os.path.isfile(os.path.join(args.output, 'file-{}.jpg'.format(file_index))): skimage.io.imsave(os.path.join(args.output, 'file-{}.jpg'.format(file_index)), rgb) all_expected = data[3:] if not os.path.isfile(os.path.join(args.output, 'file-{}-windows.jpg'.format(file_index))): skimage.io.imsave(os.path.join(args.output, 'file-{}-windows.jpg'.format(file_index)), skimage.color.gray2rgb((all_expected[ pyfacades.models.independant_12_layers.model.WINDOW] == 2).astype(float))) if not os.path.isfile(cached_file): print("Calculating metrics for file", file) #data = np.load(file) #rgb = channels_last(data[:3].astype(np.uint8)) #all_expected = data[3:] all_predicted = 
i12.process_strip(channels_first(rgb)) results = OrderedDict() for label_index, label in enumerate(i12.LABELS): if label == 'background': continue # Skip the top row and the bottom row -- I gave those dummy labels to work around a caffe error expected = all_expected[label_index][1:-1] # Uggh! Since the 'UNKNOWN' label is removed, argmax gives the wrong values; I need 0->0, 1->2, 2->3 predicted = all_predicted.features[label_index].argmax(0)[1:-1]+1 predicted[predicted==LABEL_UNKNOWN] = LABEL_NEGATIVE metrics = Metrics(expected, predicted, source=file, feature=label, threshold=0.5) results[label] = metrics.as_dict() viz = viz_pixel_labels(expected, predicted, rgb[1:-1], label_negative=LABEL_NEGATIVE, label_positive=LABEL_POSITIVE) skimage.io.imsave(os.path.join(args.output, 'file-{}-viz-{}.jpg'.format(file_index, label)), viz) with open(cached_file, 'w') as f: yaml.dump(results, f, Dumper=yamlordereddictloader.Dumper) cached = yaml.load(open(cached_file)) print("Cumulative:") for label in i12.LABELS: if label == 'background': continue metrics = Metrics(**cached[label]) assert metrics.source == file summary[label]['TP'] += metrics.TP summary[label]['FP'] += metrics.FP summary[label]['TN'] += metrics.TN summary[label]['FN'] += metrics.FN summary[label]['targets'] += metrics.targets summary[label]['candidates'] += metrics.candidates summary[label]['hits'] += metrics.hits cum = Metrics(**summary[label]) print("{:10}: pix(P:{:2.5f}, R:{:2.5f},F:{:2.5f}), obj:(P:{:2.5f}, R:{:2.5f},F:{:2.5f})".format( label, cum.pixel_precision, cum.pixel_recall, cum.pixel_f1, cum.object_precision, cum.object_recall, cum.object_f1))
action='store_true') arg_parser.add_argument( '--word_ngrams', default=0, type=int, choices=[0, 1], help='If 0, the model to load stores word information. If 1, the model to load stores ' 'subword (ngrams) information; note that subword information is relevant only to ' 'fasttext models.') arg_parser.add_argument( '--mark_char', default='_', type=str, action=check_size(1, 2), help='special character that marks word separator and NP suffix.') arg_parser.add_argument( '--np', default='Intel Corp.', type=str, action=check_size(min=1), help='NP to print its word vector.') args = arg_parser.parse_args() np2vec_model = NP2vec.load( args.np2vec_model_file, binary=args.binary, word_ngrams=args.word_ngrams) print("word vector for the NP \'" + args.np + "\':", np2vec_model[args.mark_char.join( args.np.split()) + args.mark_char])
@app.route('/')
def aggregate_info():
    """Identity endpoint: reports which service is answering."""
    return jsonify({'service': 'aggregate'})


@app.errorhandler(requests.RequestException)
def http_error(error):
    """Translate a failed outbound HTTP call into a JSON 500 response.

    Fires whenever a handler lets a ``requests.RequestException`` escape
    (e.g. the users/ip upstream services are unreachable).

    BUGFIX: the original used ``error.message`` — Python 3 exceptions have
    no ``.message`` attribute (removed with PEP 352), so the handler itself
    crashed with AttributeError. ``str(error)`` is the portable spelling.
    """
    response = jsonify({
        'status': 'error',
        'code': 500,
        'error': str(error),
    })
    response.status_code = 500
    return response


if __name__ == '__main__':
    # configargparse parser: -u/-i may also be supplied through the
    # environment (USERS_URL / IPDIAG_URL) via env_var.
    parser = ArgumentParser(description='Runs the IP service.')
    parser.add_argument('--host',
                        help='Specifies the host for the application.',
                        default='127.0.0.1')
    parser.add_argument('--port', type=int,
                        help='Specifies the port for the application.',
                        default=5000)
    parser.add_argument('-u', '--users',
                        help='Specifies the address to the users service.',
                        required=True, env_var='USERS_URL')
    parser.add_argument('-i', '--ip',
                        help='Specifies the address to the ip service.',
                        required=True, env_var='IPDIAG_URL')
    arguments = parser.parse_args()

    # Module-level endpoints — presumably read by the route handlers
    # elsewhere in this file; keep the assignments. TODO confirm.
    users_endpoint = arguments.users
    ip_endpoint = arguments.ip

    app.run(host=arguments.host, port=arguments.port)
def cli_main() -> None:
    """Finetune entry point: load a pretrained BT encoder and run linear eval.

    Reads ``log_files.txt`` (written by an earlier pretraining run) to find
    the finetune model directory and the checkpoint to load, builds a
    ``SSLLinearEval`` head on top of the frozen/loaded encoder, and fits it
    with a PyTorch Lightning trainer logged to Neptune.
    """
    # --- Argument parsing ---------------------------------------------------
    # Config file is expected one directory above the CWD.
    default_config = os.path.join(os.path.split(os.getcwd())[0], 'config.conf')
    print(default_config)
    # configargparse: values may come from the config file as well as the CLI.
    parser = ArgumentParser(description='Pytorch BT', default_config_files=[default_config])
    parser.add_argument('-c', '--my-config', required=False, is_config_file=True,
                        help='config file path')
    parser.add_argument('--finetune', dest='finetune', action='store_true',
                        help='Perform only finetuning (Default: False)')
    parser.set_defaults(finetune=False)
    parser.add_argument(
        '--transfer', dest='transfer', action='store_true',
        help='Perform transfer learning on linear eval (Default: False)')
    parser.set_defaults(transfer=False)
    parser.add_argument('--offline_log', dest='offline_log', action='store_true',
                        help='Do not log online (Default: False)')
    parser.set_defaults(offline_log=False)
    parser.add_argument('--pt_checkpoint', type=str, default=None)
    parser.add_argument('--val_every_n', type=int, default=1)
    parser.add_argument('--tag', type=str, default=None)
    parser.add_argument('--resume_ckpt', type=str, default=None)
    parser.add_argument('--seed', type=int, default=222)
    parser.add_argument('--project_name', type=str, default=None)
    # Trainer args (gpus, accelerator, num_nodes, ...) added by Lightning.
    parser = pl.Trainer.add_argparse_args(parser)
    # Model-specific args from both the pretraining model and the eval head.
    parser = BT.add_model_specific_args(parser)
    parser = SSLLinearEval.add_model_specific_args(parser)
    args = parser.parse_args()

    seed_everything(args.seed)

    # This run is the finetune stage; batch size switches to the FT one.
    args.status = 'Finetune'
    args.batch_size = args.ft_batch_size

    # DataModules: `dm` (pretrain) is unused below; `ft_dm` feeds the fit.
    dm, ft_dm, args = get_dm(args)

    neptune_logger = NeptuneLogger(
        offline_mode=args.offline_log,
        api_key=None,
        project_name=args.project_name,
        experiment_name='Testing',  # Optional,
        params=vars(args),  # Optional,
        tags=["Test", args.tag],  # Optional,
        upload_source_files=['src/*.py'],
        close_after_fit=False)

    # NOTE(review): `model` is constructed but never used below — the encoder
    # actually used is loaded from the checkpoint. Dead assignment? Confirm.
    model = BT(**args.__dict__)

    # log_files.txt is written by the pretraining stage; index 1 is the
    # finetune model dir, index 2 the checkpoint path — TODO confirm layout.
    load_log_file = os.path.join(os.getcwd(), 'log_files.txt')
    log_dirs = np.genfromtxt(load_log_file, delimiter=" ", dtype='str')

    print("\n\n Log Dir: {}\n\n".format(log_dirs))

    ft_model_dir = log_dirs[1]
    checkpoint_path = log_dirs[2]

    print("Loading checkpoint: {}".format(checkpoint_path))

    # NOTE(review): this checkpoint callback is never passed to the trainer
    # (checkpoint_callback=False below) — appears unused. Confirm intent.
    ft_model_checkpoint = pl.callbacks.ModelCheckpoint(filepath=(ft_model_dir + '/'),
                                                       save_top_k=1, monitor='val_loss')

    # strict=False: checkpoint may lack keys for the eval head.
    encoder = BT.load_from_checkpoint(checkpoint_path, strict=False)

    # Effective batch size depends on the distributed strategy:
    # ddp scales by nodes*gpus, ddp2 by nodes only, single-process not at all.
    if args.accelerator == 'ddp' or args.accelerator == 'ddp2':
        replace_sampler = True  # False
        if args.accelerator == 'ddp':
            args.effective_bsz = args.ft_batch_size * args.num_nodes * args.gpus
        elif args.accelerator == 'ddp2':
            args.effective_bsz = args.ft_batch_size * args.num_nodes
    else:
        replace_sampler = True
        args.effective_bsz = args.ft_batch_size

    # Linear-eval head wrapped around the online encoder from pretraining.
    ft_model = SSLLinearEval(encoder.encoder_online, **args.__dict__)

    trainer_ft = pl.Trainer.from_argparse_args(
        args,
        max_epochs=args.ft_epochs,
        logger=neptune_logger,
        callbacks=[FTPrintingCallback(ft_model_dir, args)],
        deterministic=True,
        checkpoint_callback=False,
        fast_dev_run=False,
        sync_batchnorm=True,
        track_grad_norm=-1,
        replace_sampler_ddp=replace_sampler,
        progress_bar_refresh_rate=args.print_freq)

    # Only rank 0 appends the Neptune experiment id to log_files.txt, so
    # later stages can locate this run.
    if trainer_ft.local_rank == 0:
        if not args.offline_log:
            print("Experiment: {}".format(str(trainer_ft.logger.experiment)))
            # Extract the experiment id from its repr, e.g. "Experiment(ID)".
            log_dirs = np.append(
                log_dirs, str(trainer_ft.logger.experiment).split('(')[1][:-1])
            save_log_file = os.path.join(os.getcwd(), 'log_files.txt')
            np.savetxt(save_log_file, log_dirs, delimiter=" ", fmt="%s")

    # --- Fit ----------------------------------------------------------------
    trainer_ft.fit(ft_model, ft_dm)

    # Upload the latest checkpoint file as a Neptune artifact, then close the
    # experiment (close_after_fit=False above, so stop() is explicit).
    if args.save_checkpoint:
        neptune_logger.experiment.log_artifact(
            os.path.join(ft_model_dir, os.listdir(ft_model_dir + '/')[-1]),
            os.path.join('finetune/', os.listdir(ft_model_dir + '/')[-1]))
        neptune_logger.experiment.stop()
import requests import pytest from collections import namedtuple from enum import Enum from configargparse import ArgumentParser parser = ArgumentParser(default_config_files=["tests/test.conf"]) parser.add_argument("--lambda-uri", required=True, type=str, help="http://lambda.aws.com") parser.add_argument("--lambda-api", required=True, type=str, help="/dev/v1/blah") config = parser.parse_args() Kitten = namedtuple("Kitten", "name age") TEST_KITTEN = Kitten("Cleopatra", 3) class Headers(Enum): APPLICATION_JSON = {"Content-Type": "application/json"} class TestKittenCRUD: @pytest.fixture def url(self): return f"{config.lambda_uri}{config.lambda_api}" def test_post(self, url):