def add(context, args):
    credentials = context.aws.load_credentials()

    if credentials.has_section(args.profile):
        raise HandledError('The AWS profile {} already exists.'.format(args.profile))

    # do not accept empty strings
    if not args.profile:
        raise HandledError('Cannot create a profile with an empty name')

    credentials.add_section(args.profile)
    credentials.set(args.profile, constant.SECRET_KEY_OPTION, args.aws_secret_key)
    credentials.set(args.profile, constant.ACCESS_KEY_OPTION, args.aws_access_key)

    context.aws.save_credentials(credentials)

    context.view.added_profile(args.profile)

    if args.make_default:
        nested_args = Args()
        nested_args.set = args.profile
        default(context, nested_args)
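# The handler above relies on a configparser-style credentials object
# (has_section / add_section / set). A minimal standalone sketch of the same
# pattern using plain configparser; the option names below are the standard
# ~/.aws/credentials keys, whereas the original reads them from constant.*:
import configparser

def add_profile_sketch(path, profile, access_key, secret_key):
    credentials = configparser.ConfigParser()
    credentials.read(path)
    if not profile:
        raise ValueError('Cannot create a profile with an empty name')
    if credentials.has_section(profile):
        raise ValueError('The AWS profile {} already exists.'.format(profile))
    credentials.add_section(profile)
    credentials.set(profile, 'aws_access_key_id', access_key)
    credentials.set(profile, 'aws_secret_access_key', secret_key)
    with open(path, 'w') as f:
        credentials.write(f)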
def get_model(model_args):
    model_args_ = model_args
    if isinstance(model_args, argparse.Namespace):
        model_args_ = Args(vars(model_args))
    return globals().copy()[model_args_.get("model")](model_args_)
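# get_model resolves the model class by name from its module's globals(), so
# any constructor visible at module level (defined or imported there) can be
# selected via the "model" key. A sketch with a hypothetical DummyModel,
# assuming it lives in the same module as get_model:
class DummyModel:
    def __init__(self, args):
        self.args = args

# get_model(Args({"model": "DummyModel"})) would then return a DummyModel
# instance constructed with the wrapped arguments.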
def train(save_dir="./sandbox",
          root_dir="./dataset",
          exp_name="DemoExperiment",
          model="efficientdet-d0",
          lr=1e-4,
          gpus=1,
          batch_size=16,
          pretrained=True,
          num_class=20,
          log_save_interval=1,
          distributed_backend="dp",
          gradient_clip_val=0.5,
          max_nb_epochs=3,
          train_percent_check=1,
          val_percent_check=1,
          tb_path="./sandbox/tb",
          debug=False,
          loss_fn="BCE",
          ):
    """
    Run the training experiment.

    Args:
        save_dir: Path to save the checkpoints and logs
        root_dir: Root directory of the dataset
        exp_name: Name of the experiment
        model: Model name
        lr: Learning rate
        gpus: int. (ie: 2 gpus) OR list to specify which GPUs [0, 1] OR
            '0,1' OR '-1' / -1 to use all available gpus
        batch_size: int. Number of samples in a batch
        pretrained: Whether or not to use the pretrained model
        num_class: Number of classes
        log_save_interval: Logging saving frequency (in batches)
        distributed_backend: Distributed computing mode
        gradient_clip_val: Clip value of gradient norm
        train_percent_check: Proportion of training data to use
        val_percent_check: Proportion of validation data to use
        max_nb_epochs: Max number of epochs
        tb_path: Path to the global TensorBoard folder
        debug: If True, use only 1% of the data for a quick sanity run
        loss_fn: Loss function to use

    Returns:
        None
    """
    args = Args(locals())
    init_exp_folder(args)
    m = Model(args)
    if debug:
        train_percent_check = val_percent_check = 0.01
    trainer = Trainer(distributed_backend=distributed_backend,
                      gpus=gpus,
                      logger=get_logger(save_dir, exp_name),
                      default_save_path=os.path.join(save_dir, exp_name),
                      log_save_interval=log_save_interval,
                      gradient_clip_val=gradient_clip_val,
                      train_percent_check=train_percent_check,
                      val_percent_check=val_percent_check,
                      max_nb_epochs=max_nb_epochs)
    trainer.fit(m)
def test_model(self):
    args = Args({"model": model_name,
                 "pretrained": pretrained,
                 "gpus": None})
    if num_classes is not None:
        args['num_classes'] = num_classes
    model = get_model(args)
    self.assertIsNotNone(model)
def train(save_dir="./sandbox",
          exp_name="DemoExperiment",
          model="ResNet18",
          task='classification',
          gpus=1,
          pretrained=True,
          num_classes=1,
          accelerator=None,
          gradient_clip_val=0.5,
          max_epochs=1,
          patience=10,
          limit_train_batches=1.0,
          tb_path="./sandbox/tb",
          loss_fn="BCE",
          weights_summary=None,
          ):
    """
    Run the training experiment.

    Args:
        save_dir: Path to save the checkpoints and logs
        exp_name: Name of the experiment
        model: Model name
        task: Name of the task to run (e.g. 'classification')
        gpus: int. (ie: 2 gpus) OR list to specify which GPUs [0, 1] OR
            '0,1' OR '-1' / -1 to use all available gpus
        pretrained: Whether or not to use the pretrained model
        num_classes: Number of classes
        accelerator: Distributed computing mode
        gradient_clip_val: Clip value of gradient norm
        limit_train_batches: Proportion of training data to use
        max_epochs: Max number of epochs
        patience: Number of epochs with no improvement after which training
            will be stopped
        tb_path: Path to the global TensorBoard folder
        loss_fn: Loss function to use
        weights_summary: Prints a summary of the weights when training begins

    Returns:
        None
    """
    args = Args(locals())
    init_exp_folder(args)
    task = get_task(args)
    trainer = Trainer(gpus=gpus,
                      accelerator=accelerator,
                      logger=get_logger(save_dir, exp_name),
                      callbacks=[get_early_stop_callback(patience),
                                 get_ckpt_callback(save_dir, exp_name)],
                      weights_save_path=os.path.join(save_dir, exp_name),
                      gradient_clip_val=gradient_clip_val,
                      limit_train_batches=limit_train_batches,
                      weights_summary=weights_summary,
                      max_epochs=max_epochs)
    trainer.fit(task)
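# A possible entry point for the trainer above -- a sketch only, assuming the
# script is meant to be launched from the command line. The python-fire
# wrapper turns the keyword arguments into CLI flags, e.g.
# `python train.py --model=ResNet18 --gpus=0 --max_epochs=5`:
if __name__ == "__main__":
    import fire
    fire.Fire(train)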
def __execute(command, args):
    requestId = args['request_id'] if 'request_id' in args else 0

    print 'command started - {} with args {}'.format(command, args)

    metricsInterface = MetricsContext('gui')
    metricsInterface.set_command_name(command)
    metricsInterface.submit_attempt()

    try:
        argsObj = Args(**args)
        argsObj.no_prompt = True
        argsObj.is_gui = True

        context = Context(metricsInterface, view_class=GuiViewContext)
        context.bootstrap(argsObj)
        context.initialize(argsObj)

        # Deprecated in 1.9. TODO: remove.
        context.hooks.call_module_handlers('cli-plugin-code/resource_commands.py', 'add_gui_commands',
                                           args=[command_handlers, argsObj],
                                           deprecated=True)
        context.hooks.call_module_handlers('cli-plugin-code/resource_commands.py', 'add_gui_view_commands',
                                           args=[context.view],
                                           deprecated=True)

        context.hooks.call_module_handlers('resource-manager-code/command.py', 'add_gui_commands',
                                           kwargs={'handlers': command_handlers})
        context.hooks.call_module_handlers('resource-manager-code/command.py', 'add_gui_view_commands',
                                           kwargs={'view_context': context.view})

        handler = command_handlers.get(command, None)
        if handler is None:
            raise HandledError('Unknown command: ' + command)

        if handler != project.update_framework_version:
            context.config.verify_framework_version()

        handler(context, argsObj)

        context.view.success()
        metricsInterface.submit_success()

    except HandledError as e:
        metricsInterface.submit_failure()
        msg = str(e)
        print 'command error - {} when executing command {} with args {}.'.format(msg, command, args)
        args['view_output_function'](requestId, 'error', msg)

    except NoCredentialsError:
        metricsInterface.submit_failure()
        msg = 'No AWS credentials were provided.'
        print 'command error - {} when executing command {} with args {}.'.format(msg, command, args)
        args['view_output_function'](requestId, 'error', msg)

    except (EndpointConnectionError, IncompleteReadError, ConnectionError, UnknownEndpointError) as e:
        metricsInterface.submit_failure()
        msg = 'We were unable to contact your AWS endpoint.\nERROR: {0}'.format(e.message)
        print 'command error - {} when executing command {} with args {}.'.format(msg, command, args)
        args['view_output_function'](requestId, 'error', msg)

    except:
        metricsInterface.submit_failure()
        info = sys.exc_info()
        msg = traceback.format_exception(*info)
        print 'command error - {} when executing command {} with args {}.'.format(msg, command, args)
        args['view_output_function'](requestId, 'error', msg)

    print 'command finished - {} with args {}'.format(command, args)
def run(self):
    self.parseArgs()
    # GUI, if -g option
    if self.args.g:
        self.gui = gui.GUI(self.shutdown)
        self.gui.start(self.logFiles)
    # Reader
    self.reader = self.readerFactory(self.args[0])
    # Everything starts with my load method
    reactor.callWhenRunning(self.load)
    # GO!
    reactor.run()


args = Args("HTTP logfile analysis")
args('-e', '--exclude', "",
     "Exclude HTTP code(s) (comma separated list, no spaces)")
args('-d', '--ruledir', "~/.logalyzer",
     "Directory for files containing IP, user-agent, and url exclusion rules")
args('-y', '--secondary',
     "Ignore secondary files (css, webfonts, images)")
args('-t', '--timestamp',
     "Compare logfile timestamps to stored versions in the DB and only "+\
     "parse if newer")
args('-f', '--load', "",
     "File of blocked IP addresses to pre-load into the sifter. You can "+\
     "specify the same file as the file for blocked IP addresses to be "+\
     "saved into (with -s). Preloading will speed things up considerably, "+\
     "but don't use it in a run immediately after changing rules.")
args('-s', '--save', "",
     "File in which to save a list of blocked IP addresses, in ascending "+\
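# In the snippet above, Args is used as a callable option declarator rather
# than a plain container. A minimal sketch of such a wrapper built on
# argparse -- an assumption about the interface, not the project's actual
# implementation:
import argparse

class CallableArgsSketch:
    def __init__(self, description):
        self._parser = argparse.ArgumentParser(description=description)

    def __call__(self, short, long_, *rest):
        # Two trailing strings -> option taking a value (default, help text);
        # one trailing string -> boolean flag (help text only).
        if len(rest) == 2:
            default, helpText = rest
            self._parser.add_argument(short, long_, default=default, help=helpText)
        else:
            self._parser.add_argument(short, long_, action='store_true', help=rest[0])

    def parse(self):
        return self._parser.parse_args()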
def setUp(self):
    self.BCE_args = Args({"loss_fn": "BCE"})
    self.CE_args = Args({"loss_fn": "CE"})
    self.non_args = Args({"loss_fn": "NonExistLossFn"})
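# Most of the snippets above treat Args as a lightweight argument container
# built from a dict (or locals()) that supports attribute access, item
# access/assignment, and .get(). A minimal sketch of such a container -- an
# assumption about the shared interface, not any project's actual class:
class ArgsSketch(dict):
    def __init__(self, contents=None, **kwargs):
        super().__init__(contents or {}, **kwargs)

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

# ArgsSketch({"loss_fn": "BCE"}).loss_fn == "BCE"; item assignment
# (args["num_classes"] = 2) and attribute assignment (args.is_gui = True)
# both write into the same underlying dict.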