# NOTE(review): this chunk began with a stray "return mae" — the tail of a
# function defined outside this view. At module level it is a SyntaxError,
# so it is preserved here only as a record of the original fragment:
#     return mae


class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        # Initialize all statistics via reset() so the zeroing logic
        # lives in one place.
        self.reset()

    def reset(self):
        """Reset all tracked statistics to zero."""
        self.val = 0    # most recently observed value
        self.avg = 0    # running (weighted) average
        self.sum = 0    # weighted sum of observed values
        self.count = 0  # total weight (number of samples) seen so far

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average.

        Args:
            val: the new value to fold into the statistics.
            n:   the weight / sample count for this observation (default 1).
        """
        self.val = val
        self.sum += val * n
        self.count += n
        # Note: update() must be called with n >= 1 at least once before
        # avg is meaningful; count == 0 here would divide by zero.
        self.avg = self.sum / self.count


if __name__ == '__main__':
    # NNI trial entry point: fetch tuner-suggested hyperparameters, merge
    # them into the command-line defaults, and run training.
    # `nni`, `logger`, `merge_parameter`, `return_args` and `main` are
    # expected to be defined/imported elsewhere in the full file.
    tuner_params = nni.get_next_parameter()
    logger.debug(tuner_params)
    params = vars(merge_parameter(return_args, tuner_params))
    print(params)
    main(params)
# NOTE(review): head-truncated fragment with newlines stripped — it begins
# mid-way through a parser.add_argument(...) call (presumably '--seed',
# judging by the help text) inside a get_params() function whose `def` is
# outside this chunk. It adds --no_cuda and --log_interval arguments,
# parses known args, returns them, and then (separate unit) runs the NNI
# trial entry point: merge tuner parameters into the CLI defaults and call
# main(), logging and re-raising any exception. Left byte-identical because
# the missing head makes a safe reconstruction impossible.
type=int, default=1, metavar='S', help='random seed (default: 1)') parser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA training') parser.add_argument( '--log_interval', type=int, default=1000, metavar='N', help='how many batches to wait before logging training status') args, _ = parser.parse_known_args() return args if __name__ == '__main__': try: # get parameters form tuner tuner_params = nni.get_next_parameter() logger.debug(tuner_params) params = vars(merge_parameter(get_params(), tuner_params)) print(params) main(params) except Exception as exception: logger.exception(exception) raise
# NOTE(review): head-truncated fragment with newlines stripped — it begins
# mid-way through an add_argument(...) call selecting the model type
# ('rnn' | 'awd' | 'transformer'). It then merges model-specific and
# PyTorch-Lightning Trainer args, parses hparams, merges NNI tuner
# parameters, initializes a ClearML Task (plain project when no tuner
# params, hyperparameter-search project otherwise), builds the dataset and
# vocab via BRTD.create, and records the vocab size on hparams. Left
# byte-identical because the missing head makes a safe reconstruction
# impossible.
default='awd', choices=['rnn', 'awd', 'transformer']) # add model specific args parser = LanguageModel.add_model_specific_args(parser) # add args from trainer parser = Trainer.add_argparse_args(parser) hparams = parser.parse_args() tuner_params = nni.get_next_parameter() logger.debug(tuner_params) hparams = merge_parameter(hparams, tuner_params) logger.info(hparams) if tuner_params is None: task = Task.init(project_name="language-model", task_name=hparams.model) else: task = Task.init(project_name="language-model-hp", task_name=hparams.model, continue_last_task=False) task.add_tags(['hp', hparams.model]) datasets, vocab = BRTD.create(hparams.data, vocab=hparams.vocab) hparams.num_tokens = len(vocab)
# NOTE(review): head-truncated fragment with newlines stripped — it begins
# mid-way through an add_argument(...) call (an IoU-threshold option)
# inside a get_params() whose `def` is outside this chunk. It adds
# --expand_scale and --iou_weighting, parses args, returns them, then
# (separate unit) runs the NNI trial entry point. Two review flags:
# `id = get_sequence_id()` shadows the `id` builtin, and the bare
# `except ... raise` with logging commented out means errors propagate
# unlogged — both left as-is since the fragment cannot be safely rewritten.
type=float, help='ignore iou thresh') parser.add_argument('--expand_scale', default=None, type=float, help='image augmentation expand scale') parser.add_argument('--iou_weighting', default=None, type=float, help='iou loss weighting') args = parser.parse_args() return args if __name__ == '__main__': try: # get parameters form tuner tuner_params = nni.get_next_parameter() #logger.debug(tuner_params) print(tuner_params) params = merge_parameter(get_params(), tuner_params) id = get_sequence_id() #params.checkpoint = 'checkpoints/%d' % id #print(params) main(params) except Exception as exception: #logger.exception(exception) raise
# NOTE(review): head-truncated fragment with newlines stripped — it begins
# mid-way through a per-image loop (the enclosing function and the loop
# header defining `i`, `cur_input`, `cur_label`, `cur_output` are outside
# this chunk). It saves input/label/output PNGs under test_config.output_dir,
# then (separate unit) the script entry point: parse CLI args, load base
# params for the dataset/checkpoint, overlay the recorded NNI trial
# parameters from the parameter_cfg file, create the three output
# subdirectories, and run main(). Left byte-identical because the missing
# head makes a safe reconstruction impossible.
image_name = '{}_test_{}.png'.format(test_config.dataset, i) Image.fromarray(cur_input).save( os.path.join(test_config.output_dir, 'input', image_name)) Image.fromarray(cur_label).save( os.path.join(test_config.output_dir, 'label', image_name)) Image.fromarray(cur_output).save( os.path.join(test_config.output_dir, 'output', image_name)) _logger.info("Images successfully saved to " + test_config.output_dir) if __name__ == '__main__': params_from_cl = vars(parse_args()) _, test_params = get_base_params(params_from_cl['dataset'], params_from_cl['checkpoint']) test_params.update(params_from_cl) with open(test_params['parameter_cfg'], 'r') as f: params_from_nni = json.loads(f.readline().strip())['parameters'] test_params = merge_parameter(test_params, params_from_nni) pathlib.Path(params_from_cl['output_dir'] + '/input').mkdir(parents=True, exist_ok=True) pathlib.Path(params_from_cl['output_dir'] + '/label').mkdir(parents=True, exist_ok=True) pathlib.Path(params_from_cl['output_dir'] + '/output').mkdir(parents=True, exist_ok=True) main(test_params)
# NOTE(review): head-truncated fragment with newlines stripped — it begins
# mid-way through a training iteration loop (the enclosing function, epoch
# loop, and variables like `iter_start_time`, `data`, `epoch` are outside
# this chunk). Per iteration it advances counters, feeds data to the model,
# and runs an optimization step; after the epoch loop it logs timing, saves
# the 'latest' checkpoint, evaluates L1 on the test set, and reports the
# final score to NNI. Then (separate unit) the script entry point: hard-coded
# 'facades' dataset, trial checkpoint dir, CLI + NNI tuner params merged into
# the base train params, and main() invoked. Left byte-identical because the
# missing head makes a safe reconstruction impossible.
t_data = iter_start_time - iter_data_time total_iters += train_config.batch_size epoch_iter += train_config.batch_size model.set_input(data) # unpack data from dataset and apply preprocessing model.optimize_parameters() # calculate loss functions, get gradients, update network weights iter_data_time = time.time() _logger.info('End of epoch {} / {} \t Time Taken: {} sec'.format(epoch, train_config.n_epochs + train_config.n_epochs_decay, time.time() - epoch_start_time)) model.save_networks('latest') _logger.info("Training done. Saving the final model.") l1_score = evaluate_L1(test_config, model, test_dataset) _logger.info("The final L1 loss the test set is {}".format(l1_score)) nni.report_final_result(l1_score) if __name__ == '__main__': dataset_name = 'facades' checkpoint_dir = setup_trial_checkpoint_dir() params_from_cl = vars(parse_args()) params_for_tuning = nni.get_next_parameter() train_params, test_params = get_base_params(dataset_name, checkpoint_dir) train_params.update(params_from_cl) test_params.update(params_from_cl) train_params = merge_parameter(train_params, params_for_tuning) main(dataset_name, train_params, test_params)