def test_model(args):
    """
    Main test routine.

    Parameters:
    ----------
    args : ArgumentParser
        Main script arguments.

    Returns:
    -------
    float
        Main accuracy value.
    """
    meta = prepare_dataset_metainfo(args=args)
    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    # Data iterator and evaluation metric for the requested subset.
    loader = prepare_data_source(
        ds_metainfo=meta,
        data_subset=args.data_subset,
        batch_size=batch_size,
        num_workers=args.num_workers)
    eval_metric = prepare_metric(
        ds_metainfo=meta,
        data_subset=args.data_subset)

    # For "hpe" (human pose estimation) tasks no class count is passed.
    model = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        num_classes=(None if meta.ml_type == "hpe" else args.num_classes),
        in_channels=args.in_channels,
        net_extra_kwargs=meta.test_net_extra_kwargs,
        load_ignore_extra=meta.load_ignore_extra,
        remove_module=args.remove_module)

    # Some scripts don't define --input-size; fall back to the model default.
    input_image_size = update_input_image_size(
        net=model,
        input_size=(args.input_size if hasattr(args, "input_size") else None))

    if args.show_progress:
        from tqdm import tqdm
        loader = tqdm(loader)

    # A weight source (pretrained or checkpoint) is required unless we only
    # compute FLOPs.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)

    scores = calc_model_accuracy(
        net=model,
        test_data=loader,
        metric=eval_metric,
        use_cuda=use_cuda,
        input_image_size=input_image_size,
        in_channels=args.in_channels,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        ml_type=meta.ml_type)

    if not scores:
        return None
    return scores[meta.saver_acc_ind]
def main():
    """
    Main body of script: evaluate a model on the test split.
    """
    args = parse_args()
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    # This script always evaluates with a per-GPU batch size of 1.
    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=1)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        net_extra_kwargs={
            "aux": False,
            "fixed_size": False
        },
        load_ignore_extra=True,
        remove_module=args.remove_module)

    # Unwrap DataParallel (if present) to read the model's native input size.
    real_net = net.module if hasattr(net, 'module') else net
    input_image_size = real_net.in_size[0] if hasattr(real_net, 'in_size') else args.input_size

    test_data = get_test_data_loader(
        dataset_name=args.dataset,
        dataset_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers)

    # A weight source (pretrained or checkpoint) is required unless we only
    # compute FLOPs.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)

    test(
        net=net,
        test_data=test_data,
        use_cuda=use_cuda,
        input_image_size=(input_image_size, input_image_size),
        in_channels=args.in_channels,
        num_classes=args.num_classes,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        dataset_metainfo=get_metainfo(args.dataset))
def main():
    """
    Main body of script: evaluate a model on the validation data.
    """
    args = parse_args()
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        remove_module=args.remove_module)

    # Unwrap DataParallel (if present) to read the model's native input size.
    real_net = net.module if hasattr(net, 'module') else net
    input_image_size = real_net.in_size[0] if hasattr(real_net, 'in_size') else args.input_size

    val_data = get_val_data_loader(
        data_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor)

    # A weight source (pretrained or checkpoint) is required unless we only
    # compute FLOPs.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)

    test(
        net=net,
        val_data=val_data,
        use_cuda=use_cuda,
        input_image_size=(input_image_size, input_image_size),
        in_channels=args.in_channels,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True)
def main():
    """
    Main body of script.
    """
    args = parse_args()
    # Disable cuDNN autotune so repeatability measurements are deterministic.
    os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    # Repeatability calculation only supports batch size 1.
    assert (args.batch_size == 1)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    meta = get_dataset_metainfo(dataset_name=args.dataset)
    meta.update(args=args)

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        net_extra_kwargs=meta.net_extra_kwargs,
        load_ignore_extra=False,
        num_classes=args.num_classes,
        in_channels=args.in_channels,
        remove_module=False)

    test_data = get_val_data_source(
        ds_metainfo=meta,
        batch_size=args.batch_size,
        num_workers=args.num_workers)

    calc_detector_repeatability(
        test_data=test_data,
        net=net,
        use_cuda=use_cuda)
def main():
    """
    Main body of script: evaluate a classification model.
    """
    args = parse_args()
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    # 1000 classes — presumably ImageNet-1k; verify against the data loader.
    classes = 1000
    net = prepare_model(
        model_name=args.model,
        classes=classes,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda)

    train_data, val_data = get_data_loader(
        data_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers)

    # A weight source (pretrained or checkpoint) is required.
    assert (args.use_pretrained or args.resume.strip())

    test(
        net=net,
        val_data=val_data,
        use_cuda=use_cuda,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        extended_log=True)
def main():
    """
    Main body of script.
    """
    args = parse_args()
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    meta = get_dataset_metainfo(dataset_name=args.dataset)
    meta.update(args=args)

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        net_extra_kwargs=meta.net_extra_kwargs,
        load_ignore_extra=meta.load_ignore_extra,
        num_classes=args.num_classes,
        in_channels=args.in_channels,
        remove_module=args.remove_module)

    # Unwrap DataParallel (if present) to read the model's native input size.
    real_net = net.module if hasattr(net, "module") else net
    input_image_size = real_net.in_size[0] if hasattr(real_net, "in_size") else args.input_size

    # Choose data source and metric for the requested subset.
    if args.data_subset == "val":
        data_source_factory = get_val_data_source
        test_metric = get_composite_metric(
            metric_names=meta.val_metric_names,
            metric_extra_kwargs=meta.val_metric_extra_kwargs)
    else:
        data_source_factory = get_test_data_source
        test_metric = get_composite_metric(
            metric_names=meta.test_metric_names,
            metric_extra_kwargs=meta.test_metric_extra_kwargs)

    test_data = data_source_factory(
        ds_metainfo=meta,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    if not args.not_show_progress:
        test_data = tqdm(test_data)

    # A weight source (pretrained or checkpoint) is required unless we only
    # compute FLOPs.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)

    test(
        net=net,
        test_data=test_data,
        metric=test_metric,
        use_cuda=use_cuda,
        input_image_size=(input_image_size, input_image_size),
        in_channels=args.in_channels,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        show_bad_samples=args.show_bad_samples)
def main():
    """
    Main body of script: train a classification model.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda)

    # Unwrap DataParallel (if present) to read the model's native input size.
    real_net = net.module if hasattr(net, 'module') else net
    input_image_size = real_net.in_size[0] if hasattr(real_net, 'in_size') else args.input_size

    train_data, val_data = get_data_loader(
        data_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor)

    # start_epoch is recovered from the state file but not used here.
    optimizer, lr_scheduler, start_epoch = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        num_epochs=args.num_epochs,
        state_file_path=args.resume_state)

    # Checkpoint/score logging is only enabled when both a directory and an
    # interval are configured.
    if args.save_dir and args.save_interval:
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix='imagenet_{}'.format(args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=('.pth', '.states'),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=['Val.Top1', 'Train.Top1', 'Val.Top5', 'Train.Loss'],
            acc_ind=2,
            score_log_file_path=os.path.join(args.save_dir, 'score.log'),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log'))
    else:
        lp_saver = None

    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        net=net,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        use_cuda=use_cuda)
def main():
    """
    Main body of script.
    """
    args = parse_args()
    if args.disable_cudnn_autotune:
        os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    meta = get_dataset_metainfo(dataset_name=args.dataset)
    meta.update(args=args)
    # Image segmentation evaluation requires batch 1 and deterministic cuDNN.
    assert (meta.ml_type != "imgseg") or (args.batch_size == 1)
    assert (meta.ml_type != "imgseg") or args.disable_cudnn_autotune

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        net_extra_kwargs=meta.net_extra_kwargs,
        load_ignore_extra=meta.load_ignore_extra,
        num_classes=args.num_classes,
        in_channels=args.in_channels,
        remove_module=args.remove_module)

    # Unwrap DataParallel (if present) to read the model's native input size.
    real_net = net.module if hasattr(net, "module") else net
    input_image_size = real_net.in_size[0] if hasattr(real_net, "in_size") else args.input_size

    # Choose data source and metric for the requested subset.
    if args.data_subset == "val":
        data_source_factory = get_val_data_source
        test_metric = get_composite_metric(
            metric_names=meta.val_metric_names,
            metric_extra_kwargs=meta.val_metric_extra_kwargs)
    else:
        data_source_factory = get_test_data_source
        test_metric = get_composite_metric(
            metric_names=meta.test_metric_names,
            metric_extra_kwargs=meta.test_metric_extra_kwargs)

    test_data = data_source_factory(
        ds_metainfo=meta,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    if args.show_progress:
        from tqdm import tqdm
        test_data = tqdm(test_data)

    # A weight source (pretrained or checkpoint) is required unless we only
    # compute FLOPs.
    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)

    test(
        net=net,
        test_data=test_data,
        metric=test_metric,
        use_cuda=use_cuda,
        input_image_size=(input_image_size, input_image_size),
        in_channels=args.in_channels,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True)
def main():
    """
    Main body of script.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda)

    # The model itself is the source of truth for the class count.
    real_net = net.module if hasattr(net, "module") else net
    assert (hasattr(real_net, "num_classes"))
    num_classes = real_net.num_classes

    meta = get_dataset_metainfo(dataset_name=args.dataset)
    meta.update(args=args)
    train_data = get_train_data_source(
        ds_metainfo=meta,
        batch_size=batch_size,
        num_workers=args.num_workers)
    val_data = get_val_data_source(
        ds_metainfo=meta,
        batch_size=batch_size,
        num_workers=args.num_workers)

    # start_epoch is recovered from the state file but not used here.
    optimizer, lr_scheduler, start_epoch = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        num_epochs=args.num_epochs,
        state_file_path=args.resume_state)

    # Checkpoint/score logging is only enabled when both a directory and an
    # interval are configured.
    if args.save_dir and args.save_interval:
        param_names = meta.val_metric_capts + meta.train_metric_capts + [
            "Train.Loss", "LR"
        ]
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix="{}_{}".format(meta.short_label, args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=(".pth", ".states"),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=param_names,
            acc_ind=meta.saver_acc_ind,
            score_log_file_path=os.path.join(args.save_dir, "score.log"),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, "best_map.log"))
    else:
        lp_saver = None

    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        net=net,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        num_classes=num_classes,
        val_metric=get_composite_metric(
            meta.val_metric_names,
            meta.val_metric_extra_kwargs),
        train_metric=get_composite_metric(
            meta.train_metric_names,
            meta.train_metric_extra_kwargs),
        use_cuda=use_cuda)
def main():
    """
    Main body of script.

    NOTE(review): this experimental variant hard-codes several settings on
    top of the parsed arguments (pretrained flag forced off, a local teacher
    checkpoint path kept for reference) — confirm before reuse.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    # Experiment overrides; must precede initialize_logging (which logs args).
    args.use_pretrained = False
    num_non_res = 3
    # Unused here — the prepare_model call below loads from args.resume.
    pretrained_model_file_path = '/exdrive/resnet20-cifar10/unfrozen-training/by-stack/non-res-stack-3-models/cifar10-non-res-stack-2-teacher.pth'

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    # Get model. remove_module=True when loading our own trained model
    # weights; otherwise False.
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        remove_module=True)

    # The model itself is the source of truth for the class count.
    real_net = net.module if hasattr(net, "module") else net
    assert (hasattr(real_net, "num_classes"))
    num_classes = real_net.num_classes

    meta = get_dataset_metainfo(dataset_name=args.dataset)
    meta.update(args=args)
    train_data = get_train_data_source(
        ds_metainfo=meta,
        batch_size=batch_size,
        num_workers=args.num_workers)
    val_data = get_val_data_source(
        ds_metainfo=meta,
        batch_size=batch_size,
        num_workers=args.num_workers)

    # start_epoch is recovered from the state file but not used here.
    optimizer, lr_scheduler, start_epoch = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        num_epochs=args.num_epochs,
        state_file_path=args.resume_state)

    # Checkpoint/score logging is only enabled when both a directory and an
    # interval are configured.
    if args.save_dir and args.save_interval:
        param_names = meta.val_metric_capts + meta.train_metric_capts + [
            "Train.Loss", "LR"
        ]
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix="{}_{}".format(meta.short_label, args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=(".pth", ".states"),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=param_names,
            acc_ind=meta.saver_acc_ind,
            score_log_file_path=os.path.join(args.save_dir, "score.log"),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, "best_map.log"))
    else:
        lp_saver = None

    print(net)
    if torch.cuda.is_available():
        print('cuda available - sending net to gpu')
        print('training using cuda = ', use_cuda)
        net.cuda()
    print('\n\nTraining nonresnet20 on cifar10 with num_non_res=', num_non_res)

    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        net=net,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        num_classes=num_classes,
        val_metric=get_composite_metric(
            meta.val_metric_names,
            meta.val_metric_extra_kwargs),
        train_metric=get_composite_metric(
            meta.train_metric_names,
            meta.train_metric_extra_kwargs),
        use_cuda=use_cuda)