def run_test(args):
    """
    Uses the following arguments:
    args.backup_output_dir - folder path of the format "train_res18_taskonomy_2019-10-10_15:54:09"
    args.dataset - name of the dataset
    args.arch - architecture of the model
    :param args:
    :return:
    """
    # Read the argument values
    model_name = args.arch
    dataset = args.dataset
    # Backup folder - logs, stats and TensorBoard logging are saved here.
    experiment_backup_folder = args.backup_output_dir

    # Get model and val_loader
    model = get_model(model_name, args)
    if torch.cuda.is_available():
        model.cuda()
    val_loader = get_loader(args, split='val', out_name=True)

    # Define the path where the saved checkpoints reside
    path_folder_models = experiment_backup_folder + "/savecheckpoint/"
    test_saved_models(path_folder_models, model, val_loader, args)
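# Usage sketch (illustrative only, not the project's CLI entry point): run_test
# expects an argparse-style namespace. Only the fields read directly above are
# shown; get_model/get_loader typically consume additional config-driven fields,
# and the architecture name below is a placeholder.
def _example_run_test():
    from argparse import Namespace
    example_args = Namespace(
        arch='drn_d_22',  # hypothetical architecture name
        dataset='taskonomy',
        backup_output_dir='train_res18_taskonomy_2019-10-10_15:54:09',
    )
    run_test(example_args)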
def test_one_checkpoint(path_model, model_name, task_set_whole, test_task_set,
                        test_batch_size, steps, debug, epsilon, step_size,
                        output_dir='./', norm_type='Linf', dataset="taskonomy"):
    parser = argparse.ArgumentParser(
        description='Run Experiments with Checkpoint Models')
    args = parser.parse_args()
    args.dataset = dataset
    args.arch = model_name

    import socket, json
    config_file_path = "config/{}_{}_config.json".format(args.arch, args.dataset)
    with open(config_file_path) as config_file:
        config = json.load(config_file)

    if socket.gethostname() == "deep":
        args.data_dir = config['data-dir_deep']
    elif socket.gethostname() == "amogh":
        args.data_dir = config['data-dir_amogh']
    elif socket.gethostname() == 'hulk':
        args.data_dir = '/local/rcs/ECCV/Taskonomy/taskonomy-sample-model-1-small-master/'
    else:
        args.data_dir = config['data-dir']

    args.task_set = task_set_whole
    args.test_task_set = test_task_set
    # args.step_size = step_size
    args.test_batch_size = test_batch_size
    args.classes = config['classes']
    # args.epsilon = epsilon
    args.workers = config['workers']
    args.pixel_scale = config['pixel_scale']
    args.steps = steps
    args.debug = debug
    args.epsilon = epsilon
    args.step_size = step_size

    # ADDED FOR CITYSCAPES
    args.random_scale = config['random-scale']
    args.random_rotate = config['random-rotate']
    args.crop_size = config['crop-size']
    args.list_dir = config['list-dir']

    num_being_tested = len(test_task_set)

    print("PRINTING ARGUMENTS \n")
    for k, v in args.__dict__.items():  # Prints arguments and contents of config file
        print(k, ':', v)

    dict_args = vars(args)
    dict_summary = {}
    dict_summary['config'] = dict_args
    dict_summary['results'] = {}
    dict_model_summary = {}

    model_checkpoint_name = path_model.split('/')
    path_folder_experiment_summary = os.path.join(
        output_dir, 'test_summary' + model_checkpoint_name[0])
    if not os.path.exists(path_folder_experiment_summary):
        os.makedirs(path_folder_experiment_summary)

    model = get_model(model_name, args)
    if torch.cuda.is_available():
        model.cuda()
    val_loader = get_loader(args, split='val', out_name=False)

    print("=> Loading checkpoint '{}'".format(path_model))
    if torch.cuda.is_available():
        checkpoint_model = torch.load(path_model)
    else:
        checkpoint_model = torch.load(
            path_model, map_location=lambda storage, loc: storage)
    start_epoch = checkpoint_model['epoch']
    epoch = checkpoint_model['epoch']
    # best_prec = checkpoint_model['best_prec']
    model.load_state_dict(checkpoint_model['state_dict'])  # , strict=False
    print('epoch is {}'.format(epoch))

    # Initialise the data structures in which we are going to save the statistics
    # Assign the variables that would be used by the eval function
    # Mtask_forone_grad → returns the avg gradient for that task during validation.
    from models.mtask_losses import get_losses_and_tasks
    # taskonomy_loss, losses, criteria, taskonomy_tasks = get_losses_and_tasks(args)
    criteria, taskonomy_tasks = get_losses_and_tasks(args)
    info = get_info(args.dataset)

    # mtask_forone_advacc → Calculates the IoU but does not return it.
    from learning.mtask_validate import mtask_test_clean
    # mtask_forone_advacc is assumed to be available in the enclosing scope
    # (e.g. imported at module level); only mtask_test_clean is imported here.
    advacc_result = mtask_forone_advacc(val_loader, model, criteria,
                                        args.test_task_set, args, info, epoch,
                                        test_flag=True, norm=norm_type)
    dict_model_summary['advacc'] = advacc_result
    dict_summary['results'][path_model] = dict_model_summary
    # break
    # show_loss_plot(dict_summary)

    timestamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d_%H:%M:%S')
    path_summary_json = "summary_" + args.arch + "_" + args.dataset + "_" + timestamp + '.json'
    path_summary_json = os.path.join(path_folder_experiment_summary,
                                     path_summary_json)
    with open(path_summary_json, 'w') as fp:
        json.dump(dict_summary, fp, indent=4, separators=(',', ': '),
                  sort_keys=True)
        fp.write('\n')
    print("json Dumped at", path_summary_json)

    return dict_model_summary
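# Illustrative call of test_one_checkpoint: the checkpoint path, task names, and
# attack hyper-parameters below are placeholders rather than values taken from the
# project's experiments.
def _example_test_one_checkpoint():
    summary = test_one_checkpoint(
        path_model='train_res18_taskonomy_2019-10-10_15:54:09/savecheckpoint/checkpoint_150.pth.tar',
        model_name='resnet18',
        task_set_whole=['segmentsemantic', 'depth_zbuffer'],  # hypothetical task set
        test_task_set=['segmentsemantic'],
        test_batch_size=2,
        steps=10,
        debug=False,
        epsilon=4.0,
        step_size=1.0,
        output_dir='./',
        norm_type='Linf',
        dataset='taskonomy')
    print(summary)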
def test_ensemble(path_model_list, model_name, task_set_whole_list, test_task_set,
                  test_batch_size, steps, debug, epsilon, step_size,
                  dataset="taskonomy",
                  default_suffix="/savecheckpoint/checkpoint_150.pth.tar",
                  use_noise=True, momentum=False, use_houdini=False):
    print('task_set_whole_list', task_set_whole_list)
    print('test_task_set', test_task_set)
    for i, each in enumerate(path_model_list):
        path_model_list[i] = each + default_suffix

    parser = argparse.ArgumentParser(
        description='Run Experiments with Checkpoint Models')
    args = parser.parse_args()
    args.dataset = dataset
    args.arch = model_name
    args.use_noise = use_noise
    args.momentum = momentum

    import socket, json
    config_file_path = "config/{}_{}_config.json".format(args.arch, args.dataset)
    with open(config_file_path) as config_file:
        config = json.load(config_file)

    if socket.gethostname() == "deep":
        args.data_dir = config['data-dir_deep']
    elif socket.gethostname() == 'hulk':
        args.data_dir = '/local/rcs/ECCV/Cityscape/cityscape_dataset'
    else:
        args.data_dir = config['data-dir']

    args.task_set = task_set_whole_list
    args.test_task_set = test_task_set
    args.test_batch_size = test_batch_size
    args.classes = config['classes']
    args.workers = config['workers']
    args.pixel_scale = config['pixel_scale']
    args.steps = steps
    args.debug = debug
    args.epsilon = epsilon
    args.step_size = step_size

    # ADDED FOR CITYSCAPES
    args.random_scale = config['random-scale']
    args.random_rotate = config['random-rotate']
    args.crop_size = config['crop-size']
    args.list_dir = config['list-dir']

    num_being_tested = len(test_task_set)

    print("PRINTING ARGUMENTS \n")
    for k, v in args.__dict__.items():  # Prints arguments and contents of config file
        print(k, ':', v)

    dict_args = vars(args)
    dict_summary = {}
    dict_summary['config'] = dict_args
    dict_summary['results'] = {}
    dict_model_summary = {}

    model_list = []
    criteria_list = []
    task_list_set = []
    for each, path_model in zip(task_set_whole_list, path_model_list):
        model = get_submodel_ensemble(model_name, args, each)
        if torch.cuda.is_available():
            model.cuda()

        print("=> Loading checkpoint '{}'".format(path_model))
        if torch.cuda.is_available():
            checkpoint_model = torch.load(path_model)
        else:
            checkpoint_model = torch.load(
                path_model, map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint_model['state_dict'])  # , strict=False
        model_list.append(model)

        from models.mtask_losses import get_losses_and_tasks
        print("each ", each)
        criteria, taskonomy_tasks = get_losses_and_tasks(
            args, customized_task_set=each)
        print("criteria got", criteria)
        criteria_list.append(criteria)
        task_list_set.extend(taskonomy_tasks)

    task_list_set = list(set(task_list_set))
    # print('dataloader will load these tasks', task_list_set)
    val_loader = get_loader(args, split='val', out_name=False,
                            customized_task_set=task_list_set)

    from models.ensemble import Ensemble
    model_whole = Ensemble(model_list)

    from learning.test_ensemble import mtask_ensemble_test
    # mtask_ensemble_test(val_loader, model_ensemble, criterion_list, task_name, args, info)
    # print('mid test task', args.test_task_set)
    advacc_result = mtask_ensemble_test(val_loader, model_whole, criteria_list,
                                        args.test_task_set, args,
                                        use_houdini=use_houdini)
    print(
        "Results: epsilon {} step {} step_size {} Acc for task {} ::".format(
            args.epsilon, args.steps, args.step_size, args.test_task_set),
        advacc_result)
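# Illustrative call of test_ensemble: folder names, task sets, and attack settings
# are placeholders. Note that default_suffix is appended to every entry of
# path_model_list, so the list should contain experiment backup folders rather
# than checkpoint files.
def _example_test_ensemble():
    test_ensemble(
        path_model_list=['train_modelA_taskonomy_2019-10-10_15:54:09',
                         'train_modelB_taskonomy_2019-10-11_09:12:30'],
        model_name='resnet18',
        task_set_whole_list=[['segmentsemantic'], ['depth_zbuffer']],  # hypothetical task sets
        test_task_set=['segmentsemantic'],
        test_batch_size=2,
        steps=10,
        debug=False,
        epsilon=4.0,
        step_size=1.0)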
def train_seg_adv(args):
    batch_size = args.batch_size
    num_workers = args.workers
    crop_size = args.crop_size

    print(' '.join(sys.argv))
    for k, v in args.__dict__.items():
        print(k, ':', v)

    single_model = DRNSeg(args.arch, args.classes, None, pretrained=True)
    if args.pretrained and args.loading:
        print('args.pretrained', args.pretrained)
        single_model.load_state_dict(torch.load(args.pretrained))
    out_dir = 'output/{}_{:03d}_{}'.format(args.arch, 0, args.phase)

    model = torch.nn.DataParallel(single_model)
    criterion = nn.NLLLoss(ignore_index=255)
    if torch.cuda.is_available():
        model.cuda()
        criterion.cuda()

    # Data loading code
    info = get_info(args.dataset)
    train_loader = get_loader(args, "train")
    val_loader = get_loader(args, "val", out_name=True)
    adv_val_loader = get_loader(args, "adv_val", out_name=True)

    # define loss function (criterion) and optimizer
    optimizer = torch.optim.SGD(single_model.optim_parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    cudnn.benchmark = True
    best_prec1 = 0
    start_epoch = 0

    # Backup files before resuming/starting training
    backup_output_dir = args.backup_output_dir
    if os.path.exists(backup_output_dir):
        timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S')
        experiment_backup_folder = "adv_train_" + args.arch + "_" + args.dataset + "_" + timestamp
        experiment_backup_folder = os.path.join(backup_output_dir, experiment_backup_folder)
        print(experiment_backup_folder)
        shutil.copytree('.', experiment_backup_folder,
                        ignore=include_patterns('*.py', '*.json'))
    else:
        experiment_backup_folder = ""
        print("backup_output_dir does not exist")

    # Logging with TensorBoard
    log_dir = experiment_backup_folder + "/runs/"
    writer = SummaryWriter(log_dir=log_dir)
    val_writer = SummaryWriter(log_dir=log_dir + '/validate_runs/')

    fh = logging.FileHandler(experiment_backup_folder + '/log.txt')
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    # optionally resume from a checkpoint
    if args.resume:
        print("resuming", args.resume)
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.evaluate:
        validate(val_loader, model, criterion, args=args,
                 log_dir=experiment_backup_folder, eval_score=accuracy, info=info)
        return

    for epoch in range(start_epoch, args.epochs):
        lr = adjust_learning_rate(args, optimizer, epoch)
        logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))

        # train for one epoch
        adv_train(train_loader, model, criterion, optimizer, epoch, args, info,
                  writer, args.dataset, eval_score=accuracy)

        # evaluate on validation set
        # TODO: definitely uncomment this.
        prec = validate(val_loader, model, criterion, args=args,
                        log_dir=experiment_backup_folder, eval_score=accuracy,
                        info=info, epoch=epoch,
                        writer=val_writer)  # To see the accuracy on clean images as well.
        from learning.validate import validate_adv
        mAP = validate_adv(adv_val_loader, model, args.classes, save_vis=True,
                           log_dir=experiment_backup_folder, has_gt=True,
                           output_dir=out_dir, downsize_scale=args.downsize_scale,
                           args=args, info=info, writer=val_writer, epoch=epoch)
        logger.info('adv mAP: %f', mAP)
        # writer.add_scalar('Adv_Validate/prec', prec, epoch)
        # writer.add_scalar('Adv_Validate/mAP', mAP, epoch)

        is_best = mAP > best_prec1
        if is_best:
            best_prec1 = max(mAP, best_prec1)

        # checkpoint_path = 'checkpoint_latest.pth.tar'
        save_model_path = os.path.join(experiment_backup_folder, 'savecheckpoint')
        os.makedirs(save_model_path, exist_ok=True)
        checkpoint_path = os.path.join(save_model_path, 'checkpoint_latest.pth.tar')
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best, filename=checkpoint_path, save_model_path=save_model_path)

        if (epoch + 1) % 10 == 0:
            # history_path = os.path.join(save_model_path, 'checkpoint_{:03d}.pth.tar'.format(epoch + 1))
            history_path = os.path.join(save_model_path,
                                        'checkpoint_{:03d}.pth.tar'.format(epoch + 1))
            shutil.copyfile(checkpoint_path, history_path)

    writer.close()
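# train_seg_adv backs up source files with shutil.copytree(..., ignore=include_patterns(...)).
# A minimal sketch of one possible include_patterns helper is given below, assuming it
# mirrors shutil.ignore_patterns with inverted logic; the project's own helper may differ.
def _include_patterns_sketch(*patterns):
    import fnmatch
    import os

    def _ignore(directory, names):
        # Keep files matching any of the glob patterns; keep directories so the
        # copy recurses; ignore everything else.
        keep = set()
        for pattern in patterns:
            keep.update(fnmatch.filter(names, pattern))
        return {name for name in names
                if name not in keep
                and not os.path.isdir(os.path.join(directory, name))}

    return _ignore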
def test_seg(args):
    batch_size = args.batch_size
    num_workers = args.workers
    phase = args.phase

    for k, v in args.__dict__.items():
        print(k, ':', v)

    # model specific
    model_arch = args.arch
    task_set_present = hasattr(args, 'task_set')
    # if (model_arch.startswith('drn')):
    #     if task_set_present:
    #         from models.DRNSegDepth import DRNSegDepth
    #         print("LENGTH OF TASK SET IN CONFIG>1 => LOADING DRNSEGDEPTH model for multitask, to load DRNSEG, remove the task_set from config args.")
    #         single_model = DRNSegDepth(args.arch,
    #                                    classes=19,
    #                                    pretrained_model=None,
    #                                    pretrained=False,
    #                                    tasks=args.task_set)
    #     else:
    #         single_model = DRNSeg(args.arch, args.classes, pretrained_model=None,
    #                               pretrained=False)
    # elif (model_arch.startswith('fcn32')):
    #     # define the architecture for FCN.
    #     single_model = FCN32s(args.classes)
    # else:
    single_model = DRNSeg(args.arch, args.classes, pretrained_model=None,
                          pretrained=False)  # Replace with some other model
    print("Architecture unidentifiable, please choose between : fcn32s, dnn_")

    if args.pretrained:
        print('args.pretrained', args.pretrained)
        single_model.load_state_dict(torch.load(args.pretrained))

    model = torch.nn.DataParallel(single_model)
    if torch.cuda.is_available():
        model.cuda()

    data_dir = args.data_dir
    # info = json.load(open(join(data_dir, 'info.json'), 'r'))
    # normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
    # scales = [0.5, 0.75, 1.25, 1.5, 1.75]
    # if args.ms:
    #     dataset = SegListMS(data_dir, phase, transforms.Compose([
    #         transforms.ToTensor(),
    #         normalize,
    #     ]), scales, list_dir=args.list_dir)
    # else:
    #     dataset = SegList(data_dir, phase, transforms.Compose([
    #         transforms.ToTensor(),
    #         normalize,
    #     ]), list_dir=args.list_dir, out_name=True)
    # test_loader = torch.utils.data.DataLoader(
    #     dataset,
    #     batch_size=batch_size, shuffle=False, num_workers=num_workers,
    #     pin_memory=False
    # )
    test_loader = get_loader(args, phase, out_name=True)
    info = get_info(args.dataset)
    cudnn.benchmark = True

    # Backup files before resuming/starting training
    backup_output_dir = args.backup_output_dir
    os.makedirs(backup_output_dir, exist_ok=True)
    if os.path.exists(backup_output_dir):
        timestamp = datetime.datetime.fromtimestamp(
            time.time()).strftime('%Y-%m-%d_%H:%M:%S')
        experiment_backup_folder = "test_" + args.arch + "_" + args.dataset + "_" + timestamp
        experiment_backup_folder = os.path.join(backup_output_dir, experiment_backup_folder)
        os.makedirs(experiment_backup_folder)
        print(experiment_backup_folder)

    fh = logging.FileHandler(experiment_backup_folder + '/log.txt')
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    # optionally resume from a checkpoint
    start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.resume))

    # Make sure the name of the dataset and model are included in the output file.
    out_dir = 'output/{}_{:03d}_{}'.format(args.arch, start_epoch, phase)
    if len(args.test_suffix) > 0:
        out_dir += '_' + args.test_suffix
    if args.ms:
        out_dir += '_ms'

    if args.adv_test:
        from learning.validate import validate_adv_test
        mAP = validate_adv_test(test_loader, model, args.classes, save_vis=True,
                                has_gt=True, output_dir=out_dir,
                                downsize_scale=args.downsize_scale,
                                args=args, info=info)
    elif args.ms:
        # NOTE: 'scales' is only defined in the commented-out multi-scale loader
        # above, so this branch relies on it being restored.
        mAP = test_ms(test_loader, model, args.classes, save_vis=True,
                      has_gt=phase != 'test' or args.with_gt,
                      output_dir=out_dir, scales=scales)
    else:
        if args.test_acc_output_dim:
            test_drnseg_masked_attack(test_loader, model, args.classes, save_vis=True,
                                      has_gt=phase != 'test' or args.with_gt,
                                      output_dir=out_dir,
                                      downsize_scale=args.downsize_scale,
                                      args=args)
            # test_masked_accuracy_outdim(test_loader, model, args.classes, save_vis=True,
            #                             has_gt=phase != 'test' or args.with_gt, output_dir=out_dir,
            #                             downsize_scale=args.downsize_scale,
            #                             args=args)
        else:
            mAP = test_grad_diffoutdim(test_loader, model, args.classes, save_vis=True,
                                       has_gt=phase != 'test' or args.with_gt,
                                       output_dir=out_dir,
                                       downsize_scale=args.downsize_scale,
                                       args=args)
    logger.info('mAP: %f', mAP)
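# Illustrative namespace for test_seg above: these are the attributes the function
# reads directly; all values are placeholders, and the project's config files
# normally supply further fields (e.g. crop size, list dir) consumed by get_loader.
def _example_test_seg_args():
    from argparse import Namespace
    return Namespace(
        batch_size=1, workers=4, phase='val',
        arch='drn_d_22', classes=19, dataset='cityscapes',  # hypothetical values
        data_dir='./data', backup_output_dir='./backup',
        pretrained='', resume='', test_suffix='', ms=False,
        adv_test=False, test_acc_output_dim=False,
        with_gt=True, downsize_scale=1)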
def test_seg(args):
    batch_size = args.batch_size
    num_workers = args.workers
    phase = args.phase

    for k, v in args.__dict__.items():
        print(k, ':', v)

    # model specific
    model_arch = args.arch
    if (model_arch.startswith('drn')):
        single_model = DRNSeg(args.arch, args.classes, pretrained_model=None,
                              pretrained=False)
    elif (model_arch.startswith('fcn32')):
        # define the architecture for FCN.
        single_model = FCN32s(args.classes)
    else:
        single_model = DRNSeg(
            args.arch, args.classes, pretrained_model=None,
            pretrained=False)  # Replace with some other model
        print(
            "Architecture unidentifiable, please choose between : fcn32s, dnn_"
        )

    model = torch.nn.DataParallel(single_model)
    print('loading model from path : ', args.pretrained)
    if '.tar' in args.pretrained:
        model_load = torch.load(args.pretrained)
        # print('model load', model_load.keys())
        print('model epoch', model_load['epoch'], 'precision',
              model_load['best_prec1'])
        model.load_state_dict(model_load['state_dict'])
    else:
        print(torch.load(args.pretrained).keys())
        model.load_state_dict(torch.load(args.pretrained))

    if torch.cuda.is_available():
        model.cuda()

    test_loader = get_loader(args, phase, out_name=True)
    info = get_info(args.dataset)
    cudnn.benchmark = True

    if args.adv_test:
        # if args.select_class:
        mAP = eval_adv(test_loader, model, args.classes, args=args, info=info,
                       eval_score=accuracy,
                       calculate_specified_only=args.select_class)
        # from learning.validate import validate_adv_test
        # mAP = validate_adv_test(test_loader, model, args.classes, save_vis=True,
        #                         has_gt=True, output_dir=None, downsize_scale=args.downsize_scale,
        #                         args=args, info=info)
    elif args.select_class:
        test_selected_class_grad(test_loader, model, args.classes, args)
    else:
        mAP = test_mask_rand(test_loader, model, args.classes, save_vis=True,
                             has_gt=phase != 'test' or args.with_gt,
                             output_dir=None,
                             downsize_scale=args.downsize_scale, args=args)