def main():
    """Evaluate a trained few-shot model checkpoint on the test split.

    Relies on module-level globals: ``args`` (parsed CLI options), ``Logger``,
    ``DataManager``, ``Model`` and ``test``.
    """
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    # Mirror all stdout into a persistent log file in the save directory.
    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print('Initializing image data manager')
    dm = DataManager(args, use_gpu)
    trainloader, testloader = dm.return_dataloaders()

    model = Model(scale_cls=args.scale_cls, num_classes=args.num_classes)

    # Load the checkpoint. FIX: a checkpoint saved on GPU cannot be
    # deserialized on a CPU-only machine without remapping its storages,
    # so fall back to map_location='cpu' when no GPU is available.
    checkpoint = torch.load(args.resume, map_location=None if use_gpu else 'cpu')
    model.load_state_dict(checkpoint['state_dict'])
    print("Loaded checkpoint from '{}'".format(args.resume))

    if use_gpu:
        model = model.cuda()

    test(model, testloader, use_gpu)
def main():
    """Train a few-shot model and periodically evaluate it on the test loader.

    Relies on module-level globals: ``args``, ``Logger``, ``DataManager``,
    ``Model``, ``CrossEntropyLoss``, ``init_optimizer``,
    ``adjust_learning_rate``, ``train``, ``test`` and ``save_checkpoint``.
    """
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    # Keep runs for different shot counts in separate sub-directories,
    # e.g. <save_dir>/5-shot.
    args.save_dir = osp.join(args.save_dir, str(args.nExemplars) + '-shot')
    # Mirror all stdout into a persistent training log.
    sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print('Initializing image data manager')
    dm = DataManager(args, use_gpu)
    trainloader, testloader = dm.return_dataloaders()

    model = Model(args)
    criterion = CrossEntropyLoss()
    optimizer = init_optimizer(args.optim, model.parameters(), args.lr,
                               args.weight_decay)

    if use_gpu:
        model = model.cuda()

    start_time = time.time()
    train_time = 0
    best_acc = -np.inf  # any real accuracy beats the initial value
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.max_epoch):
        # Look up this epoch's learning rate from the LUT schedule and
        # apply it to the optimizer.
        learning_rate = adjust_learning_rate(optimizer, epoch, args.LUT_lr)
        start_train_time = time.time()
        train(epoch, model, criterion, optimizer, trainloader, learning_rate,
              use_gpu)
        train_time += round(time.time() - start_train_time)

        # Evaluate on the first epoch, on every epoch past the first LR step,
        # and on every 10th epoch before that.
        if epoch == 0 or epoch > (args.stepsize[0] - 1) or (epoch + 1) % 10 == 0:
            acc = test(model, testloader, use_gpu)
            is_best = acc > best_acc
            if is_best:
                best_acc = acc
                best_epoch = epoch + 1

            # NOTE: a checkpoint is written for every evaluated epoch;
            # 'epoch' in the dict is 0-based while the filename is 1-based.
            save_checkpoint(
                {
                    'state_dict': model.state_dict(),
                    'acc': acc,
                    'epoch': epoch,
                }, is_best,
                osp.join(args.save_dir,
                         'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

            print("==> Test 5-way Best accuracy {:.2%}, achieved at epoch {}".
                  format(best_acc, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
    print("==========\nArgs:{}\n==========".format(args))
def main(test_dataset_name):
    """Evaluate a trained checkpoint on the given test dataset.

    Args:
        test_dataset_name: dataset identifier; written into ``args.dataset``
            before the data manager is built, so the loaders come from this
            dataset.

    Relies on module-level globals: ``args``, ``Logger``, ``DataManager``,
    ``Model`` and ``test``.
    """
    args.dataset = test_dataset_name
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    # Log file name encodes the episode configuration (dataset/way/shot).
    sys.stdout = Logger(
        osp.join(
            args.save_dir,
            f'log_test_{args.dataset}_{args.nKnovel}way_{args.nExemplars}shot16.txt'
        ))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print('Initializing image data manager')
    dm = DataManager(args, use_gpu)
    trainloader, testloader = dm.return_dataloaders()

    model = Model(scale_cls=args.scale_cls, num_classes=args.num_classes)

    # Load the checkpoint. FIX: a checkpoint saved on GPU cannot be
    # deserialized on a CPU-only machine without remapping its storages,
    # so fall back to map_location='cpu' when no GPU is available.
    # (Dead commented-out model-variant selection removed.)
    checkpoint = torch.load(args.resume, map_location=None if use_gpu else 'cpu')
    model.load_state_dict(checkpoint['state_dict'])
    print("Loaded checkpoint from '{}'".format(args.resume))

    if use_gpu:
        model = model.cuda()

    test(model, testloader, use_gpu)
def main():
    """Train (or, when ``only_test`` is set, only evaluate) a few-shot model.

    NOTE(review): ``only_test`` is a free variable resolved at module level —
    confirm where it is defined. When true, training and checkpointing are
    skipped and a fixed pretrained checkpoint is loaded instead.
    """
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    # Mirror all stdout into a persistent training log.
    sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print('Initializing image data manager')
    dm = DataManager(args, use_gpu)
    trainloader, testloader = dm.return_dataloaders()
    #print(args.scale_cls,args.num_classes)
    #exit(0)

    # Build the GNN sub-module (feature dim 512) and wire it into the model
    # only when similarity-based aggregation is enabled.
    GNN_model = create_models(args, 512)
    if args.use_similarity:
        model = Model(args, GNN_model, scale_cls=args.scale_cls,
                      num_classes=args.num_classes)
    else:
        model = Model(scale_cls=args.scale_cls, num_classes=args.num_classes)

    if only_test:
        # Evaluation-only: load a fixed pretrained checkpoint.
        # NOTE(review): the '1-shot-seed112' path is hard-coded — verify it
        # matches the current args.nExemplars setting.
        params = torch.load('result/%s/CAM/1-shot-seed112/%s' %
                            (args.dataset, 'best_model.pth.tar'))
        print(type(params))
        #exit(0)
        #for key in params.keys():
        #print(type(key))
        #exit(0)
        model.load_state_dict(params['state_dict'])
        #exit(0)

    criterion = CrossEntropyLoss()
    optimizer = init_optimizer(args.optim, model.parameters(), args.lr,
                               args.weight_decay)

    if use_gpu:
        model = model.cuda()

    start_time = time.time()
    train_time = 0
    best_acc = -np.inf  # any real accuracy beats the initial value
    best_epoch = 0
    print("==> Start training")

    for epoch in range(args.max_epoch):
        # Look up this epoch's learning rate from the LUT schedule.
        learning_rate = adjust_learning_rate(optimizer, epoch, args.LUT_lr)
        start_train_time = time.time()
        #exit(0)
        #print(not True)
        #exit(0)
        if not only_test:
            #print(';;;;;;;;;;;')
            #exit(0)
            train(epoch, model, criterion, optimizer, trainloader,
                  learning_rate, use_gpu)
            train_time += round(time.time() - start_train_time)

        # Evaluate on the first epoch, past the first LR step, and every
        # 10th epoch before that.
        if epoch == 0 or epoch > (args.stepsize[0] - 1) or (epoch + 1) % 10 == 0:
            print('enter test code')
            #exit(0)
            acc = test(model, testloader, use_gpu)
            is_best = acc > best_acc
            #print(acc)
            #exit(0)
            if is_best:
                best_acc = acc
                best_epoch = epoch + 1
            if not only_test:
                # 'epoch' in the dict is 0-based; the filename is 1-based.
                save_checkpoint(
                    {
                        'state_dict': model.state_dict(),
                        'acc': acc,
                        'epoch': epoch,
                    }, is_best,
                    osp.join(args.save_dir,
                             'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

            print("==> Test 5-way Best accuracy {:.2%}, achieved at epoch {}".
                  format(best_acc, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
    print("==========\nArgs:{}\n==========".format(args))
def main():
    """Train a few-shot model with a warmup LR scheduler and periodic testing.

    Relies on module-level globals: ``args``, ``Logger``, ``DataManager``,
    ``Model``, ``CrossEntropyLoss``, ``init_optimizer``, ``warmup_scheduler``,
    ``train``, ``test`` and ``save_checkpoint``.
    """
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()

    # Run name encodes dataset/shot/kernel/groups (plus optional suffix);
    # each run gets its own save directory.
    model_name = '{}-{}shot-{}kernel-{}group'.format(args.dataset,
                                                     args.nExemplars,
                                                     args.kernel, args.groups)
    if args.suffix is not None:
        model_name = model_name + '-{}'.format(args.suffix)
    save_dir = os.path.join(args.save_dir, model_name)
    os.makedirs(save_dir, exist_ok=True)

    # Mirror all stdout into a persistent training log.
    sys.stdout = Logger(osp.join(save_dir, 'log_train.txt'))
    print("==========\nArgs:{}\n==========".format(args))

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    print('Initializing image data manager')
    dm = DataManager(args, use_gpu)
    trainloader, testloader = dm.return_dataloaders()

    model = Model(num_classes=args.num_classes, groups=args.groups,
                  kernel=args.kernel)
    #model = nn.DataParallel(model)
    criterion = CrossEntropyLoss()
    optimizer = init_optimizer(args.optim, model.parameters(), args.lr,
                               args.weight_decay)

    if use_gpu:
        model = model.cuda()

    start_time = time.time()
    train_time = 0
    best_acc = -np.inf  # any real accuracy beats the initial value
    best_epoch = 0
    print("==> Start training")

    # Per-iteration scheduler: 5 warmup epochs prepended to the schedule,
    # so training runs for max_epoch + warmup_epoch epochs in total.
    warmup_epoch = 5
    scheduler = warmup_scheduler(base_lr=args.lr,
                                 iter_per_epoch=len(trainloader),
                                 max_epoch=args.max_epoch + warmup_epoch,
                                 multi_step=[],
                                 warmup_epoch=warmup_epoch)

    for epoch in range(args.max_epoch + warmup_epoch):
        start_train_time = time.time()
        train(epoch, model, criterion, optimizer, trainloader, scheduler,
              use_gpu)
        train_time += round(time.time() - start_train_time)

        # Evaluate on the first epoch, past the first LR step, and every
        # 10th epoch before that.
        if epoch == 0 or epoch > (args.stepsize[0] - 1) or (epoch + 1) % 10 == 0:
            acc = test(model, testloader, use_gpu)
            is_best = acc > best_acc
            if is_best:
                best_acc = acc
                best_epoch = epoch + 1

            # Only persist improving checkpoints (or the final post-schedule
            # epochs); 'epoch' in the dict is 0-based, the filename 1-based.
            if is_best or epoch > args.max_epoch:
                save_checkpoint(
                    {
                        'state_dict': model.state_dict(),
                        'acc': acc,
                        'epoch': epoch,
                    }, is_best,
                    osp.join(save_dir,
                             'checkpoint_ep' + str(epoch + 1) + '.pth.tar'))

            print("==> Test 5-way Best accuracy {:.2%}, achieved at epoch {}".
                  format(best_acc, best_epoch))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    train_time = str(datetime.timedelta(seconds=train_time))
    print(
        "Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".
        format(elapsed, train_time))
    print("==========\nArgs:{}\n==========".format(args))
# NOTE(review): this appears to be the tail of a test/LRP-visualisation
# routine; whatever precedes `use_gpu = ...` (arg parsing, seeding, or an
# enclosing `def`) is not visible in this chunk — confirm the enclosing scope
# before relying on the indentation level used here.
use_gpu = torch.cuda.is_available()

# Placeholder — must be pointed at a real checkpoint before running.
args.resume = 'please define your model path'

# Map class ids to human-readable labels (path is a placeholder too).
with open('path to /class_to_readablelabel.json', 'r') as f:
    class_to_readable = json.load(f)

# Mirror all stdout into a persistent log file.
sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
print("==========\nArgs:{}\n==========".format(args))

if use_gpu:
    print("Currently using GPU {}".format(args.gpu_devices))
    cudnn.benchmark = True
    torch.cuda.manual_seed_all(args.seed)
else:
    print("Currently using CPU (GPU is highly recommended)")

print('Initializing image data manager')
dm = DataManager(args, use_gpu)
trainloader, testloader = dm.return_dataloaders()

model = net.Model(scale_cls=args.scale_cls, num_classes=args.num_classes)

# load the model
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['state_dict'])
# print(checkpoint['state_dict'])
print("Loaded checkpoint from '{}'".format(args.resume))

if use_gpu:
    model = model.cuda()
model.eval()

# Attach LRP (layer-wise relevance propagation) hooks to the backbone so
# relevance maps can be computed through it later.
base = model.base
preset = lrp_presets.SequentialPresetA()
lrp_wrapper.add_lrp(base, preset=preset)