def run(args: DictConfig) -> None:
    """Train or evaluate an SDIM model wrapped around a pretrained classifier.

    Args:
        args: Hydra config; expected keys include seed, device, dataset,
            per-dataset sub-config (n_classes, rep_size, margin), mi_units,
            alpha/beta/gamma, learning_rate, inference, classifier_name.

    Side effects: seeds torch, loads a checkpoint in inference mode, and
    either runs threshold extraction + clean evaluation or training.
    """
    cuda_available = torch.cuda.is_available()
    torch.manual_seed(args.seed)
    # Fall back to CPU when CUDA was requested but is not available.
    device = "cuda" if cuda_available and args.device == 'cuda' else "cpu"

    # Per-dataset hyperparameters live in a sub-config keyed by dataset name.
    n_classes = args.get(args.dataset).n_classes
    rep_size = args.get(args.dataset).rep_size
    margin = args.get(args.dataset).margin

    classifier = load_pretrained_model(args)

    if args.dataset == 'tiny_imagenet':
        args.data_dir = 'tiny_imagenet'

    # BUG FIX: previously `.to(args.device)`, which ignored the CPU fallback
    # computed above and crashed when CUDA was requested but unavailable.
    sdim = SDIM(disc_classifier=classifier,
                n_classes=n_classes,
                rep_size=rep_size,
                mi_units=args.mi_units,
                margin=margin,
                alpha=args.alpha,
                beta=args.beta,
                gamma=args.gamma).to(device)

    optimizer = Adam(sdim.parameters(), lr=args.learning_rate)

    if args.inference:
        save_name = 'SDIM_{}.pth'.format(args.classifier_name)
        # map_location keeps CUDA-saved checkpoints loadable on CPU-only hosts.
        sdim.load_state_dict(
            torch.load(save_name, map_location=lambda storage, loc: storage))
        thresholds1, thresholds2 = extract_thresholds(sdim, args)
        clean_eval(sdim, args, thresholds1, thresholds2)
    else:
        train(sdim, optimizer, args)
def run(args: DictConfig) -> None:
    """Entry point: build a base classifier, its data loaders, then train
    or (in inference mode) evaluate it on the test split.

    Args:
        args: Hydra config; expected keys include seed, device, dataset,
            per-dataset sub-config (n_classes), classifier_name, data_dir,
            n_batch_train, n_batch_test, inference, epochs, learning_rate.
    """
    torch.manual_seed(args.seed)
    # Honor the requested device only when CUDA is actually present.
    use_cuda = torch.cuda.is_available() and args.device == 'cuda'
    device = "cuda" if use_cuda else "cpu"

    n_classes = args.get(args.dataset).n_classes

    # tiny_imagenet needs its own model builder and overrides a few
    # training hyperparameters; every other dataset goes through get_model.
    if args.dataset == 'tiny_imagenet':
        args.epochs = 20
        args.learning_rate = 0.001
        classifier = get_model_for_tiny_imagenet(args.classifier_name, n_classes).to(device)
        args.data_dir = 'tiny_imagenet'
    else:
        classifier = get_model(name=args.classifier_name, n_classes=n_classes).to(device)

    logger.info('Base classifier name: {}, # parameters: {}'.format(
        args.classifier_name, cal_parameters(classifier)))

    # Hydra changes the working directory, so resolve the data path first.
    data_dir = hydra.utils.to_absolute_path(args.data_dir)
    train_data = get_dataset(data_name=args.dataset, data_dir=data_dir,
                             train=True, crop_flip=True)
    test_data = get_dataset(data_name=args.dataset, data_dir=data_dir,
                            train=False, crop_flip=False)

    train_loader = DataLoader(dataset=train_data,
                              batch_size=args.n_batch_train,
                              shuffle=True)
    test_loader = DataLoader(dataset=test_data,
                             batch_size=args.n_batch_test,
                             shuffle=False)

    if not args.inference:
        train(classifier, train_loader, test_loader, args)
        return

    # Inference path: restore weights (CPU-mappable) and report test metrics.
    save_name = '{}.pth'.format(args.classifier_name)
    state = torch.load(save_name, map_location=lambda storage, loc: storage)
    classifier.load_state_dict(state)
    loss, acc = run_epoch(classifier, test_loader, args)
    logger.info('Inference loss: {:.4f}, acc: {:.4f}'.format(loss, acc))