def test(args):
    # 1. get dataset
    train_loader, val_loader, test_loader, class_list = return_dataset(args)

    # 2. generator
    if args.net == 'resnet50':
        G = ResBase50()
        inc = 2048
    elif args.net == 'resnet101':
        G = ResBase101()
        inc = 2048
    elif args.net == "alexnet":
        G = AlexNetBase()
        inc = 4096
    elif args.net == "vgg":
        G = VGGBase()
        inc = 4096
    elif args.net == "inception_v3":
        G = models.inception_v3(pretrained=True)
        inc = 1000
    elif args.net == "googlenet":
        G = models.googlenet(pretrained=True)
        inc = 1000
    elif args.net == "densenet":
        G = models.densenet161(pretrained=True)
        inc = 1000
    elif args.net == "resnext":
        G = models.resnext50_32x4d(pretrained=True)
        inc = 1000
    elif args.net == "squeezenet":
        G = models.squeezenet1_0(pretrained=True)
        inc = 1000
    else:
        raise ValueError('Model cannot be recognized.')
    G.cuda()
    G.train()

    # 3. classifier
    F = Predictor(num_class=len(class_list), inc=inc, temp=args.T)
    F.cuda()
    F.train()

    # 4. load pre-trained model
    G.load_state_dict(torch.load(os.path.join(
        args.checkpath, "G_net_{}_loss_{}.pth".format(args.net, args.loss))))
    F.load_state_dict(torch.load(os.path.join(
        args.checkpath, "F_net_{}_loss_{}.pth".format(args.net, args.loss))))

    # 5. testing
    acc_test = eval(test_loader, G, F, class_list)
    print('Testing accuracy: {:.3f}\n'.format(acc_test))
    return acc_test
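# A minimal usage sketch for test(): the Namespace fields below are the ones
# this function and its checkpoint paths read (net, loss, checkpath, T); the
# values are placeholders, and return_dataset(args) will typically expect
# additional dataset-specific arguments supplied by the repo's argument parser.
def _example_test_usage():
    from argparse import Namespace
    args = Namespace(
        net='resnet50',            # one of the backbone keys handled above
        loss='CrossEntropy',       # selects the checkpoint file name suffix
        checkpath='./checkpoints',
        T=0.05,                    # Predictor softmax temperature
    )
    return test(args)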
        if 'classifier' not in key:
            params += [{'params': [value], 'lr': args.multi,
                        'weight_decay': 0.0005}]
        else:
            params += [{'params': [value], 'lr': args.multi * 10,
                        'weight_decay': 0.0005}]
    if "resnet" in args.net:
        F1 = Predictor_deep(num_class=len(class_list), inc=inc)
    else:
        F1 = Predictor(num_class=len(class_list), inc=inc, temp=args.T)
    weights_init(F1)
    lr = args.lr
    G.cuda()
    F1.cuda()

    im_data_s = torch.FloatTensor(1)
    im_data_t = torch.FloatTensor(1)
    im_data_tu = torch.FloatTensor(1)
    gt_labels_s = torch.LongTensor(1)
    gt_labels_t = torch.LongTensor(1)
    sample_labels_t = torch.LongTensor(1)
    sample_labels_s = torch.LongTensor(1)

    im_data_s = im_data_s.cuda()
    im_data_t = im_data_t.cuda()
    im_data_tu = im_data_tu.cuda()
    gt_labels_s = gt_labels_s.cuda()
    gt_labels_t = gt_labels_t.cuda()
def test_ensemble(args, alphas=None):
    # 1. get dataset
    # problem: inception_v3 crops the dataset differently than all the others
    args.net = args.ensemble[0]  # test, might introduce a problem
    print("args.net when loading: ", args.net)
    train_loader, val_loader, test_loader, class_list = return_dataset(args)
    print("Loading in ")

    # 2. generator
    G_list = []  # use a list of models
    F_list = []  # use a list of predictors, one per classifier in args.ensemble
    for classifier in args.ensemble:
        print("classifier: ", classifier)
        if classifier == 'resnet50':
            G = ResBase50()
            inc = 2048
        elif classifier == 'resnet101':
            G = ResBase101()
            inc = 2048
        elif classifier == "alexnet":
            G = AlexNetBase()
            inc = 4096
        elif classifier == "vgg":
            G = VGGBase()
            inc = 4096
        elif classifier == "inception_v3":
            G = models.inception_v3(pretrained=True)
            inc = 1000
        elif classifier == "googlenet":
            G = models.googlenet(pretrained=True)
            inc = 1000
        elif classifier == "densenet":
            G = models.densenet161(pretrained=True)
            inc = 1000
        elif classifier == "resnext":
            G = models.resnext50_32x4d(pretrained=True)
            inc = 1000
        elif classifier == "squeezenet":
            G = models.squeezenet1_0(pretrained=True)
            inc = 1000
        else:
            raise ValueError('Model cannot be recognized.')
        G.cuda()
        G.train()

        # 3. classifier
        F = Predictor(num_class=len(class_list), inc=inc, temp=args.T)
        F.cuda()
        F.train()

        # 4. load pre-trained model
        G.load_state_dict(torch.load(os.path.join(
            args.checkpath,
            "G_net_{}_loss_{}.pth".format(classifier, args.loss))))
        F.load_state_dict(torch.load(os.path.join(
            args.checkpath,
            "F_net_{}_loss_{}.pth".format(classifier, args.loss))))
        G_list.append(G)
        F_list.append(F)

    # 5. testing
    print("evaluating accuracy")
    acc_test = eval_ensemble(args, test_loader, G_list, F_list, class_list, alphas)
    print('Testing accuracy: {:.3f}\n'.format(acc_test))
    return acc_test
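# A minimal usage sketch for test_ensemble(), assuming args.ensemble lists
# backbones whose G/F checkpoints were previously saved by train(), and that
# alphas (optional) holds one weight per ensemble member for eval_ensemble().
# All values are placeholders; dataset arguments are again handled elsewhere.
def _example_test_ensemble_usage():
    from argparse import Namespace
    args = Namespace(
        ensemble=['resnet50', 'alexnet'],  # placeholder member list
        loss='CrossEntropy',
        checkpath='./checkpoints',
        T=0.05,
    )
    alphas = [0.7, 0.3]  # placeholder per-member weights
    return test_ensemble(args, alphas=alphas)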
def train(args, weights=None):
    if not os.path.exists(args.checkpath):
        os.mkdir(args.checkpath)

    # 1. get dataset
    train_loader, val_loader, test_loader, class_list = return_dataset(args)

    # 2. generator
    if args.net == 'resnet50':
        G = ResBase50()
        inc = 2048
    elif args.net == 'resnet101':
        G = ResBase101()
        inc = 2048
    elif args.net == "alexnet":
        G = AlexNetBase()
        inc = 4096
    elif args.net == "vgg":
        G = VGGBase()
        inc = 4096
    elif args.net == "inception_v3":
        G = models.inception_v3(pretrained=True)
        inc = 1000
    elif args.net == "googlenet":
        G = models.googlenet(pretrained=True)
        inc = 1000
    elif args.net == "densenet":
        G = models.densenet161(pretrained=True)
        inc = 1000
    elif args.net == "resnext":
        G = models.resnext50_32x4d(pretrained=True)
        inc = 1000
    elif args.net == "squeezenet":
        G = models.squeezenet1_0(pretrained=True)
        inc = 1000
    else:
        raise ValueError('Model cannot be recognized.')

    params = []
    for key, value in dict(G.named_parameters()).items():
        if value.requires_grad:
            if 'classifier' not in key:
                params += [{'params': [value], 'lr': args.multi,
                            'weight_decay': 0.0005}]
            else:
                params += [{'params': [value], 'lr': args.multi * 10,
                            'weight_decay': 0.0005}]
    G.cuda()
    G.train()

    # 3. classifier
    F = Predictor(num_class=len(class_list), inc=inc, temp=args.T)
    weights_init(F)
    F.cuda()
    F.train()

    # 4. optimizer
    optimizer_g = optim.SGD(params, momentum=0.9, weight_decay=0.0005,
                            nesterov=True)
    optimizer_f = optim.SGD(list(F.parameters()), lr=1.0, momentum=0.9,
                            weight_decay=0.0005, nesterov=True)
    optimizer_g.zero_grad()
    optimizer_f.zero_grad()
    param_lr_g = []
    for param_group in optimizer_g.param_groups:
        param_lr_g.append(param_group["lr"])
    param_lr_f = []
    for param_group in optimizer_f.param_groups:
        param_lr_f.append(param_group["lr"])

    # 5. training
    data_iter_train = iter(train_loader)
    len_train = len(train_loader)
    best_acc = 0
    for step in range(args.steps):
        # update optimizer and lr
        optimizer_g = inv_lr_scheduler(param_lr_g, optimizer_g, step,
                                       init_lr=args.lr)
        optimizer_f = inv_lr_scheduler(param_lr_f, optimizer_f, step,
                                       init_lr=args.lr)
        lr = optimizer_f.param_groups[0]['lr']
        if step % len_train == 0:
            data_iter_train = iter(train_loader)

        # forwarding
        data = next(data_iter_train)
        im_data = data[0].cuda()
        gt_label = data[1].cuda()
        feature = G(im_data)
        if args.net == 'inception_v3':
            # inception_v3 returns an InceptionOutputs namedtuple, not a tensor
            feature = feature.logits  # get the tensor
        # call the selected loss with per-batch sample weights when present
        # (weights may still default to None)
        batch_weights = None if weights is None else weights[step % len_train]
        if args.loss == 'CrossEntropy':
            loss = crossentropy(F, feature, gt_label, batch_weights)
        elif args.loss == 'FocalLoss':
            loss = focal_loss(F, feature, gt_label, batch_weights)
        elif args.loss == 'ASoftmaxLoss':
            loss = asoftmax_loss(F, feature, gt_label, batch_weights)
        elif args.loss == 'SmoothCrossEntropy':
            loss = smooth_crossentropy(F, feature, gt_label, batch_weights)
        else:
            raise ValueError('Unknown loss: {}'.format(args.loss))
        loss.backward()

        # backpropagation
        optimizer_g.step()
        optimizer_f.step()
        optimizer_g.zero_grad()
        optimizer_f.zero_grad()
        G.zero_grad()
        F.zero_grad()

        if step % args.log_interval == 0:
            log_train = 'Train iter: {} lr{} Loss Classification: {:.6f}\n'.format(
                step, lr, loss.data)
            print(log_train)

        if step and step % args.save_interval == 0:
            # evaluate and save
            acc_val = eval(val_loader, G, F, class_list)
            G.train()
            F.train()
            if args.save_check and acc_val >= best_acc:
                best_acc = acc_val
                print('saving model')
                print('best_acc: ' + str(best_acc) + ' acc_val: ' + str(acc_val))
                torch.save(G.state_dict(),
                           os.path.join(args.checkpath,
                                        "G_net_{}_loss_{}.pth".format(args.net, args.loss)))
                torch.save(F.state_dict(),
                           os.path.join(args.checkpath,
                                        "F_net_{}_loss_{}.pth".format(args.net, args.loss)))

    if weights is not None:
        print("computing error rate")
        error_rate = eval_adaboost_error_rate(train_loader, G, F, class_list, weights)
        model_importance = torch.log((1 - error_rate) / error_rate) / 2
        # now update the weights
        print("updating weights")
        update_weights_adaboost(train_loader, G, F, class_list, weights,
                                model_importance)
        return error_rate, model_importance
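# A minimal usage sketch for train(): with weights=None it runs a single
# training pass and returns nothing; when per-batch sample weights are passed
# in, it additionally returns (error_rate, model_importance) for the
# AdaBoost-style loop. Field values below are placeholders, not repo defaults.
def _example_train_usage():
    from argparse import Namespace
    args = Namespace(
        net='resnet50', loss='CrossEntropy', checkpath='./checkpoints',
        T=0.05, lr=0.01, multi=0.1, steps=1000,
        log_interval=100, save_interval=500, save_check=True,
    )
    return train(args, weights=None)  # plain (unweighted) training pass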
elif args.net == "alexnet": G = AlexNetBase() inc = 4096 elif args.net == "vgg": G = VGGBase() inc = 4096 else: raise ValueError('Model cannot be recognized.') if "resnet" in args.net: F1 = Predictor_deep(num_class=len(class_list), inc=inc) else: F1 = Predictor(num_class=len(class_list), inc=inc, cosine=True, temp=args.T) """ G.cuda() F1.cuda() """ G.load_state_dict(torch.load(os.path.join(args.checkpath, "G_iter_model_{}_{}_to_{}_step_{}.pth.tar".format(args.method, args.source, args.target, args.step)))) F1.load_state_dict(torch.load(os.path.join(args.checkpath, "F1_iter_model_{}_{}_to_{}_step_{}.pth.tar".format(args.method, args.source, args.target, args.step)))) im_data_t = torch.FloatTensor(1) gt_labels_t = torch.LongTensor(1) """" im_data_t = im_data_t.cuda() gt_labels_t = gt_labels_t.cuda() """ im_data_t = Variable(im_data_t)