Example #1
0
			# Evaluate the task classifier on the target-domain loader:
			# accumulate correct predictions over all batches.
			# (target_iter, n_correct and n_total are set up outside this excerpt.)
			for t, batch in target_iter:
				x, y, _ = batch
				if self.cuda:
					x = x.cuda()
					y = y.cuda()
				
				# Forward pass: shared feature extractor -> task logits.
				features = self.feature_extractor.forward(x)
				task_out = self.task_classifier.forward(features)
				# NOTE: softmax is monotonic, so max(1) over the raw logits would
				# yield the same argmax; the softmax here only normalises scores.
				class_output = F.softmax(task_out, dim=1)
				pred_task = class_output.data.max(1, keepdim=True)[1]
				n_correct += pred_task.eq(y.data.view_as(pred_task)).cpu().sum()
				n_total += x.size(0)
				
			# Fraction of correctly classified target samples.
			acc = n_correct.item() * 1.0 / n_total			
			
			return acc

if __name__ == '__main__':
	# Evaluate a saved checkpoint on the test split of one VLCS domain.
	# Renamed the domain string from `dataset` to `dataset_name`: the original
	# rebound `dataset` to the Dataset object below, shadowing the string.
	dataset_name = 'LABELME'
	data_path = './vlcs/' + dataset_name + '/test/'
	# Standard ImageNet mean/std normalisation after resizing to 225x225.
	img_transform = transforms.Compose([transforms.Resize((225, 225)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
	dataset = Loader_validation(path=data_path, transform=img_transform)
	# NOTE(review): shuffle=True is unnecessary for evaluation (accuracy is
	# order-independent); kept to preserve the original behavior.
	loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=64, shuffle=True, num_workers=4)
	cp_path = './'
	cp_name = '_checkpoint_9ep'
	cuda = True
	# Load the checkpoint and report test accuracy on the target domain.
	test_object = OffTest(cp_path, cp_name, loader, cuda)
	test_object.load_checkpoint()
	acc = test_object.test()
	print(acc)
Example #2
0
    # --- Data loaders ----------------------------------------------------
    # Source-domain training loader (source_dataset is built above, outside
    # this excerpt).
    source_loader = torch.utils.data.DataLoader(dataset=source_dataset,
                                                batch_size=args.batch_size,
                                                shuffle=True,
                                                num_workers=args.workers)

    # Held-out test split combining the three source-domain hdf files
    # (presumably sampled uniformly across them, per the loader's name —
    # TODO confirm against Loader_unif_sampling).
    test_source_dataset = Loader_unif_sampling(hdf_path1=test_source_1,
                                               hdf_path2=test_source_2,
                                               hdf_path3=test_source_3,
                                               transform=img_transform_test)
    test_source_loader = torch.utils.data.DataLoader(
        dataset=test_source_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers)

    # Target-domain data, loaded with test-time transforms only.
    target_dataset = Loader_validation(hdf_path=target_path,
                                       transform=img_transform_test)
    target_loader = torch.utils.data.DataLoader(dataset=target_dataset,
                                                batch_size=args.batch_size,
                                                shuffle=True,
                                                num_workers=args.workers)

    # --- Models ----------------------------------------------------------
    task_classifier = models.task_classifier()

    # One domain discriminator per source domain (3 total). When rp_size is
    # 4096 the "ablation" variant is used instead (presumably skipping the
    # random projection when it matches the feature width — TODO confirm).
    domain_discriminator_list = []
    for i in range(3):
        if args.rp_size == 4096:
            disc = models.domain_discriminator_ablation_RP(
                optim.SGD, args.lr_domain, args.momentum_domain,
                args.l2).train()
        else:
            # NOTE(review): the excerpt is truncated mid-call here; the
            # remaining constructor arguments are outside the visible source.
            disc = models.domain_discriminator(args.rp_size, optim.SGD,
                                               args.lr_domain,
Example #3
0
	# Setting seed
	# Reproducibility: seed Python's RNG and torch CPU RNG with this run's seed.
	random.seed(seeds[run])
	torch.manual_seed(seeds[run])
	checkpoint_path = os.path.join(args.checkpoint_path, args.target+'_seed'+str(seeds[run]))

	if args.cuda:
		# NOTE(review): CUDA is seeded with args.seed while the CPU RNGs above
		# use seeds[run] — likely an inconsistency; confirm which was intended.
		torch.cuda.manual_seed(args.seed)

	# Train-time augmentation (random crop + horizontal flip) vs. deterministic
	# test-time resize; both use the standard ImageNet mean/std normalisation.
	img_transform_train = transforms.Compose([transforms.RandomResizedCrop(225, scale=(0.7,1.0)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
	img_transform_test = transforms.Compose([transforms.Resize((225, 225)), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

	# hdf file paths for the single source domain used in this baseline run.
	train_source_1 = args.data_path + 'train_' + args.source1 + '.hdf'
	test_source_1 = args.data_path + 'test_' + args.source1 + '.hdf'

	source_dataset = Loader_validation(hdf_path=train_source_1, transform=img_transform_train)
	source_loader = torch.utils.data.DataLoader(dataset=source_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)

	test_source_dataset = Loader_validation(hdf_path=test_source_1, transform=img_transform_test)
	test_source_loader = torch.utils.data.DataLoader(dataset=test_source_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)

	# Caffe-pretrained AlexNet (per the checkpoint filename): drop the final
	# fc8 layer's weights/bias so the 7-class head is trained from scratch;
	# strict=False lets the rest of the checkpoint load despite the deletions.
	model = models.AlexNet(num_classes = 7, baseline = True)
	state_dict = torch.load(args.model_path+'alexnet_caffe.pth.tar')
	del state_dict["classifier.fc8.weight"]
	del state_dict["classifier.fc8.bias"]
	not_loaded = model.load_state_dict(state_dict, strict = False)  # NOTE(review): result appears unused in this excerpt

	# Only the classifier parameters are passed to the optimizer here.
	optimizer = optim.SGD(list(model.classifier.parameters()), lr=args.lr, momentum=args.momentum, weight_decay=args.l2, nesterov=True)

	if args.cuda:
		model = model.cuda()