def load_and_preprocess(image_file, size=256):
    """Load an image from disk and turn it into a normalized 4-D tensor.

    Parameters
    ----------
    image_file : str or file-like
        Anything accepted by ``PIL.Image.open``.
    size : int, optional
        Side length the image is resized to (default 256).

    Returns
    -------
    torch.Tensor
        Normalized tensor of shape (1, 3, size, size).
    """
    rgb_image = Image.open(image_file).convert('RGB')
    pipeline = transforms.Compose([
        transforms.Resize([size, size]),
        transforms.ToTensor(),
        tensor_normalizer(),
    ])
    # unsqueeze(0) adds a leading batch dimension so the result feeds a model directly.
    return pipeline(rgb_image).unsqueeze(0)
# DataLoader options: 4 worker processes; pin_memory speeds host-to-GPU copies.
# (The old commented-out CUDA/CPU branch was dead code and has been removed.)
kwargs = {'num_workers': 4, 'pin_memory': True}

IMAGE_SIZE = 256
BATCH_SIZE = 4
DATASET = "./coco/"

# Resize((H, W)) already yields an exact IMAGE_SIZE x IMAGE_SIZE image, so the
# former CenterCrop(IMAGE_SIZE) step was a no-op and has been dropped.
# ToTensor scales pixels to [0, 1] before the dataset-specific normalization.
transform = transforms.Compose([
    transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
    transforms.ToTensor(),
    tensor_normalizer()
])

# http://pytorch.org/docs/master/torchvision/datasets.html#imagefolder
train_dataset = datasets.ImageFolder(DATASET, transform)
# http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
                          shuffle=True, **kwargs)
# Seed the RNGs for reproducibility; configure CUDA when requested.
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    kwargs = {'num_workers': 4, 'pin_memory': True}
else:
    torch.set_default_tensor_type('torch.FloatTensor')
    kwargs = {}

# Data Loading: resize the shorter side to image_size, then center-crop to a
# square. NOTE: transforms.Scale is deprecated and removed in modern
# torchvision; transforms.Resize is the drop-in replacement with identical
# semantics for an int argument.
data_transform = transforms.Compose([
    transforms.Resize(args.image_size),
    transforms.CenterCrop(args.image_size),
    transforms.ToTensor(),
    tensor_normalizer()
])

print('=====> Load train images')
train_dataset = datasets.ImageFolder(args.data, data_transform)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                          shuffle=True, **kwargs)
print('Number of train images:', len(train_dataset))

# Pretrained VGG16 serves as the fixed feature extractor for the perceptual loss.
vgg_model = models.vgg16(pretrained=True)
if args.cuda:
    vgg_model.cuda()
vgg = Vgg16(vgg_model)
######################################################################
# Reproducibility: fix both the NumPy and the PyTorch RNG seeds.
SEED = 1080
np.random.seed(SEED)
torch.manual_seed(SEED)

##################################
# build transforms for ImageLoader
##################################
IMAGE_SIZE = 256
transform_list = []

# (1) cut out the extra parts of pictures
transform = transforms.Compose([
    transforms.Resize(IMAGE_SIZE),
    transforms.CenterCrop(IMAGE_SIZE),
    transforms.ToTensor(),
    tensor_normalizer(),
])
transform_list.append(transform)

# (2) padding zeros in the edges of picture
transform = transforms.Compose([
    ZeroPadding(IMAGE_SIZE),
    transforms.ToTensor(),
    tensor_normalizer(),
])
transform_list.append(transform)

print('transforms done')

####################################
# construct ImageLoaders and dataloader
####################################
DATADIR = os.path.join('..', 'Datasets')
BATCHSIZE = 4
def load_and_preprocess(image_file):
    """Load an image and convert it to a normalized 4-D tensor for inference.

    Parameters
    ----------
    image_file : str or file-like
        Anything accepted by ``PIL.Image.open``.

    Returns
    -------
    torch.Tensor
        Normalized tensor of shape (1, 3, H, W), built with gradient
        tracking disabled.
    """
    img = Image.open(image_file).convert('RGB')
    transform = transforms.Compose(
        [transforms.ToTensor(), tensor_normalizer()])
    # Variable(..., volatile=True) is deprecated since PyTorch 0.4: Variables
    # merged into Tensors, and inference-mode gradient suppression is done
    # with torch.no_grad() instead. Returning the tensor is backward
    # compatible because Variable is now an alias for Tensor.
    with torch.no_grad():
        img_tensor = transform(img).unsqueeze(0)
    return img_tensor
# torch.cuda.manual_seed(SEED) # torch.set_default_tensor_type('torch.cuda.FloatTensor') # kwargs = {'num_workers': 4, 'pin_memory': True} # else: # torch.set_default_tensor_type('torch.FloatTensor') # kwargs = {} kwargs = {'num_workers': 4, 'pin_memory': True} IMAGE_SIZE = 256 BATCH_SIZE = 4 DATASET = "./coco/" transform = transforms.Compose([ transforms.Resize(IMAGE_SIZE), transforms.CenterCrop(IMAGE_SIZE), transforms.ToTensor(), tensor_normalizer() ]) # http://pytorch.org/docs/master/torchvision/datasets.html#imagefolder train_dataset = datasets.ImageFolder(DATASET, transform) # http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, **kwargs) LossOutput = namedtuple( "LossOutput", ["conv0_0", "conv1_2", "conv2_3", "conv3_5", "conv4_2"]) # https://discuss.pytorch.org/t/how-to-extract-features-of-an-image-from-a-trained-model/119/3 class LossNetwork(torch.nn.Module):