def load_and_preprocess(image_file, size):
    """Load an image, optionally resize it, and return a normalized (1, C, H, W) tensor."""
    img = Image.open(image_file).convert('RGB')
    # `size` arrives as a string (e.g. from a CLI argument), so "None" means keep the original size
    if size != "None":
        transform = transforms.Compose([
            transforms.Resize(int(size)),
            transforms.ToTensor(),
            tensor_normalizer()])
    else:
        transform = transforms.Compose([
            transforms.ToTensor(),
            tensor_normalizer()])
    img_tensor = transform(img).unsqueeze(0)  # add the batch dimension
    return img_tensor
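# A minimal usage sketch for load_and_preprocess; the file path is a placeholder.
# Note that `size` is passed as a string to match the "None" check above.
content = load_and_preprocess("content/example.jpg", "512")       # shorter side resized to 512
content_full = load_and_preprocess("content/example.jpg", "None") # original resolution
print(content.shape)  # e.g. torch.Size([1, 3, 512, W])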
import cv2
import skvideo.io
import numpy as np
import torch
from torchvision import transforms
from torch.autograd import Variable  # legacy import; plain tensors suffice in modern PyTorch
from PIL import Image
from fast_neural_style.transformer_net import TransformerNet
from fast_neural_style.utils import recover_image, tensor_normalizer
from tqdm import tqdm

# Preprocess Pipeline
preprocess = transforms.Compose([
    transforms.Resize(1024),
    transforms.ToTensor(),
    tensor_normalizer()])

# Setup the Model Architecture
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
transformer = TransformerNet()
transformer.to(device)

# Load the Model and Stylize the Content Video
data_path = '/tmp2/vincentwu929/DIP_final/Sky/content/sky2/Video2_HR.avi'
mask_path = '/tmp2/vincentwu929/DIP_final/Sky/mask/mask_HR2.png'
video_name = 'Video2_HR'
style_name = 'ZaoWouKi'  # 'crayon', 'fountainpen', 'ZaoWouKi'
BATCH_SIZE = 8
# np.bool was removed in NumPy 1.24+; cast with the builtin bool instead
mask = cv2.imread(mask_path).astype(bool)
save_model_path = "./models/" + style_name + "_10000_unstable_vgg19.pth"
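# A hedged sketch of the stylization loop implied by the setup above, assuming the
# checkpoint at save_model_path is a plain state_dict, that recover_image() maps a
# normalized (B, C, H, W) array back to uint8 HWC frames, and that the mask matches
# the video resolution. The masked compositing step is illustrative.
transformer.load_state_dict(torch.load(save_model_path, map_location=device))
transformer.eval()

frames = skvideo.io.vread(data_path)  # (num_frames, H, W, 3) uint8 RGB
stylized = []
with torch.no_grad():
    for i in tqdm(range(0, len(frames), BATCH_SIZE)):
        batch = torch.stack([preprocess(Image.fromarray(f))
                             for f in frames[i:i + BATCH_SIZE]]).to(device)
        out = transformer(batch).cpu()
        for frame, orig in zip(out, frames[i:i + BATCH_SIZE]):
            styled = recover_image(frame.unsqueeze(0).numpy())[0]
            # resize back to the source resolution, then keep the original
            # pixels wherever the mask is False (non-sky regions)
            styled = cv2.resize(styled, (orig.shape[1], orig.shape[0]))
            stylized.append(np.where(mask, styled, orig))
skvideo.io.vwrite(video_name + "_" + style_name + ".mp4", np.array(stylized))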
if torch.cuda.is_available():
    torch.cuda.manual_seed(SEED)  # SEED is set in an earlier cell
    kwargs = {'num_workers': 4, 'pin_memory': True}
else:
    kwargs = {}

# In[3]:

IMAGE_SIZE = 224
BATCH_SIZE = 4
DATASET = "../coco_2017/"

transform = transforms.Compose([
    transforms.Resize(IMAGE_SIZE),
    transforms.CenterCrop(IMAGE_SIZE),
    transforms.ToTensor(),
    tensor_normalizer()
])
# http://pytorch.org/docs/master/torchvision/datasets.html#imagefolder
train_dataset = datasets.ImageFolder(DATASET, transform)
# http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, **kwargs)

# In[4]:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# build the (frozen) VGG-based loss network without tracking gradients
with torch.no_grad():
    loss_network = LossNetwork()
loss_network.to(device)
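# In[ ]:

# A hedged sketch of one perceptual-loss training step over train_loader, assuming
# loss_network returns an indexable sequence of VGG feature maps and TransformerNet
# is importable as elsewhere in this repo. The feature index, learning rate, and the
# gram_matrix helper are illustrative, not this notebook's exact choices.
import torch.nn.functional as F
from fast_neural_style.transformer_net import TransformerNet

def gram_matrix(feat):
    # batched Gram matrix, normalized by C*H*W (used for style losses)
    b, c, h, w = feat.size()
    flat = feat.view(b, c, h * w)
    return flat.bmm(flat.transpose(1, 2)) / (c * h * w)

transformer = TransformerNet().to(device)
optimizer = torch.optim.Adam(transformer.parameters(), lr=1e-3)

for x, _ in train_loader:
    x = x.to(device)
    optimizer.zero_grad()
    with torch.no_grad():
        features_x = loss_network(x)  # fixed content targets
    y = transformer(x)
    features_y = loss_network(y)
    # content loss on one intermediate feature map; a full run would add
    # style (Gram matrix) and total-variation terms
    loss = F.mse_loss(features_y[1], features_x[1])
    loss.backward()
    optimizer.step()
    break  # one illustrative step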
get_ipython().run_line_magic('matplotlib', 'inline')

from PIL import Image
import skvideo.io
import numpy as np
import torch
from torchvision import transforms
from tqdm import tqdm_notebook

from fast_neural_style.transformer_net import TransformerNet
from fast_neural_style.utils import recover_image, tensor_normalizer

# In[2]:

preprocess = transforms.Compose([
    transforms.ToTensor(),
    tensor_normalizer()])

# In[3]:

transformer = TransformerNet()

# ## Low Resolution GIF Animation

# Convert gif file to video file:
# ```
# ffmpeg -f gif -i cat.gif cat.mp4
# ```

# In[ ]:

skvideo.io.ffprobe("videos/cat.mp4")
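# In[ ]:

# A minimal follow-up sketch: stream the probed video frame by frame, stylize each
# frame with the transformer, and write the result back out. The checkpoint path is
# a placeholder, and recover_image() is assumed to return uint8 HWC frames.
transformer.load_state_dict(torch.load("models/example.pth", map_location="cpu"))
transformer.eval()

writer = skvideo.io.FFmpegWriter("videos/cat_stylized.mp4")
with torch.no_grad():
    for frame in skvideo.io.vreader("videos/cat.mp4"):
        x = preprocess(Image.fromarray(frame)).unsqueeze(0)
        y = transformer(x)
        writer.writeFrame(recover_image(y.numpy())[0])
writer.close()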