from torchvision import transforms
from torch.autograd import Variable
from PIL import Image
from fast_neural_style.transformer_net import TransformerNet
from fast_neural_style.utils import recover_image, tensor_normalizer
from tqdm import tqdm

# Preprocess Pipeline: resize the short side to 1024, convert to a CHW float
# tensor, then apply the project's normalization (see tensor_normalizer).
preprocess = transforms.Compose(
    [transforms.Resize(1024),
     transforms.ToTensor(),
     tensor_normalizer()])

# Setup the Model Architecture on GPU when available, CPU otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
transformer = TransformerNet()
transformer.to(device)

# Load the Model and Stylize the Content Video.
data_path = '/tmp2/vincentwu929/DIP_final/Sky/content/sky2/Video2_HR.avi'
mask_path = '/tmp2/vincentwu929/DIP_final/Sky/mask/mask_HR2.png'
video_name = 'Video2_HR'
style_name = 'ZaoWouKi'  #'crayon', 'fountainpen', 'ZaoWouKi'
BATCH_SIZE = 8

# FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# bool is the documented drop-in replacement and yields the same boolean mask.
mask = cv2.imread(mask_path).astype(bool)
save_model_path = "./models/" + style_name + "_10000_unstable_vgg19.pth"
transformer.load_state_dict(torch.load(save_model_path))
transformer.eval()  # inference mode: fixes batch-norm/dropout behavior
batch = []          # frames accumulated up to BATCH_SIZE before stylizing
videogen = skvideo.io.FFmpegReader(data_path)
# Example #2
# 0
def load_model(model_file):
    """Build a fresh TransformerNet and restore its trained weights.

    Parameters
    ----------
    model_file : str or path-like
        Path to a checkpoint saved with ``torch.save(model.state_dict(), ...)``.

    Returns
    -------
    TransformerNet
        The network with the checkpoint's parameters loaded.
    """
    net = TransformerNet()
    state = torch.load(model_file)
    net.load_state_dict(state)
    return net
# Example #3
# 0
import skvideo.io
import numpy as np
import torch
from torchvision import transforms
from tqdm import tqdm_notebook

from fast_neural_style.transformer_net import TransformerNet
from fast_neural_style.utils import recover_image, tensor_normalizer

# In[2]:

# Preprocessing for frames: PIL image -> normalized CHW float tensor.
_preprocess_steps = [
    transforms.ToTensor(),
    tensor_normalizer(),
]
preprocess = transforms.Compose(_preprocess_steps)

# In[3]:

# Instantiate the style-transfer network; weights are loaded further below.
transformer = TransformerNet()

# ## Low Resolution GIF Animation

# Convert gif file to video file:
# ```
# ffmpeg -f gif -i cat.gif cat.mp4
# ```

# In[ ]:

# Probe the converted video's metadata (codec, frame count, resolution).
# NOTE(review): assumes videos/cat.mp4 exists relative to the working dir.
skvideo.io.ffprobe("videos/cat.mp4")

# In[2]:

# Restore trained "udine" style weights; presumably a state_dict checkpoint
# saved by the training script — confirm the path against the repo layout.
transformer.load_state_dict(torch.load("../models/udine_10000.pth"))
    # Fragment of a CLI entry point: `parser` is created above this excerpt.
    # -with_mask is a flag (True when present); -mask_path points at the
    # sky-mask image used when masking is enabled.
    parser.add_argument('-with_mask', action='store_true', help='Apply mask?')
    parser.add_argument(
        '-mask_path',
        type=str,
        default=
        "/home/vincentwu-cmlab/Downloads/DIP_final/Sky/mask/mask_HR1.png",
        help='Path of the mask data')
    # NOTE(review): "ouput" typo lives in the user-visible help string;
    # left untouched here since help text is runtime output.
    parser.add_argument('-output_name',
                        type=str,
                        default="Video1_HR",
                        help='Name of the ouput file')

    opts = parser.parse_args()

    # Prefer GPU when available; the model is moved to that device in place.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    transformer = TransformerNet()
    transformer.to(device)

    if opts.mode == 'train' or opts.mode == 'all':
        # Seed every RNG source so training runs are reproducible.
        np.random.seed(opts.seed)
        torch.manual_seed(opts.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(opts.seed)
            # Make newly created tensors default to CUDA floats; kwargs tune
            # the DataLoader for GPU transfer (pinned memory, worker count).
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
            kwargs = {'num_workers': 4, 'pin_memory': True}
        else:
            torch.set_default_tensor_type('torch.FloatTensor')
            kwargs = {}

        # Dataloader
        # NOTE(review): the Compose(...) call continues past this excerpt.
        transform = transforms.Compose([
# Example #5
# 0
    # Fragment of a larger function (its def and the construction of
    # style_models / pil_image / numpy_image / styled_images are above this
    # excerpt). Run each style network over the input image and collect the
    # results, resized back to the original image's width x height.
    for model in style_models:
        styled = style_utils.tensor_to_numpy_image(
            forward_pass(model, pil_image))
        # cv2.resize takes (width, height); numpy arrays are (height, width).
        styled_images.append(
            cv2.resize(styled, (numpy_image.shape[1], numpy_image.shape[0])))

    # Final candidate: a grayscale version of the untouched input.
    styled_images.append(grayscale(numpy_image))

    # NOTE(review): mask and scored_masks are produced earlier in this
    # function, outside the excerpt — confirm their shapes against callers.
    return mask, scored_masks, styled_images


if __name__ == '__main__':
    #load style models
    # Each model follows the same recipe: construct TransformerNet, restore
    # a trained checkpoint, switch to eval mode, move to GPU, and collect it.
    # NOTE(review): this excerpt is cut off after starry2.cuda(); presumably
    # starry2 is appended to models just past the visible lines.
    models = []

    mosaic = TransformerNet()
    mosaic.load_state_dict(torch.load('models/mosaic/mosaic_0.4.1.model'))
    mosaic.eval()
    mosaic.cuda()
    models.append(mosaic)

    starry = TransformerNet()
    starry.load_state_dict(torch.load('models/starry_night/starry01.model'))
    starry.eval()
    starry.cuda()
    models.append(starry)

    starry2 = TransformerNet()
    starry2.load_state_dict(torch.load('models/starry_night/starry02.model'))
    starry2.eval()
    starry2.cuda()