def main():
    model = Model()
    model.load_model('./train_log')
    model.eval()
    model.device()

    img0 = cv2.imread(args.img[0])
    img1 = cv2.imread(args.img[1])
    # HWC uint8 -> 1xCxHxW float tensor in [0, 1]
    img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
    img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
    n, c, h, w = img0.shape
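    # pad H and W up to the next multiple of 32 (the model expects dimensions
    # divisible by 32); the padding is cropped off again when the frames are saved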
    ph = ((h - 1) // 32 + 1) * 32
    pw = ((w - 1) // 32 + 1) * 32
    padding = (0, pw - w, 0, ph - h)
    img0 = F.pad(img0, padding)
    img1 = F.pad(img1, padding)

    img_list = [img0, img1]
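    # each pass inserts a synthesized midpoint between every adjacent pair, so
    # args.exp passes yield 2**args.exp - 1 in-between frames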
    for i in range(args.exp):
        tmp = []
        for j in range(len(img_list) - 1):
            mid = model.inference(img_list[j], img_list[j + 1])
            tmp.append(img_list[j])
            tmp.append(mid)
        tmp.append(img1)
        img_list = tmp

    if not os.path.exists('output'):
        os.mkdir('output')
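    # crop off the padding and save each frame as an 8-bit PNG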
    for i in range(len(img_list)):
        cv2.imwrite('output/img{}.png'.format(i),
                    (img_list[i][0] * 255).byte().cpu().numpy().transpose(1, 2, 0)[:h, :w])
Example 2
import os
import sys
sys.path.append('.')
import cv2
import math
import torch
import argparse
import numpy as np
from torch.nn import functional as F
from model.pytorch_msssim import ssim_matlab
from model.RIFE import Model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = Model()
model.load_model('train_log')
model.eval()
model.device()

name = [
    'Beanbags', 'Dimetrodon', 'DogDance', 'Grove2', 'Grove3', 'Hydrangea',
    'MiniCooper', 'RubberWhale', 'Urban2', 'Urban3', 'Venus', 'Walking'
]
IE_list = []
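# Middlebury 'other' benchmark: interpolate between frame10 and frame11 of each
# sequence and compare the result against the ground-truth in-between frame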
for i in name:
    i0 = cv2.imread('other-data/{}/frame10.png'.format(i)).transpose(2, 0, 1) / 255.
    i1 = cv2.imread('other-data/{}/frame11.png'.format(i)).transpose(2, 0, 1) / 255.
    gt = cv2.imread('other-gt-interp/{}/frame10i11.png'.format(i))
    h, w = i0.shape[1], i0.shape[2]
    imgs = torch.zeros([1, 6, 480, 640]).to(device)
Example 3
import os
import sys
sys.path.append('.')
import cv2
import math
import torch
import argparse
import numpy as np
from torch.nn import functional as F
from pytorch_msssim import ssim_matlab
from model.RIFE import Model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = Model()
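# the checkpoint directory is resolved relative to this file so the script
# can be run from any working directory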
model.load_model(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train_log'))
model.eval()
model.device()

name = [
    'Beanbags', 'Dimetrodon', 'DogDance', 'Grove2', 'Grove3', 'Hydrangea',
    'MiniCooper', 'RubberWhale', 'Urban2', 'Urban3', 'Venus', 'Walking'
]
IE_list = []
for i in name:
    i0 = cv2.imread('other-data/{}/frame10.png'.format(i)).transpose(2, 0, 1) / 255.
    i1 = cv2.imread('other-data/{}/frame11.png'.format(i)).transpose(2, 0, 1) / 255.
    gt = cv2.imread('other-gt-interp/{}/frame10i11.png'.format(i))
    h, w = i0.shape[1], i0.shape[2]
Example 4
parser.add_argument('--png',
                    dest='png',
                    action='store_true',
                    help='whether to vid_out png format vid_outs')
parser.add_argument('--ext',
                    dest='ext',
                    type=str,
                    default='mp4',
                    help='vid_out video extension')
parser.add_argument('--exp', dest='exp', type=int, default=1)
args = parser.parse_args()
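# only 2x (--exp 1) or 4x (--exp 2) interpolation is handled here; convert the
# number of doubling passes into the overall frame-rate multiplier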
assert args.exp in (1, 2)
args.exp = 2**args.exp

from model.RIFE import Model
model = Model()
model.load_model('./train_log', -1)
model.eval()
model.device()

videoCapture = cv2.VideoCapture(args.video)
fps = videoCapture.get(cv2.CAP_PROP_FPS)
tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
videoCapture.release()
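# if no target fps was given, scale the source fps by the interpolation factor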
if args.fps is None:
    fpsNotAssigned = True
    args.fps = fps * args.exp
else:
    fpsNotAssigned = False
videogen = skvideo.io.vreader(args.video)
lastframe = next(videogen)
h, w, _ = lastframe.shape
Example 5
import sys
sys.path.append('.')
import cv2
import math
import torch
import argparse
import numpy as np
from torch.nn import functional as F
from model.pytorch_msssim import ssim_matlab
from model.RIFE import Model
from skimage.color import rgb2yuv, yuv2rgb
from yuv_frame_io import YUV_Read, YUV_Write
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# arbitrary=True selects the RIFE_m variant, which supports arbitrary-timestep interpolation
model = Model(arbitrary=True)
model.load_model('RIFE_m_train_log')
model.eval()
model.device()

# HD benchmark clips, listed as (YUV path, height, width)
name_list = [
    ('HD_dataset/HD720p_GT/parkrun_1280x720_50.yuv', 720, 1280),
    ('HD_dataset/HD720p_GT/shields_1280x720_60.yuv', 720, 1280),
    ('HD_dataset/HD720p_GT/stockholm_1280x720_60.yuv', 720, 1280),
    ('HD_dataset/HD1080p_GT/BlueSky.yuv', 1080, 1920),
    ('HD_dataset/HD1080p_GT/Kimono1_1920x1080_24.yuv', 1080, 1920),
    ('HD_dataset/HD1080p_GT/ParkScene_1920x1080_24.yuv', 1080, 1920),
    ('HD_dataset/HD1080p_GT/sunflower_1080p25.yuv', 1080, 1920),
    ('HD_dataset/HD544p_GT/Sintel_Alley2_1280x544.yuv', 544, 1280),
    ('HD_dataset/HD544p_GT/Sintel_Market5_1280x544.yuv', 544, 1280),
    ('HD_dataset/HD544p_GT/Sintel_Temple1_1280x544.yuv', 544, 1280),
    ('HD_dataset/HD544p_GT/Sintel_Temple2_1280x544.yuv', 544, 1280),