# --- Code example #1 ---
def main():
    """Interpolate intermediate frames between the two images in ``args.img``.

    Recursively doubles the frame count ``args.exp`` times using
    ``model.inference`` and writes every frame to ``output/img{i}.png``.

    Relies on module-level ``args``, ``device``, ``Model``, ``fixargs``,
    ``cv2``, ``torch``, ``F`` and ``os`` being in scope.
    """
    model = Model()
    # BUG FIX(review): the return value of load_model was bound to an unused
    # local, and a stray ``return`` right after fixargs(args) made everything
    # below unreachable — the script never interpolated or wrote any output.
    model.load_model('./train_log')
    model.eval()
    model.device()

    fixargs(args)

    # Read both frames and convert HWC uint8 -> 1xCxHxW float in [0, 1].
    img0 = cv2.imread(args.img[0])
    img1 = cv2.imread(args.img[1])
    img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device) /
            255.).unsqueeze(0)
    img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device) /
            255.).unsqueeze(0)

    # Pad spatial dims up to the next multiple of 32 (presumably required by
    # the network's strided down/up-sampling); padding is cropped on save.
    n, c, h, w = img0.shape
    ph = ((h - 1) // 32 + 1) * 32
    pw = ((w - 1) // 32 + 1) * 32
    padding = (0, pw - w, 0, ph - h)
    img0 = F.pad(img0, padding)
    img1 = F.pad(img1, padding)

    # Each pass inserts a midpoint between every adjacent pair of frames,
    # roughly doubling the frame count per iteration.
    img_list = [img0, img1]
    for _ in range(args.exp):
        tmp = []
        for j in range(len(img_list) - 1):
            mid = model.inference(img_list[j], img_list[j + 1])
            tmp.append(img_list[j])
            tmp.append(mid)
        tmp.append(img1)
        img_list = tmp

    if not os.path.exists('output'):
        os.mkdir('output')
    for i in range(len(img_list)):
        # Back to HWC uint8 and crop off the padding before writing.
        cv2.imwrite('output/img{}.png'.format(i),
                    (img_list[i][0] * 255).byte().cpu().numpy().transpose(
                        1, 2, 0)[:h, :w])
# --- Code example #2 ---
import os
import sys
sys.path.append('.')
import cv2
import math
import torch
import argparse
import numpy as np
from torch.nn import functional as F
from model.pytorch_msssim import ssim_matlab
from model.RIFE import Model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the pretrained RIFE model and switch it to inference mode on `device`.
model = Model()
model.load_model('train_log')
model.eval()
model.device()

# Sequence names match the Middlebury "other" benchmark layout
# (other-data/<name>/frame10.png etc.) — presumably that dataset.
name = [
    'Beanbags', 'Dimetrodon', 'DogDance', 'Grove2', 'Grove3', 'Hydrangea',
    'MiniCooper', 'RubberWhale', 'Urban2', 'Urban3', 'Venus', 'Walking'
]
# Per-sequence scores; presumably interpolation error (IE) — see truncated
# loop body below.
IE_list = []
for i in name:
    # Input frames as CHW float arrays scaled to [0, 1].
    i0 = cv2.imread('other-data/{}/frame10.png'.format(i)).transpose(2, 0,
                                                                     1) / 255.
    i1 = cv2.imread('other-data/{}/frame11.png'.format(i)).transpose(2, 0,
                                                                     1) / 255.
    # Ground-truth middle frame, kept as HWC uint8.
    gt = cv2.imread('other-gt-interp/{}/frame10i11.png'.format(i))
    h, w = i0.shape[1], i0.shape[2]
    # 6-channel buffer (room for both frames stacked channel-wise) padded to
    # 480x640.  NOTE(review): the rest of the loop body is truncated here.
    imgs = torch.zeros([1, 6, 480, 640]).to(device)
# --- Code example #3 ---
import os
import cv2
import math
import torch
import argparse
import numpy as np
from torch.nn import functional as F
from pytorch_msssim import ssim_matlab
from model.RIFE import Model

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the pretrained RIFE model and switch it to inference mode on `device`.
model = Model()
model.load_model('./train_log')
model.eval()
model.device()

# Vimeo triplet test set: tri_testlist.txt lists triplet directories under
# target/, each holding im1/im2/im3; im2 is presumably the ground-truth
# middle frame (evaluation code is truncated in this view).
path = 'vimeo_interp_test/'
f = open(path + 'tri_testlist.txt', 'r')
psnr_list = []
ssim_list = []
for i in f:
    name = str(i).strip()
    # Skip blank (or single-character) lines in the list file.
    if (len(name) <= 1):
        continue
    print(path + 'target/' + name + '/im1.png')
    I0 = cv2.imread(path + 'target/' + name + '/im1.png')
    I1 = cv2.imread(path + 'target/' + name + '/im2.png')
    I2 = cv2.imread(path + 'target/' + name + '/im3.png')
    # First and last frames -> 1xCxHxW float tensors in [0, 1]; I1 stays as
    # a raw image, presumably for metric computation further down.
    I0 = (torch.tensor(I0.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
    I2 = (torch.tensor(I2.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
# --- Code example #4 ---
import cv2
import sys
sys.path.append('.')
import time
import torch
import torch.nn as nn
from model.RIFE import Model

# Latency benchmark: average wall-clock time of one model.inference() call on
# a pair of random 480x640 frames — 100 warm-up iterations, then 100 timed.
# NOTE: no weights are loaded; random initialization is enough for timing.
model = Model()
model.eval()

cuda_ok = torch.cuda.is_available()
device = torch.device("cuda" if cuda_ok else "cpu")
torch.set_grad_enabled(False)
if cuda_ok:
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True

I0 = torch.rand(1, 3, 480, 640).to(device)
I1 = torch.rand(1, 3, 480, 640).to(device)

with torch.no_grad():
    # Warm-up so cudnn autotuning and lazy allocations don't pollute timing.
    for _ in range(100):
        pred = model.inference(I0, I1)
    if cuda_ok:
        # Drain queued GPU work before starting the clock.
        torch.cuda.synchronize()
    started = time.time()
    for _ in range(100):
        pred = model.inference(I0, I1)
    if cuda_ok:
        torch.cuda.synchronize()
    print((time.time() - started) / 100)
# --- Code example #5 ---
                              np.array(loss_flow_list).mean(), nr_eval)
        writer_val.add_scalar('loss_cons',
                              np.array(loss_cons_list).mean(), nr_eval)
        writer_val.add_scalar('loss_ter',
                              np.array(loss_ter_list).mean(), nr_eval)
        writer_val.add_scalar('psnr', np.array(psnr_list).mean(), nr_eval)


if __name__ == "__main__":
    # Distributed training entry point: parse CLI args, initialize the NCCL
    # process group, seed all RNGs, then hand off to train().
    parser = argparse.ArgumentParser(description='slomo')
    parser.add_argument('--epoch', default=300, type=int)
    parser.add_argument('--batch_size',
                        default=16,
                        type=int,
                        help='minibatch size')
    parser.add_argument('--local_rank', default=0, type=int, help='local rank')
    parser.add_argument('--world_size', default=4, type=int, help='world size')
    args = parser.parse_args()
    # NOTE(review): no rank= is passed here, so the process rank presumably
    # comes from the environment (torch.distributed launcher) — verify.
    torch.distributed.init_process_group(backend="nccl",
                                         world_size=args.world_size)
    # Pin this process to its local GPU.
    torch.cuda.set_device(args.local_rank)
    device = torch.device("cuda", args.local_rank)
    # Fixed seed across python/numpy/torch for reproducibility.
    seed = 1234
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = True
    model = Model(args.local_rank)
    train(model, args.local_rank)
# --- Code example #6 ---
# CLI options for video interpolation (`parser` is created earlier, outside
# this view).
parser.add_argument('--png',
                    dest='png',
                    action='store_true',
                    help='whether to vid_out png format vid_outs')
parser.add_argument('--ext',
                    dest='ext',
                    type=str,
                    default='mp4',
                    help='vid_out video extension')
parser.add_argument('--exp', dest='exp', type=int, default=1)
args = parser.parse_args()
# NOTE(review): assert is stripped under `python -O`; an explicit check with
# parser.error() would be safer for input validation.
assert (args.exp == 1 or args.exp == 2)
# Interpolation factor: exp of 1 -> 2x frames, 2 -> 4x.
args.exp = 2**args.exp

from model.RIFE import Model
model = Model()
model.load_model('./train_log')
model.eval()
model.device()

# Probe the source video for fps and frame count, then release the handle;
# actual decoding is done with skvideo below.
videoCapture = cv2.VideoCapture(args.video)
fps = videoCapture.get(cv2.CAP_PROP_FPS)
tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
videoCapture.release()
# Default output fps scales the input fps by the interpolation factor;
# fpsNotAssigned remembers whether the user overrode it.
if args.fps is None:
    fpsNotAssigned = True
    args.fps = fps * args.exp
else:
    fpsNotAssigned = False
videogen = skvideo.io.vreader(args.video)
lastframe = next(videogen)
# --- Code example #7 ---
import os
import sys
sys.path.append('.')
import cv2
import math
import torch
import argparse
import numpy as np
from torch.nn import functional as F
from model.pytorch_msssim import ssim_matlab
from model.RIFE import Model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the pretrained RIFE model and switch it to inference mode on `device`.
model = Model()
model.load_model('train_log')
model.eval()
model.device()

# UCF101 interpolation benchmark: each directory holds frame_00/frame_02 as
# inputs and frame_01_gt as the ground-truth middle frame.
path = 'UCF101/ucf101_interp_ours/'
dirs = os.listdir(path)
psnr_list = []
ssim_list = []
print(len(dirs))
for d in dirs:
    # Build the three file paths for this sample.
    img0 = (path + d + '/frame_00.png')
    img1 = (path + d + '/frame_02.png')
    gt = (path + d + '/frame_01_gt.png')
    # Inputs -> 1xCxHxW float tensors in [0, 1]; gt stays a path here
    # (NOTE(review): rest of the loop body is truncated in this view).
    img0 = (torch.tensor(cv2.imread(img0).transpose(2, 0, 1) /
                         255.)).to(device).float().unsqueeze(0)
    img1 = (torch.tensor(cv2.imread(img1).transpose(2, 0, 1) /
                         255.)).to(device).float().unsqueeze(0)
# --- Code example #8 ---
import os
import sys
sys.path.append('.')
import cv2
import math
import torch
import argparse
import numpy as np
from torch.nn import functional as F
from pytorch_msssim import ssim_matlab
from model.RIFE import Model
from skimage.color import rgb2yuv, yuv2rgb
from yuv_frame_io import YUV_Read,YUV_Write
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the pretrained RIFE model and switch it to inference mode on `device`.
model = Model()
model.load_model('train_log')
model.eval()
model.device()

# HD benchmark clips as (yuv_path, height, width) tuples; resolutions are
# encoded in both the tuple and the filename.
# NOTE(review): this list literal is truncated (unclosed) in this view.
name_list = [
    ('HD_dataset/HD720p_GT/parkrun_1280x720_50.yuv', 720, 1280),
    ('HD_dataset/HD720p_GT/shields_1280x720_60.yuv', 720, 1280),
    ('HD_dataset/HD720p_GT/stockholm_1280x720_60.yuv', 720, 1280),
    ('HD_dataset/HD1080p_GT/BlueSky.yuv', 1080, 1920),
    ('HD_dataset/HD1080p_GT/Kimono1_1920x1080_24.yuv', 1080, 1920),
    ('HD_dataset/HD1080p_GT/ParkScene_1920x1080_24.yuv', 1080, 1920),
    ('HD_dataset/HD1080p_GT/sunflower_1080p25.yuv', 1080, 1920),
    ('HD_dataset/HD544p_GT/Sintel_Alley2_1280x544.yuv', 544, 1280),
    ('HD_dataset/HD544p_GT/Sintel_Market5_1280x544.yuv', 544, 1280),
    ('HD_dataset/HD544p_GT/Sintel_Temple1_1280x544.yuv', 544, 1280),
# --- Code example #9 ---
import os
import sys
sys.path.append('.')
import cv2
import math
import torch
import argparse
import numpy as np
from torch.nn import functional as F
from pytorch_msssim import ssim_matlab
from model.RIFE import Model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = Model()
# Resolve train_log relative to this script's directory so the benchmark can
# be launched from any working directory.
model.load_model(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train_log'))
model.eval()
model.device()

# Sequence names match the Middlebury "other" benchmark layout
# (other-data/<name>/frame10.png etc.) — presumably that dataset.
name = [
    'Beanbags', 'Dimetrodon', 'DogDance', 'Grove2', 'Grove3', 'Hydrangea',
    'MiniCooper', 'RubberWhale', 'Urban2', 'Urban3', 'Venus', 'Walking'
]
# Per-sequence scores; presumably interpolation error (IE) — loop body is
# truncated in this view.
IE_list = []
for i in name:
    # Input frames as CHW float arrays scaled to [0, 1].
    i0 = cv2.imread('other-data/{}/frame10.png'.format(i)).transpose(2, 0,
                                                                     1) / 255.
    i1 = cv2.imread('other-data/{}/frame11.png'.format(i)).transpose(2, 0,
                                                                     1) / 255.
    # Ground-truth middle frame, kept as HWC uint8.
    gt = cv2.imread('other-gt-interp/{}/frame10i11.png'.format(i))
    h, w = i0.shape[1], i0.shape[2]
# --- Code example #10 ---
import os
import cv2
import torch
import argparse
from torch.nn import functional as F
from model.RIFE import Model

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# CLI: interpolate between exactly two input images.
parser = argparse.ArgumentParser(
    description='Interpolation for a pair of images')
parser.add_argument('--img', dest='img', nargs=2, required=True)
# --times presumably controls how many intermediate frames are produced;
# its use is below the truncation point of this view.
parser.add_argument('--times', default=4, type=int)
args = parser.parse_args()

# Load the pretrained RIFE model and switch it to inference mode on `device`.
model = Model()
model.load_model('./train_log')
model.eval()
model.device()

img0 = cv2.imread(args.img[0])
img1 = cv2.imread(args.img[1])

# HWC uint8 -> 1xCxHxW float tensors in [0, 1].
img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
# Pad spatial dims up to the next multiple of 32 (presumably required by the
# network's strided down/up-sampling stages).
n, c, h, w = img0.shape
ph = ((h - 1) // 32 + 1) * 32
pw = ((w - 1) // 32 + 1) * 32
padding = (0, pw - w, 0, ph - h)
img0 = F.pad(img0, padding)
img1 = F.pad(img1, padding)
# --- Code example #11 ---
import os
import sys
sys.path.append('.')
import cv2
import math
import torch
import argparse
import numpy as np
from torch.nn import functional as F
from model.pytorch_msssim import ssim_matlab
from model.RIFE import Model
from skimage.color import rgb2yuv, yuv2rgb
from yuv_frame_io import YUV_Read,YUV_Write
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# arbitrary=True presumably selects the arbitrary-timestep RIFE-m variant,
# matching the RIFE_m_train_log checkpoint loaded below — confirm in
# model.RIFE.
model = Model(arbitrary=True)
model.load_model('RIFE_m_train_log')
model.eval()
model.device()

# HD benchmark clips as (yuv_path, height, width) tuples; resolutions are
# encoded in both the tuple and the filename.
# NOTE(review): this list literal is truncated (unclosed) in this view.
name_list = [
    ('HD_dataset/HD720p_GT/parkrun_1280x720_50.yuv', 720, 1280),
    ('HD_dataset/HD720p_GT/shields_1280x720_60.yuv', 720, 1280),
    ('HD_dataset/HD720p_GT/stockholm_1280x720_60.yuv', 720, 1280),
    ('HD_dataset/HD1080p_GT/BlueSky.yuv', 1080, 1920),
    ('HD_dataset/HD1080p_GT/Kimono1_1920x1080_24.yuv', 1080, 1920),
    ('HD_dataset/HD1080p_GT/ParkScene_1920x1080_24.yuv', 1080, 1920),
    ('HD_dataset/HD1080p_GT/sunflower_1080p25.yuv', 1080, 1920),
    ('HD_dataset/HD544p_GT/Sintel_Alley2_1280x544.yuv', 544, 1280),
    ('HD_dataset/HD544p_GT/Sintel_Market5_1280x544.yuv', 544, 1280),
    ('HD_dataset/HD544p_GT/Sintel_Temple1_1280x544.yuv', 544, 1280),