Example #1

import os

import cv2
import numpy as np
import torch
from tqdm import tqdm

# assumed project-local imports: Model from the RIFE HDv2 definition,
# metric helpers as sketched after this example
from model.RIFE_HDv2 import Model


def main():

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Model()
    model.load_model('../train_log/HDv2', -1)
    model.eval()
    model.device()

    path = '../../data/UCF101/ucf101_interp_ours/'
    dirs = os.listdir(path)

    psnr_list = []
    ssim_list = []
    time_list = []
    # print('=========>Start Calculate PSNR and SSIM')
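    # each test directory holds an input pair (frame_00, frame_02) and the ground-truth middle frame (frame_01_gt)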
    for d in tqdm(dirs):
        img0 = (path + d + '/frame_00.png')
        img1 = (path + d + '/frame_02.png')
        gt = (path + d + '/frame_01_gt.png')
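        # cv2.imread returns HWC uint8 in BGR order; transpose to CHW and scale to [0, 1]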
        img0 = (torch.tensor(cv2.imread(img0).transpose(2, 0, 1) /
                             255.)).to(device).float().unsqueeze(0)
        img1 = (torch.tensor(cv2.imread(img1).transpose(2, 0, 1) /
                             255.)).to(device).float().unsqueeze(0)
        gt = (torch.tensor(cv2.imread(gt).transpose(2, 0, 1) /
                           255.)).to(device).float().unsqueeze(0)
        # reference output: Avg PSNR: 35.243666269214145 SSIM: 0.9683315742368154 Time: 0.133457749911717
        # inference
        pred = model.inference(img0, img1)[0]

        # compute quality metrics
        out = pred.detach().cpu().numpy().transpose(1, 2, 0)
        out = np.round(out * 255) / 255.
        gt = gt[0].cpu().numpy().transpose(1, 2, 0)
        psnr = compute_psnr(gt, out)
        ssim = compute_ssim(gt, out)
        psnr_list.append(psnr)
        ssim_list.append(ssim)
    # print("Avg PSNR: {} SSIM: {}".format(np.mean(psnr_list), np.mean(ssim_list)))
    # print('=========>Start Calculate Inference Time')

    # inference time
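    # CUDA events record timestamps on the GPU stream; elapsed_time() returns milliseconds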
    for i in range(100):
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        pred = model.inference(img0, img1)[0]
        end.record()
        torch.cuda.synchronize()
        time_list.append(start.elapsed_time(end))
    # drop the single fastest and slowest runs to reduce timing noise
    time_list.remove(min(time_list))
    time_list.remove(max(time_list))
    print("Avg PSNR: {} SSIM: {} Time: {}".format(np.mean(psnr_list),
                                                  np.mean(ssim_list),
                                                  np.mean(time_list) / 1000))  # ms -> s
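
compute_psnr and compute_ssim are project helpers that this excerpt does not define. A minimal sketch built on scikit-image (an assumption about their behavior, not the benchmark's actual implementation; inputs are the HWC float arrays in [0, 1] produced above):

from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def compute_psnr(gt, out):
    # data_range=1.0 because both images are normalized to [0, 1]
    return peak_signal_noise_ratio(gt, out, data_range=1.0)

def compute_ssim(gt, out):
    # channel_axis=-1 treats the trailing axis as color channels
    return structural_similarity(gt, out, data_range=1.0, channel_axis=-1)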
Example #2
parser.add_argument('--rmaxcycles',
                    default=8,
                    type=int,
                    help='limit max number of bisectional cycles')
parser.add_argument('--model',
                    dest='modelDir',
                    type=str,
                    default='train_log',
                    help='directory with trained model files')

args = parser.parse_args()

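# try model variants from newest to oldest; a failed import or checkpoint load falls through to the next branch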
try:
    try:
        from model.RIFE_HDv2 import Model
        model = Model()
        model.load_model(args.modelDir, -1)
        print("Loaded v2.x HD model.")
    except:
        from train_log.RIFE_HDv3 import Model
        model = Model()
        model.load_model(args.modelDir, -1)
        print("Loaded v3.x HD model.")
except:
    from model.RIFE_HD import Model
    model = Model()
    model.load_model(args.modelDir, -1)
    print("Loaded v1.x HD model")
model.eval()
model.device()
if args.UHD and args.scale == 1.0:
    args.scale = 0.5
assert args.scale in [0.25, 0.5, 1.0, 2.0, 4.0]
if args.img is not None:
    args.png = True
    
Example #3

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.set_grad_enabled(False)
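# inference only: autograd is disabled globally, and cudnn benchmarking autotunes kernels for fixed input shapes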
if torch.cuda.is_available():
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    if args.fp16:
        torch.set_default_tensor_type(torch.cuda.HalfTensor)

try:
    from model.RIFE_HDv2 import Model
    model = Model()
    model.load_model(args.modelDir, -1)
    print("Loaded v2.x HD model.")
except:
    from model.RIFE_HD import Model
    model = Model()
    model.load_model(args.modelDir, -1)
    print("Loaded v1.x HD model")
model.eval()
model.device()

if args.video is not None:
    videoCapture = cv2.VideoCapture(args.video)
    fps = videoCapture.get(cv2.CAP_PROP_FPS)
    tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
    videoCapture.release()
Example #4
parser.add_argument('--output', dest='output', type=str, default=None)
parser.add_argument('--img', dest='img', type=str, default=None)
parser.add_argument('--montage', dest='montage', action='store_true', help='montage the original video alongside the output')
parser.add_argument('--UHD', dest='UHD', action='store_true', help='support 4K video')
parser.add_argument('--skip', dest='skip', action='store_true', help='whether to remove static frames before processing')
parser.add_argument('--fps', dest='fps', type=int, default=None, help='output frame rate (default: input fps * 2**exp)')
parser.add_argument('--png', dest='png', action='store_true', help='output frames as PNG files instead of a video')
parser.add_argument('--ext', dest='ext', type=str, default='mp4', help='output video extension')
parser.add_argument('--exp', dest='exp', type=int, default=1, help='frame-rate multiplier exponent: output has 2**exp times the frames')
args = parser.parse_args()
assert args.video is not None or args.img is not None
if args.img is not None:
    args.png = True

from model.RIFE_HDv2 import Model
model = Model()
model.load_model(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train_log'), -1)
model.eval()
model.device()

if args.video is not None:
    videoCapture = cv2.VideoCapture(args.video)
    fps = videoCapture.get(cv2.CAP_PROP_FPS)
    tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
    videoCapture.release()
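    # each interpolation pass doubles the frame count, so the default output rate is fps * 2**exp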
    if args.fps is None:
        fpsNotAssigned = True
        args.fps = fps * (2 ** args.exp)
    else:
        fpsNotAssigned = False
    videogen = skvideo.io.vreader(args.video)
Example #5
    eval_time_interval = time.time() - time_stamp
    if local_rank == 0:
        print('eval time: {}'.format(eval_time_interval))
        writer_val.add_scalar('loss_l1', np.array(loss_l1_list).mean(), nr_eval)
        writer_val.add_scalar('loss_flow', np.array(loss_flow_list).mean(), nr_eval)
        writer_val.add_scalar('loss_cons', np.array(loss_cons_list).mean(), nr_eval)
        writer_val.add_scalar('loss_ter', np.array(loss_ter_list).mean(), nr_eval)
        writer_val.add_scalar('psnr', np.array(psnr_list).mean(), nr_eval)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='slomo')
    parser.add_argument('--epoch', default=300, type=int)
    parser.add_argument('--batch_size', default=16, type=int, help='minibatch size')
    parser.add_argument('--local_rank', default=0, type=int, help='local rank')
    parser.add_argument('--world_size', default=4, type=int, help='world size')
    args = parser.parse_args()
    torch.distributed.init_process_group(backend="nccl", world_size=args.world_size)
    torch.cuda.set_device(args.local_rank)
    device = torch.device("cuda", args.local_rank)
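    # fix all RNG seeds so every process starts from the same state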
    seed = 1234
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = True
    model = Model(args.local_rank)
    model.load_model(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train_log'), 0)
    train(model, args.local_rank)
Example #6
parser.add_argument('--ratio',
                    default=0,
                    type=float,
                    help='inference ratio between two images with 0 - 1 range')
parser.add_argument(
    '--rthreshold',
    default=0.02,
    type=float,
    help='returns image when actual ratio falls in given range threshold')
parser.add_argument('--rmaxcycles',
                    default=8,
                    type=int,
                    help='limit max number of bisectional cycles')
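# --ratio picks an arbitrary time point between the two frames; inference bisects the interval
# until the reached ratio is within --rthreshold of the target, giving up after --rmaxcycles splits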
args = parser.parse_args()

model = Model()
model.load_model(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train_log'), -1)
model.eval()
model.device()

if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
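    # EXR stores float HDR data, so it is read with IMREAD_ANYDEPTH and not rescaled by 255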
    img0 = cv2.imread(args.img[0], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
    img1 = cv2.imread(args.img[1], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
    img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device)).unsqueeze(0)
    img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device)).unsqueeze(0)

else:
    img0 = cv2.imread(args.img[0])
    img1 = cv2.imread(args.img[1])
    img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
    img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)