def imgint(img1, img2, exp=4):
    """Interpolate intermediate frames between two RGB images with RIFE.

    Args:
        img1: first image (PIL Image or anything ``np.array`` accepts), RGB.
        img2: second image, same format and size as ``img1``.
        exp: number of doubling passes; yields ``2**exp - 1`` in-between
            frames (default 4, matching the previous hard-coded loop).

    Returns:
        Tuple ``(img_list, h, w)``: list of 1xCxPHxPW float tensors in
        [0, 1] (BGR channel order, padded to a multiple of 32) including
        both endpoint frames, plus the original height and width so the
        caller can crop the padding off.
    """
    warnings.filterwarnings("ignore")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    torch.set_grad_enabled(False)  # inference only
    if torch.cuda.is_available():
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True

    model = Model()
    model.load_model('./train_log', -1)
    model.eval()
    model.device()

    # PIL supplies RGB; the model expects OpenCV-style BGR, so reverse the
    # channel axis. .copy() makes the negative-stride view contiguous.
    frame0 = np.array(img1)[:, :, ::-1].copy()
    frame1 = np.array(img2)[:, :, ::-1].copy()

    # HWC uint8 -> 1xCxHxW float in [0, 1] on the chosen device.
    img0 = (torch.tensor(frame0.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
    img1 = (torch.tensor(frame1.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
    n, c, h, w = img0.shape

    # Pad height and width up to the next multiple of 32 (network stride
    # requirement); padding is cropped off by the caller using (h, w).
    ph = ((h - 1) // 32 + 1) * 32
    pw = ((w - 1) // 32 + 1) * 32
    padding = (0, pw - w, 0, ph - h)
    img0 = F.pad(img0, padding)
    img1 = F.pad(img1, padding)

    # Each pass inserts a model-predicted midpoint between every adjacent
    # pair, roughly doubling the frame count per iteration.
    img_list = [img0, img1]
    for _ in range(exp):
        tmp = []
        for j in range(len(img_list) - 1):
            mid = model.inference(img_list[j], img_list[j + 1])
            tmp.append(img_list[j])
            tmp.append(mid)
        tmp.append(img1)
        img_list = tmp

    return img_list, h, w
        # NOTE(review): fragment — the enclosing function's header is not
        # visible in this chunk; `args`, `input_duration`, the file lists
        # and the buffer helper functions are defined elsewhere.

        # Bounded queues decouple disk I/O from inference: producers block
        # once a consumer falls behind, capping memory use.
        write_buffer = Queue(maxsize=mp.cpu_count() - 3)
        incoming_read_buffer = Queue(maxsize=500)
        outgoing_read_buffer = Queue(maxsize=500)

        # Background reader threads fill the two read buffers from disk.
        _thread.start_new_thread(
            build_read_buffer,
            (args.incoming, incoming_read_buffer, incoming_files_list))
        _thread.start_new_thread(
            build_read_buffer,
            (args.outgoing, outgoing_read_buffer, outgoing_files_list))

        # Pick the architecture matching the checkpoint's file name.
        if 'v1.8.model' in args.model:
            from model.RIFE_HD import Model  # type: ignore
        else:
            from model.RIFE_HDv2 import Model  # type: ignore
        model = Model()
        model.load_model(args.model, -1)
        model.eval()
        model.device()
        print('Trained model loaded: %s' % args.model)

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if torch.cuda.is_available():
            # Inference only: no autograd, and let cuDNN autotune kernels.
            torch.set_grad_enabled(False)
            torch.backends.cudnn.enabled = True
            torch.backends.cudnn.benchmark = True

        # Background writer drains the write buffer to args.output.
        _thread.start_new_thread(clear_write_buffer,
                                 (args.output, write_buffer, input_duration))

        # Step between interpolated frames in normalized [0, 1] time.
        rstep = 1 / (input_duration + 1)
# Example #3
# 0
        # NOTE(review): tail of an unseen validation loop. Logs the mean
        # PSNR over psnr_list to TensorBoard at step nr_eval.
        writer_val.add_scalar('psnr', np.array(psnr_list).mean(), nr_eval)


if __name__ == "__main__":
    # CLI entry point for distributed RIFE ("slomo") training.
    arg_parser = argparse.ArgumentParser(description='slomo')
    arg_parser.add_argument('--epoch', default=300, type=int)
    arg_parser.add_argument('--batch_size', default=16, type=int,
                            help='minibatch size')
    arg_parser.add_argument('--local_rank', default=0, type=int,
                            help='local rank')
    arg_parser.add_argument('--world_size', default=4, type=int,
                            help='world size')
    arg_parser.add_argument('--model', dest='model', type=str,
                            default='./trained_models/default/v1.8.model')
    args = arg_parser.parse_args()

    # One process per GPU: join the NCCL group first, then pin this
    # process to its local device before any CUDA work happens.
    torch.distributed.init_process_group(backend="nccl",
                                         world_size=args.world_size)
    torch.cuda.set_device(args.local_rank)
    device = torch.device("cuda", args.local_rank)

    # Seed every RNG so all workers start from the same state.
    SEED = 1234
    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    torch.backends.cudnn.benchmark = True

    model = Model(args.local_rank)
    model.load_model(args.model, args.local_rank)
    train(model, args.local_rank)
# Example #4
# 0
import warnings
warnings.filterwarnings("ignore")

# Inference-only setup: pick GPU when available and disable autograd.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.set_grad_enabled(False)
if torch.cuda.is_available():
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True

parser = argparse.ArgumentParser(
    description='Interpolation for a pair of images')
parser.add_argument('--img', dest='img', nargs=2, required=True)
parser.add_argument('--exp', default=4, type=int)
args = parser.parse_args()

model = Model()
model.load_model('./train_log', -1)
model.eval()
model.device()

if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
    # EXR inputs are float/HDR, so no division by 255 here.
    img0 = cv2.imread(args.img[0], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
    img1 = cv2.imread(args.img[1], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
    img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device)).unsqueeze(0)
    img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device)).unsqueeze(0)

else:
    # 8-bit images: HWC -> 1xCxHxW and normalize to [0, 1].
    img0 = cv2.imread(args.img[0])
    img1 = cv2.imread(args.img[1])
    img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device) /
            255.).unsqueeze(0)
    # NOTE(review): this snippet is truncated here — the matching
    # conversion of img1 is missing from this chunk.
# Example #5
# 0
                    dest='png',
                    action='store_true',
                    help='whether to vid_out png format vid_outs')
parser.add_argument('--ext',
                    dest='ext',
                    type=str,
                    default='mp4',
                    help='vid_out video extension')
parser.add_argument('--exp', dest='exp', type=int, default=1)
args = parser.parse_args()
# NOTE(review): fragment — the parser construction and the first
# add_argument call begin above this chunk.
# At least one input source (a video or an image pair) must be given.
assert (not args.video is None or not args.img is None)
if not args.img is None:
    # Image input always produces PNG frame output.
    args.png = True

from model.RIFE_HD import Model
model = Model()
model.load_model('./train_log', -1)
model.eval()
model.device()

if not args.video is None:
    # Probe source FPS and frame count, then release the capture; the
    # frames themselves are streamed via skvideo below.
    videoCapture = cv2.VideoCapture(args.video)
    fps = videoCapture.get(cv2.CAP_PROP_FPS)
    tot_frame = videoCapture.get(cv2.CAP_PROP_FRAME_COUNT)
    videoCapture.release()
    if args.fps is None:
        # No target FPS given: double the rate per interpolation pass.
        fpsNotAssigned = True
        args.fps = fps * (2**args.exp)
    else:
        fpsNotAssigned = False
    videogen = skvideo.io.vreader(args.video)
# Example #6
# 0
    # NOTE(review): tail of make_inference — its signature and base case
    # are above this chunk. Recurse on each half around the middle frame.
    first_half = make_inference(I0, middle, exp=exp - 1, sec_batch=True)
    second_half = make_inference(middle, I1, exp=exp - 1, sec_batch=True)
    # Concatenate: left half, middle frame, right half (presumably
    # 2**exp - 1 frames in total — confirm against the full function).
    return [*first_half, middle, *second_half]


# Image-pair input always produces PNG frame output.
if args.img:
    args.png = True

# Version 1 checkpoints use the HD architecture; everything else HDv2.
if args.model == 1:
    from model.RIFE_HD import Model
else:
    from model.RIFE_HDv2 import Model

model = Model()
train_log_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'train_log')

# NOTE: create train_log\1.8 and train_log\2.0 manually to hold the
# per-version RIFE weight sets.
version_dirs = {1: os.path.join(train_log_dir, "1.8"),
                2: os.path.join(train_log_dir, "2.0")}
weights_dir = version_dirs.get(args.model)
if weights_dir is not None and os.path.exists(weights_dir):
    model.load_model(weights_dir, -1)
else:
    # Fall back to the bare train_log directory next to this script.
    model.load_model(
        os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train_log'),
        -1)
model.eval()
# Example #7
# 0
import warnings
warnings.filterwarnings("ignore")

# Inference-only setup: pick GPU when available and disable autograd.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.set_grad_enabled(False)
if torch.cuda.is_available():
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True

parser = argparse.ArgumentParser(
    description='Interpolation for a pair of images')
parser.add_argument('--img', dest='img', nargs=2, required=True)
parser.add_argument('--exp', default=4, type=int)
args = parser.parse_args()

# Load weights from the train_log directory next to this script (the path
# is made absolute so the working directory does not matter).
model = Model()
model.load_model(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train_log'), -1)
model.eval()
model.device()

if args.img[0].endswith('.exr') and args.img[1].endswith('.exr'):
    # EXR inputs are float/HDR, so no division by 255 here.
    img0 = cv2.imread(args.img[0], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
    img1 = cv2.imread(args.img[1], cv2.IMREAD_COLOR | cv2.IMREAD_ANYDEPTH)
    img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device)).unsqueeze(0)
    img1 = (torch.tensor(img1.transpose(2, 0, 1)).to(device)).unsqueeze(0)

else:
    img0 = cv2.imread(args.img[0])
    img1 = cv2.imread(args.img[1])
    # NOTE(review): this snippet is truncated mid-statement below.
    img0 = (torch.tensor(img0.transpose(2, 0, 1)).to(device) /