Example #1
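All of the test_net snippets below come from the same video super-resolution evaluation script. They rely on module-level globals (test_LR_dir, test_HR_dir, result_dir) and helpers (construct_model, compute_hr, frames_to_video, y4m2yuv) defined elsewhere in that script; a plausible, minimal set of imports they all assume is:

import glob
import os
import os.path as osp

import cv2
import numpy as np
import torch
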
def test_net(model):
    test_LR_dir = '../../dataset/test_imgs/youku_00150_00199_l'
    test_HR_dir = '../../dataset/test_imgs/youku_00150_00199_h_GT'
    video_list = []
    # Each sub-directory of test_LR_dir holds the frames of one low-resolution clip.
    for name in os.listdir(test_LR_dir):
        if osp.isdir(osp.join(test_LR_dir, name)):
            video_list.append(name)

    for video_name in video_list:
        print('testing  ', video_name) 
        imglist = glob.glob(osp.join(test_LR_dir, video_name, '*.bmp'))
        result_img_dir = osp.join(result_dir, video_name)
        if not osp.exists(result_img_dir):
            os.mkdir(result_img_dir)
        for img_path in imglist:
            img  = cv2.imread(img_path) / 255
            img_name = img_path.split('/')[-1]
            img = torch.as_tensor(img.transpose(2, 0, 1)[None]).float().cuda()
            HR_img = model(img) * 255
            HR_img = HR_img.cpu().detach().numpy().squeeze().transpose(1, 2, 0)
            HR_img = np.clip(HR_img, 0, 255)
            cv2.imwrite(osp.join(result_img_dir, img_name), HR_img)

        video_path = frames_to_video(result_img_dir)
    #     embed()
        video_path = y4m2yuv(video_path)
        HR_video_path = osp.join(test_HR_dir, video_name.replace('l', 'h_GT') + '.yuv')
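        # vmafossexec compares the reconstructed 1920x1080 yuv420p clip with the ground-truth
        # YUV and writes VMAF, PSNR, SSIM and MS-SSIM scores to an XML report.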
        cmd = 'vmafossexec yuv420p {} {}  {} {} ../../vmaf/model/vmaf_v0.6.1.pkl   --log {}.xml   --psnr --ssim --ms-ssim   --thread 2 --subsample 1'.format(1920, 1080, video_path, HR_video_path, osp.join(result_dir,video_name))
        os.system(cmd)
Example #2
def test_net(gpu_id, model_path, video_list, timeline, nframes):
    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(gpu_id)
    device = torch.device('cuda:0')
    model = construct_model(model_path, device)

    for video_name in video_list:
        print('testing  ', video_name)
        imglist = glob.glob(osp.join(test_LR_dir, video_name, '*.bmp'))
        imglist.sort(key=lambda x: (x.split('/')[-1][:-4]))
        result_img_dir = osp.join(result_dir, video_name)
        if not osp.exists(result_img_dir):
            os.mkdir(result_img_dir)

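        # Whole-frame inference: each low-resolution frame is pushed through the network in a
        # single pass (no patch tiling) and the super-resolved output is saved as BMP.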
        for i in range(len(imglist)):
            img = cv2.imread(imglist[i]) / 255
            img_name = imglist[i].split('/')[-1]
            img = torch.as_tensor(img.transpose(2, 0, 1)[None]).float().cuda()
            HR_img = model(img).cpu().detach().numpy() * 255

            HR_img = np.squeeze(HR_img).transpose(1, 2, 0)
            HR_img = np.clip(HR_img, 0, 255)
            cv2.imwrite(osp.join(result_img_dir, img_name), HR_img)

        video_path = frames_to_video(result_img_dir)
        #     embed()
        video_path = y4m2yuv(video_path)
        HR_video_path = osp.join(test_HR_dir,
                                 video_name.replace('l', 'h_GT') + '.yuv')
        cmd = 'vmafossexec yuv420p {} {}  {} {} ../../vmaf/model/vmaf_v0.6.1.pkl   --log {}_{}.xml   --psnr --ssim --ms-ssim   --thread 2 --subsample 1'.format(
            1920, 1080, video_path, HR_video_path,
            osp.join(result_dir, video_name), timeline)
        os.system(cmd)
Example #3
def test_net(gpu_id, model_path, video_list, timeline, nframes):
    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(gpu_id)
    device = torch.device('cuda:0')
    model = construct_model(model_path, device)

    for video_name in video_list:
        print('testing  ', video_name) 
        imglist = glob.glob(osp.join(test_LR_dir, video_name, '*.bmp'))
        result_img_dir = osp.join(result_dir, video_name)
        if not osp.exists(result_img_dir):
            os.mkdir(result_img_dir)
            
        for i in range(len(imglist)):
            img = cv2.imread(imglist[i]) / 255
            img_name = imglist[i].split('/')[-1]
            img = torch.as_tensor(img.transpose(2, 0, 1)[None]).float().cuda()
            HR_img = model(img).cpu().detach().numpy() * 255
                           
            HR_img = np.squeeze(HR_img).transpose(1, 2, 0)
            HR_img = np.clip(HR_img, 0, 255)
            cv2.imwrite(osp.join(result_img_dir, img_name), HR_img)

        video_path = frames_to_video(result_img_dir)
Example #4
    print('testing  ', video_name)
    imglist = glob.glob(osp.join(test_LR_dir, video_name, '*.bmp'))
    result_img_dir = osp.join(result_dir, video_name)
    if not osp.exists(result_img_dir):
        os.mkdir(result_img_dir)
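    # Bicubic x4 baseline: every frame is upscaled with cv2.resize / INTER_CUBIC, no network.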
    for img_path in imglist:
        img = cv2.imread(img_path)
        img_name = img_path.split('/')[-1]
        HR_img = cv2.resize(img,
                            dsize=None,
                            fx=4,
                            fy=4,
                            interpolation=cv2.INTER_CUBIC)
        cv2.imwrite(osp.join(result_img_dir, img_name), HR_img)

    video_path = frames_to_video(result_img_dir)
    #     embed()
    video_path = y4m2yuv(video_path)
    HR_video_path = osp.join(test_HR_dir,
                             video_name.replace('l', 'h_GT') + '.yuv')
    cmd = 'vmafossexec yuv420p {} {}  {} {} ../../vmaf/model/vmaf_v0.6.1.pkl   --log {}.xml   --psnr --ssim --ms-ssim   --thread 2 --subsample 1'.format(
        1920, 1080, video_path, HR_video_path,
        osp.join(result_dir, video_name))
    os.system(cmd)

results_xml = glob.glob(osp.join(result_dir, '*.xml'))

psnr = 0.0
vmaf = 0.0
ssim = 0.0
ms_ssim = 0.0
Example #5
def test_net(gpu_id, model_path, video_list, timeline, nframes):
    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(gpu_id)
    device = torch.device('cuda:0')
    model = construct_model(model_path, device)

    for video_name in video_list:
        print('testing  ', video_name)
        imglist = glob.glob(osp.join(test_LR_dir, video_name, '*.bmp'))
        # glob does not guarantee ordering; sort by frame name so the temporal stack is in order.
        imglist.sort(key=lambda x: x.split('/')[-1][:-4])
        result_img_dir = osp.join(result_dir, video_name)
        if not osp.exists(result_img_dir):
            os.mkdir(result_img_dir)
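        # The first and last nframes // 2 frames lack a full temporal window, so they are
        # upscaled with plain bicubic interpolation instead of the model.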
        for i in range(nframes // 2):
            img = cv2.imread(imglist[i])
            img_name = imglist[i].split('/')[-1]
            img = cv2.resize(img,
                             dsize=None,
                             fx=4,
                             fy=4,
                             interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(osp.join(result_img_dir, img_name), img)

            # -(i + 1) covers the last nframes // 2 frames; imglist[-i] would hit frame 0 when i == 0.
            img = cv2.imread(imglist[-(i + 1)])
            img_name = imglist[-(i + 1)].split('/')[-1]
            img = cv2.resize(img,
                             dsize=None,
                             fx=4,
                             fy=4,
                             interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(osp.join(result_img_dir, img_name), img)

        # + 1 so the sliding-window centres meet the bicubic-padded boundary frames with no gap.
        for i in range(len(imglist) - nframes + 1):
            img = []
            for j in range(nframes):
                img.append(cv2.imread(imglist[i + j]) / 255)
            img = np.stack(img, 0).transpose(3, 0, 1, 2)[None]
            img_name = imglist[i + nframes // 2].split('/')[-1]

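            # Tile the (1, C, T, H, W) input into 64x64 spatial patches (with one extra patch
            # flush against each border) and stitch the x4 outputs into the full-size frame.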
            h = img.shape[3]
            w = img.shape[4]
            HR_img = np.zeros(shape=(1, 3, h * 4, w * 4))
            start_row = [_ * 64 for _ in range(h // 64)]
            start_row.append(h - 64)
            start_col = [_ * 64 for _ in range(w // 64)]
            start_col.append(w - 64)

            for row in start_row:
                for col in start_col:
                    patch = torch.as_tensor(img[:, :, :, row:row + 64,
                                                col:col + 64]).float().cuda()
                    HR_img[:, :, row * 4:row * 4 + 64 * 4, col * 4:col * 4 +
                           64 * 4] = model(patch).cpu().detach().numpy() * 255

            HR_img = np.squeeze(HR_img).transpose(1, 2, 0)
            HR_img = np.clip(HR_img, 0, 255)
            cv2.imwrite(osp.join(result_img_dir, img_name), HR_img)

        video_path = frames_to_video(result_img_dir)
        #     embed()
        video_path = y4m2yuv(video_path)
        HR_video_path = osp.join(test_HR_dir,
                                 video_name.replace('l', 'h_GT') + '.yuv')
        cmd = 'vmafossexec yuv420p {} {}  {} {} ../../vmaf/model/vmaf_v0.6.1.pkl   --log {}_{}.xml   --psnr --ssim --ms-ssim   --thread 2 --subsample 1'.format(
            1920, 1080, video_path, HR_video_path,
            osp.join(result_dir, video_name), timeline)
        os.system(cmd)
Example #6
def test_net(gpu_id, model_path, video_list, timeline, nframes):
    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(gpu_id)
    device = torch.device('cuda:0')
    model = construct_model(model_path, device, nframes)

    for video_name in video_list:
        if osp.isfile(osp.join(test_LR_dir, video_name)):
            continue
        print('testing  ', video_name)
        imglist = glob.glob(osp.join(test_LR_dir, video_name, '*.bmp'))
        imglist.sort(key=lambda x: (x.split('/')[-1][:-4]))
        result_img_dir = osp.join(result_dir, video_name)
        if not osp.exists(result_img_dir):
            os.mkdir(result_img_dir)
        for i in range(nframes // 2):
            img = cv2.imread(imglist[i]) / 255
            img_name = imglist[i].split('/')[-1]
            img = cv2.resize(img,
                             dsize=None,
                             fx=4,
                             fy=4,
                             interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(osp.join(result_img_dir, img_name), img * 255)

            img = cv2.imread(imglist[-(i + 1)]) / 255
            img_name = imglist[-(i + 1)].split('/')[-1]
            img = cv2.resize(img,
                             dsize=None,
                             fx=4,
                             fy=4,
                             interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(osp.join(result_img_dir, img_name), img * 255)

        for i in range(len(imglist) - nframes + 1):
            img = []
            for j in range(nframes):
                img.append(cv2.imread(imglist[i + j]) / 255)
            img = np.stack(img, 0).transpose(3, 0, 1, 2)[None]
            img_name = imglist[i + nframes // 2].split('/')[-1]

            h = img.shape[3]
            w = img.shape[4]
            HR_img = np.zeros(shape=(1, 3, h * 4, w * 4))
            start_row = [_ * 100 for _ in range(h // 100)]
            start_row.append(h - 100)
            start_col = [_ * 100 for _ in range(w // 100)]
            start_col.append(w - 100)

            for row in start_row:
                for col in start_col:
                    patch = torch.as_tensor(img[:, :, :, row:row + 100,
                                                col:col + 100]).float().cuda()
                    HR_img[:, :, row * 4:row * 4 + 100 * 4, col * 4:col * 4 +
                           100 * 4] = model(patch).cpu().detach().numpy() * 255

            HR_img = np.squeeze(HR_img).transpose(1, 2, 0)
            HR_img = np.clip(HR_img, 0, 255)
            cv2.imwrite(osp.join(result_img_dir, img_name), HR_img)

        video_path = frames_to_video(result_img_dir)
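        # Rename the generated y4m to the <clip>_h_Res.y4m naming scheme. Note that
        # osp.join(*parts) drops a leading '/', so this relies on video_path being relative.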
        new_video_path = video_path.split('/')
        new_video_path[-1] = video_name.replace('_l', '_h_Res.y4m')
        os.rename(video_path, osp.join(*new_video_path))
Example #7
def test_net(gpu_id, model_path, video_list, timeline, nframes, patch_size):
    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(gpu_id)
    device = torch.device('cuda:0')
    model = construct_model(model_path, device)

    for video_name in video_list:
        if osp.isfile(osp.join(test_LR_dir, video_name)):
            continue
        print('testing  ', video_name)
        imglist = glob.glob(osp.join(test_LR_dir, video_name, '*.bmp'))
        imglist.sort(key=lambda x: (x.split('/')[-1][:-4]))
        result_img_dir = osp.join(result_dir, video_name)
        if not osp.exists(result_img_dir):
            os.mkdir(result_img_dir)


#         for i in range(nframes//2):
#             img = cv2.imread(imglist[i]) / 255
#             img_name = imglist[i].split('/')[-1]
#             img = np.stack([img]*nframes,0).transpose(0, 3, 1, 2)[None]
#             HR_img = compute_hr(img, model)
#             cv2.imwrite(osp.join(result_img_dir, img_name), HR_img)

#             img = cv2.imread(imglist[-i]) / 255
#             img_name = imglist[-i].split('/')[-1]
#             img = np.stack([img]*nframes,0).transpose(0, 3, 1, 2)[None]
#             HR_img = compute_hr(img, model)
#             cv2.imwrite(osp.join(result_img_dir, img_name), HR_img)
        # Clips 200-204 are super-resolved on every frame; the others only on every 25th frame.
        if any(tag in video_name for tag in ('200', '201', '202', '203', '204')):
            space = 1
        else:
            space = 25
        count = 1
        for i in range(0, len(imglist), space):
            img = []
            for j in range(-(nframes // 2), nframes // 2 + 1):
                # Reflect out-of-range frame indices at both clip ends; the hard-coded 100
                # assumes 100-frame clips (100 == len(imglist)).
                if (i + j) < 100:
                    index = abs(i + j)
                else:
                    index = 2 * len(imglist) - 1 - (i + j)
                img.append(cv2.imread(imglist[index]) / 255)
            img = np.stack(img, 0).transpose(0, 3, 1, 2)[None]
            img_name = imglist[i].split('/')[-1]

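            # Test-time augmentation: average the prediction over the original window and its
            # temporal, vertical and horizontal flips. Subtracting np.zeros_like(img) forces a
            # positively-strided copy, since torch.as_tensor rejects negative strides.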
            HR_img = compute_hr(img, patch_size, model)
            img_flip = img[:, ::-1] - np.zeros_like(img)
            HR_img = HR_img + compute_hr(img_flip, patch_size, model)
            img_flip = img[:, :, :, ::-1] - np.zeros_like(img)
            HR_img = HR_img + compute_hr(img_flip, patch_size, model)[::-1]
            img_flip = img[:, :, :, :, ::-1] - np.zeros_like(img)
            HR_img = HR_img + compute_hr(img_flip, patch_size, model)[:, ::-1]

            HR_img = HR_img / 4

            cv2.imwrite(osp.join(result_img_dir,
                                 str(count).zfill(3) + '.bmp'), HR_img)
            count += 1

        video_path = frames_to_video(result_img_dir)
        new_video_path = video_path.split('/')
        new_video_path[-1] = video_name.replace('_l', '_h_Res.y4m')
        os.rename(video_path, osp.join(*new_video_path))
Example #8
def test_net(gpu_id, model_path, video_list, timeline, nframes):
    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(gpu_id)
    device = torch.device('cuda:0')
    model, single_model = construct_model(model_path, device)

    for video_name in video_list:
        print('testing  ', video_name)
        imglist = glob.glob(osp.join(test_LR_dir, video_name, '*.bmp'))
        result_img_dir = osp.join(result_dir, video_name)
        imglist.sort(key=lambda x: (x.split('/')[-1][:-4]))
        if not osp.exists(result_img_dir):
            os.mkdir(result_img_dir)
        for i in range(nframes // 2):
            img = cv2.imread(imglist[i])
            img_name = imglist[i].split('/')[-1]
            img = torch.as_tensor(img.transpose(2, 0,
                                                1)[None]).float().cuda() / 255
            img = single_model(img).detach().cpu().squeeze().numpy().transpose(
                1, 2, 0) * 255
            cv2.imwrite(osp.join(result_img_dir, img_name), img)

            img = cv2.imread(imglist[-(i + 1)])
            img_name = imglist[-(i + 1)].split('/')[-1]
            img = torch.as_tensor(img.transpose(2, 0,
                                                1)[None]).float().cuda() / 255
            img = single_model(img).detach().cpu().squeeze().numpy().transpose(
                1, 2, 0) * 255
            cv2.imwrite(osp.join(result_img_dir, img_name), img)

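        # If the neighbouring frames differ strongly from the centre frame (large motion or a
        # scene cut), fall back to the single-frame model; otherwise feed the whole temporal
        # window to the multi-frame model.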
        for i in range(len(imglist) - nframes + 1):
            img = []
            for j in range(nframes):
                img.append(cv2.imread(imglist[i + j]) / 255)
            img = np.stack(img, -1)
            dis = np.max(
                np.mean((img - img[:, :, :, nframes // 2:nframes // 2 + 1])**2,
                        (0, 1, 2)))
            if dis > 100 / 255**2:
                print("single model")
                HR_img = single_model(
                    torch.as_tensor(img[:, :, :, nframes // 2].transpose(
                        2, 0,
                        1)[None]).float().cuda()).cpu().detach().numpy() * 255
            else:
                print("multi model")
                img = img.reshape(img.shape[0], img.shape[1],
                                  -1).transpose(2, 0, 1)[None]
                HR_img = model(torch.as_tensor(
                    img).float().cuda()).cpu().detach().numpy() * 255
            img_name = imglist[i + nframes // 2].split('/')[-1]
            HR_img = np.squeeze(HR_img).transpose(1, 2, 0)
            HR_img = np.clip(HR_img, 0, 255)
            cv2.imwrite(osp.join(result_img_dir, img_name), HR_img)

        video_path = frames_to_video(result_img_dir)
        #     embed()
        video_path = y4m2yuv(video_path)
        HR_video_path = osp.join(test_HR_dir,
                                 video_name.replace('l', 'h_GT') + '.yuv')
        cmd = 'vmafossexec yuv420p {} {}  {} {} ../../vmaf/model/vmaf_v0.6.1.pkl   --log {}_{}.xml   --psnr --ssim --ms-ssim   --thread 2 --subsample 1'.format(
            1920, 1080, video_path, HR_video_path,
            osp.join(result_dir, video_name), timeline)
        os.system(cmd)