Example #1
0
 def test_run_vmafossexec_nonexist_dis_file(self):
     """vmafossexec must exit with the argument-issue return code when the
     distorted-video path does not exist."""
     ref_path = VmafConfig.test_resource_path("yuv", "src01_hrc00_576x324.yuv")
     dis_path = VmafConfig.resource_path("yuv", "src01_hrc01_576x324_XXX.yuv")  # intentionally missing
     model_path = VmafConfig.model_path("other_models", "vmaf_v0.6.0.json")
     cmd = "{exe} yuv420p 576 324 {ref} {dis} {model} > /dev/null 2>&1".format(
         exe=ExternalProgram.vmafossexec, ref=ref_path, dis=dis_path, model=model_path)
     self.assertEqual(subprocess.call(cmd, shell=True), self.RC_ARGUMENT_ISSUE)
Example #2
0
 def setUp(self):
     """Resolve the raw-dataset input path and the workdir output paths
     that the derived-dataset tests read and write."""
     self.raw_dataset_filepath = VmafConfig.resource_path("dataset", "NFLX_dataset_public_raw.py")
     self.derived_dataset_path = VmafConfig.workdir_path("test_derived_dataset.py")
     self.derived_dataset_path_pyc = VmafConfig.workdir_path("test_derived_dataset.pyc")
Example #3
0
def main():
    """Drive the subjective-model experiments.

    Currently only the subject-corruption growth study runs; the other
    experiment entry points are kept commented out so they can be toggled
    on by hand, then all plots are displayed.
    """
    dataset_filepaths = [
        VmafConfig.resource_path('dataset', 'NFLX_dataset_public_raw_last4outliers.py'),
        VmafConfig.resource_path('dataset', 'VQEGHD3_dataset_raw.py'),
    ]

    # ---- sample results ----
    subjective_model_classes = [
        MaximumLikelihoodEstimationModel,
        MosModel,
        # MaximumLikelihoodEstimationDmosModel,
        # DmosModel,
    ]
    # plot_sample_results(dataset_filepaths, subjective_model_classes)

    # ---- plot trends ----

    # datasize growth:
    # run_datasize_growth(dataset_filepaths)

    # corruption growth:
    run_subject_corruption_growth(dataset_filepaths)
    # run_random_corruption_growth(dataset_filepaths)
    # run_subject_partial_corruption_growth(dataset_filepaths)

    # random missing growth:
    # run_missing_growth(dataset_filepaths)

    # synthetic data:
    # validate_with_synthetic_dataset()

    plt.show()
Example #4
0
def validate_with_synthetic_dataset():
    """Check MLE subjective-model recovery against synthetic ground truth.

    The dataset file is used only for its dimensions and reference-video
    mapping; the synthetic parameters are drawn with a fixed seed so the
    run is reproducible.
    """
    dataset_filepath = VmafConfig.resource_path(
        'dataset', 'NFLX_dataset_public_raw_last4outliers.py')
    np.random.seed(0)  # fixed seed: same draw order as before, same values
    synthetic_result = {
        'quality_scores': np.random.uniform(1, 5, 79),
        'observer_bias': np.random.normal(0, 1, 30),
        'observer_inconsistency': np.abs(np.random.uniform(0.0, 0.4, 30)),
        'content_bias': np.random.normal(0, 0.00001, 9),
        'content_ambiguity': np.abs(np.random.uniform(0.4, 0.6, 9)),
    }
    _validate_with_synthetic_dataset(
        subjective_model_classes=[MaximumLikelihoodEstimationModel],
        dataset_filepath=dataset_filepath,
        synthetic_result=synthetic_result,
    )
def main():
    """Benchmark vmafossexec at several frame-subsampling factors and plot
    the resulting processing speed against the subsample value."""
    dataset_path = VmafConfig.resource_path('dataset', 'NFLX_dataset_public.py')
    subsamples = [1, 2, 4, 8, 16, 32, 64, 128]
    elapsed_times = []
    pccs = []
    for subsample in subsamples:
        elapsed_time, srcc, pcc, rmse = run_vmafossexec_with_subsample(dataset_path, subsample)
        elapsed_times.append(elapsed_time)
        pccs.append(pcc)
        print("SRCC: {}, PCC: {}, RMSE: {}, time: {}".format(srcc, pcc, rmse, elapsed_time))

    fig, ax = plt.subplots(1, 1, figsize=[8, 5])
    # 6*24*79 — presumably the total frame count of the dataset; confirm
    ax.plot(subsamples, 6 * 24 * 79 / np.array(elapsed_times), 'x-')
    ax.set_xlabel("Subsample")
    ax.set_ylabel("Processing Speed (Frms/Sec)")
    ax.grid(True)
    plt.tight_layout()
    DisplayConfig.show()
# Dataset-definition module read by VMAF's dataset tooling.
dataset_name = 'example_image'

yuv_fmt = 'notyuv'  # sources are .icpf frames, not raw YUV — presumably decoded by the pipeline; confirm with the reader
quality_width = 1920   # resolution at which quality is computed
quality_height = 1080
workfile_yuv_type = 'yuv444p'  # pixel format of the intermediate workfiles

from vmaf.config import VmafConfig

# One reference entry per content; 'content_id' links distorted entries to it.
ref_videos = [
    {
        'content_id': 0,
        'path': VmafConfig.resource_path('icpf', 'frame00000001.icpf')
    },
    {
        'content_id': 1,
        'path': VmafConfig.resource_path('icpf', 'frame00000002.icpf')
    },
]

dis_videos = [
    {
        'content_id': 0,
        'asset_id': 0,
        'dmos': 100,
        'path': VmafConfig.resource_path('icpf', 'frame00000000.icpf')
    },  # ref
    {
        'content_id': 0,
        'asset_id': 1,
        'dmos': 50,
Example #7
0
# Dataset-definition module read by VMAF's dataset tooling.
dataset_name = 'example'

quality_width = 1920   # resolution at which quality is computed
quality_height = 1080

from vmaf.config import VmafConfig

# One reference entry per content (note: key/value on separate lines is an
# auto-formatter artifact, not a semantic difference).
ref_videos = [
    {
        'content_id':
        0,
        'path':
        VmafConfig.resource_path('yuv', 'checkerboard_1920_1080_10_3_0_0.yuv'),
        'yuv_fmt':
        'yuv420p'
    },
]

dis_videos = [
    {
        'content_id':
        0,
        'asset_id':
        0,
        'dmos':
        100,
        'path':
        VmafConfig.resource_path('yuv', 'checkerboard_1920_1080_10_3_0_0.yuv'),
        'fps':
        30,
        'rebuf_indices': [0, 4, 15]
Example #8
0
import numpy as np

from vmaf.config import VmafConfig
from vmaf.core.executor import run_executors_in_parallel
from vmaf.core.raw_extractor import DisYUVRawVideoExtractor
from vmaf.core.nn_train_test_model import ToddNoiseClassifierTrainTestModel
from vmaf.routine import read_dataset
from vmaf.tools.misc import import_python_file

# parameters
num_train = 500  # number of assets used for training
num_test = 50    # number of assets held out for evaluation
n_epochs = 30
seed = 0  # use None for a non-deterministic shuffle

# read input dataset
dataset_path = VmafConfig.resource_path('dataset', 'BSDS500_noisy_dataset.py')
dataset = import_python_file(dataset_path)
assets = read_dataset(dataset)

# shuffle assets deterministically, then keep only the train+test subset
np.random.seed(seed)
np.random.shuffle(assets)
assets = assets[:(num_train + num_test)]

raw_video_h5py_filepath = VmafConfig.workdir_path('rawvideo.hdf5')
raw_video_h5py_file = DisYUVRawVideoExtractor.open_h5py_file(
    raw_video_h5py_filepath)

# print() call (not the Python-2 print statement) so the script also runs on Python 3
print('======================== Extract raw YUVs ==============================')

_, raw_yuvs = run_executors_in_parallel(
from vmaf.config import VmafConfig

# Dataset-definition module: BSDS-style test images stored as raw YUV.
dataset_name = 'test_image'
yuv_fmt = 'yuv444p'

# One reference entry per content; all images share the same 481x321 geometry.
# NOTE(review): content_id jumps from 2 to 4 — confirm the gap is intentional.
ref_videos = [
 {'content_id': 0,
  'content_name': '100007',
  'height': 321,
  'path': VmafConfig.resource_path('test_image_yuv', '100007.yuv'),
  'width': 481},
 {'content_id': 1,
  'content_name': '100039',
  'height': 321,
  'path': VmafConfig.resource_path('test_image_yuv', '100039.yuv'),
  'width': 481},
 {'content_id': 2,
  'content_name': '100075',
  'height': 321,
  'path': VmafConfig.resource_path('test_image_yuv', '100075.yuv'),
  'width': 481},
 {'content_id': 4,
  'content_name': '100098',
  'height': 321,
  'path': VmafConfig.resource_path('test_image_yuv', '100098.yuv'),
  'width': 481},
 {'content_id': 5,
  'content_name': '100099',
  'height': 321,
  'path': VmafConfig.resource_path('test_image_yuv', '100099.yuv'),
  'width': 481},
Example #10
0
# Dataset-definition module read by VMAF's dataset tooling.
dataset_name = 'example'

quality_width = 1920   # resolution at which quality is computed
quality_height = 1080

from vmaf.config import VmafConfig

# One reference entry per content; per-entry width/height/yuv_fmt describe
# the source file itself.
ref_videos = [
    {
        'content_id':
        0,
        'path':
        VmafConfig.resource_path('yuv', 'checkerboard_1920_1080_10_3_0_0.yuv'),
        'yuv_fmt':
        'yuv420p',
        'width':
        1920,
        'height':
        1080
    },
    {
        'content_id': 1,
        # NOTE(review): filename says 1920x1080 but declared dims are 720x480
        # — confirm which is correct.
        'path': VmafConfig.resource_path('yuv', 'flat_1920_1080_0.yuv'),
        'yuv_fmt': 'yuv420p',
        'width': 720,
        'height': 480
    },
]

dis_videos = [
    {
__copyright__ = "Copyright 2016-2017, Netflix, Inc."
__license__ = "Apache, Version 2.0"

# Script: train a VMAF model with cross validation.
# NOTE(review): plt and np are not used in the visible portion of this
# script — presumably used further down; confirm before removing.
import matplotlib.pyplot as plt
import numpy as np

from vmaf.config import VmafConfig
from vmaf.routine import run_vmaf_cv, run_vmaf_kfold_cv

if __name__ == '__main__':

    # ==== Run simple cross validation: one training and one testing dataset ====

    run_vmaf_cv(
        train_dataset_filepath=VmafConfig.resource_path('dataset', 'NFLX_dataset_public.py'),
        test_dataset_filepath=VmafConfig.resource_path('dataset', 'VQEGHD3_dataset.py'),
        param_filepath=VmafConfig.resource_path('param', 'vmaf_v3.py'),
        output_model_filepath=VmafConfig.workspace_path('model', 'test_model1.pkl'),
    )

    # ==== Run cross validation across genres (tough test) ====

    nflx_dataset_path = VmafConfig.resource_path('dataset', 'NFLX_dataset_public.py')
    # Each inner list is one fold: contents grouped by genre so a fold holds
    # out an entire genre.
    contentid_groups = [
        [0, 5], # cartoon: BigBuckBunny, FoxBird
        [1], # CG: BirdsInCage
        [2, 6, 7], # complex: CrowdRun, OldTownCross, Seeking
        [3, 4], # ElFuente: ElFuente1, ElFuente2
        [8], # sports: Tennis
    ]
    param_filepath = VmafConfig.resource_path('param', 'vmaf_v3.py')
# Example dataset exercising crop/pad preprocessing on the distorted videos.
dataset_name = 'example'

yuv_fmt = 'yuv420p'
width = 576
height = 324
quality_width = 576
quality_height = 324

from vmaf.config import VmafConfig

ref_videos = [
    {
        'content_id': 0,
        'path': VmafConfig.resource_path('yuv', 'src01_hrc00_576x324.yuv'),
    },
]

# All distorted entries use the same source file; they differ only in the
# crop/pad commands applied before scoring.
dis_videos = [
    {
        'content_id': 0,
        'asset_id': 1,
        'dmos': 50,
        'path': VmafConfig.resource_path('yuv', 'src01_hrc01_576x324.yuv'),
        'crop_cmd': '288:162:144:81',
    },
    {
        'content_id': 0,
        'asset_id': 2,
        'dmos': 49,
        'path': VmafConfig.resource_path('yuv', 'src01_hrc01_576x324.yuv'),
        'pad_cmd': 'iw+100:ih+100:50:50',
    },
    {
        'content_id': 0,
        'asset_id': 3,
        'dmos': 48,
        'path': VmafConfig.resource_path('yuv', 'src01_hrc01_576x324.yuv'),
        'crop_cmd': '288:162:144:81',
        'pad_cmd': 'iw+288:ih+162:144:81',
    },
]
Example #13
0
__copyright__ = "Copyright 2016-2019, Netflix, Inc."
__license__ = "Apache, Version 2.0"

# Script: train a VMAF model with cross validation.
# NOTE(review): plt and np are not used in the visible portion of this
# script — presumably used further down; confirm before removing.
import matplotlib.pyplot as plt
import numpy as np

from vmaf.config import VmafConfig, DisplayConfig
from vmaf.routine import run_vmaf_cv, run_vmaf_kfold_cv

if __name__ == '__main__':

    # ==== Run simple cross validation: one training and one testing dataset ====

    run_vmaf_cv(
        train_dataset_filepath=VmafConfig.resource_path(
            'dataset', 'NFLX_dataset_public.py'),
        test_dataset_filepath=VmafConfig.resource_path('dataset',
                                                       'VQEGHD3_dataset.py'),
        param_filepath=VmafConfig.resource_path('param', 'vmaf_v3.py'),
        output_model_filepath=VmafConfig.workspace_path(
            'model', 'test_model1.pkl'),
    )

    # ==== Run cross validation across genres (tough test) ====

    nflx_dataset_path = VmafConfig.resource_path('dataset',
                                                 'NFLX_dataset_public.py')
    # Each inner list is one fold: contents grouped by genre so a fold holds
    # out an entire genre.
    contentid_groups = [
        [0, 5],  # cartoon: BigBuckBunny, FoxBird
        [1],  # CG: BirdsInCage
        [2, 6, 7],  # complex: CrowdRun, OldTownCross, Seeking
Example #14
0
# Example dataset mixing an 8-bit and a 10-bit content; 'groundtruth'
# carries the subjective score for each distorted clip.
dataset_name = 'example'

from vmaf.config import VmafConfig

ref_videos = [
    {
        'content_id': 0,
        'path': VmafConfig.resource_path('yuv', 'checkerboard_1920_1080_10_3_0_0.yuv'),
        'width': 1920,
        'height': 1080,
        'yuv_fmt': 'yuv420p',
    },
    {
        'content_id': 1,
        'path': VmafConfig.resource_path('yuv', 'flat_1280_720_0.yuv'),
        'width': 1280,
        'height': 720,
        'yuv_fmt': 'yuv420p10le',
    },
]

dis_videos = [
    # content 0: the reference itself, then a distorted version
    {
        'content_id': 0,
        'asset_id': 0,
        'groundtruth': 100,
        'path': VmafConfig.resource_path('yuv', 'checkerboard_1920_1080_10_3_0_0.yuv'),
    },
    {
        'content_id': 0,
        'asset_id': 1,
        'groundtruth': 50,
        'path': VmafConfig.resource_path('yuv', 'checkerboard_1920_1080_10_3_1_0.yuv'),
    },
    # content 1: the reference itself, then a distorted version
    {
        'content_id': 1,
        'asset_id': 2,
        'groundtruth': 100,
        'path': VmafConfig.resource_path('yuv', 'flat_1280_720_0.yuv'),
    },
    {
        'content_id': 1,
        'asset_id': 3,
        'groundtruth': 80,
        'path': VmafConfig.resource_path('yuv', 'flat_1280_720_10.yuv'),
    },
]
from vmaf.config import VmafConfig

# Dataset-definition module: BSDS-style test images stored as raw YUV,
# down-scaled to 200x100 with bicubic resampling for quality computation.
dataset_name = 'test_image'
yuv_fmt = 'yuv444p'

dataset_dir = VmafConfig.resource_path('test_image_yuv')

quality_width = 200    # resolution at which quality is computed
quality_height = 100
resampling_type = 'bicubic'

# One reference entry per content; all images share the same 481x321 geometry.
# NOTE(review): content_id jumps from 2 to 4 — confirm the gap is intentional.
ref_videos = [
 {'content_id': 0,
  'content_name': '100007',
  'height': 321,
  'path': dataset_dir + '/100007.yuv',
  'width': 481},
 {'content_id': 1,
  'content_name': '100039',
  'height': 321,
  'path': dataset_dir + '/100039.yuv',
  'width': 481},
 {'content_id': 2,
  'content_name': '100075',
  'height': 321,
  'path': dataset_dir + '/100075.yuv',
  'width': 481},
 {'content_id': 4,
  'content_name': '100098',
  'height': 321,
  'path': dataset_dir + '/100098.yuv',
Example #16
0
def main():
    """Train and evaluate the Todd noise-classifier model on BSDS500 assets.

    Pipeline: read the noisy dataset, deterministically shuffle and subset
    the assets, extract raw YUV patches into an HDF5 file, train the
    classifier, evaluate on the held-out split, then close and delete the
    HDF5 files. Statement order matters: the h5py files must stay open
    through extraction/training and are torn down last.
    """
    # parameters
    num_train = 500  # assets used for training
    num_test = 50    # assets held out for evaluation
    n_epochs = 30
    seed = 0  # None

    # read input dataset
    dataset_path = VmafConfig.resource_path('dataset',
                                            'BSDS500_noisy_dataset.py')
    dataset = import_python_file(dataset_path)
    assets = read_dataset(dataset)

    # shuffle assets deterministically, then keep only train+test subset
    np.random.seed(seed)
    np.random.shuffle(assets)
    assets = assets[:(num_train + num_test)]

    # open the HDF5 file that will receive the extracted raw YUV data
    raw_video_h5py_filepath = VmafConfig.workdir_path('rawvideo.hdf5')
    raw_video_h5py_file = DisYUVRawVideoExtractor.open_h5py_file(
        raw_video_h5py_filepath)

    print(
        '======================== Extract raw YUVs =============================='
    )

    _, raw_yuvs = run_executors_in_parallel(
        DisYUVRawVideoExtractor,
        assets,
        fifo_mode=True,
        delete_workdir=True,
        parallelize=False,  # CAN ONLY USE SERIAL MODE FOR DisYRawVideoExtractor
        result_store=None,
        optional_dict=None,
        optional_dict2={'h5py_file': raw_video_h5py_file})

    # second HDF5 file holds the patches produced during training
    patch_h5py_filepath = VmafConfig.workdir_path('patch.hdf5')
    patch_h5py_file = ToddNoiseClassifierTrainTestModel.open_h5py_file(
        patch_h5py_filepath)
    model = ToddNoiseClassifierTrainTestModel(
        param_dict={
            'seed': seed,
            'n_epochs': n_epochs,
        },
        logger=None,
        optional_dict2={ # for options that won't impact the result
            # 'checkpoints_dir': VmafConfig.workspace_path('checkpoints_dir'),
            'h5py_file': patch_h5py_file,
        })

    print(
        '============================ Train model ==============================='
    )
    # first num_train results train the model ...
    xys = ToddNoiseClassifierTrainTestModel.get_xys_from_results(
        raw_yuvs[:num_train])
    model.train(xys)

    print(
        '=========================== Evaluate model ============================='
    )
    # ... the remaining results are the evaluation split
    xs = ToddNoiseClassifierTrainTestModel.get_xs_from_results(
        raw_yuvs[num_train:])
    ys = ToddNoiseClassifierTrainTestModel.get_ys_from_results(
        raw_yuvs[num_train:])
    result = model.evaluate(xs, ys)

    print("")
    print("f1 test %g, errorrate test %g" %
          (result['f1'], result['errorrate']))

    # tear down: close both HDF5 files before removing them from disk
    # NOTE(review): relies on a module-level `import os` not visible in this
    # chunk — confirm it exists at the top of the file.
    DisYUVRawVideoExtractor.close_h5py_file(raw_video_h5py_file)
    ToddNoiseClassifierTrainTestModel.close_h5py_file(patch_h5py_file)
    os.remove(raw_video_h5py_filepath)
    os.remove(patch_h5py_filepath)

    print('Done.')
Example #17
0
 def setUp(self):
     """Resolve the raw-dataset input path and the workdir output paths
     that the derived-dataset tests read and write."""
     self.raw_dataset_filepath = VmafConfig.resource_path(
         "dataset", "NFLX_dataset_public_raw.py")
     self.derived_dataset_path = VmafConfig.workdir_path(
         "test_derived_dataset.py")
     self.derived_dataset_path_pyc = VmafConfig.workdir_path(
         "test_derived_dataset.pyc")
import numpy as np

from vmaf.config import VmafConfig
from vmaf.core.executor import run_executors_in_parallel
from vmaf.core.raw_extractor import DisYUVRawVideoExtractor
from vmaf.core.nn_train_test_model import ToddNoiseClassifierTrainTestModel
from vmaf.routine import read_dataset
from vmaf.tools.misc import import_python_file


# parameters
num_train = 500  # number of assets used for training
num_test = 50    # number of assets held out for evaluation
n_epochs = 30
seed = 0  # use None for a non-deterministic shuffle

# read input dataset
dataset_path = VmafConfig.resource_path('dataset', 'BSDS500_noisy_dataset.py')
dataset = import_python_file(dataset_path)
assets = read_dataset(dataset)

# shuffle assets deterministically, then keep only the train+test subset
np.random.seed(seed)
np.random.shuffle(assets)
assets = assets[:(num_train + num_test)]

raw_video_h5py_filepath = VmafConfig.workdir_path('rawvideo.hdf5')
raw_video_h5py_file = DisYUVRawVideoExtractor.open_h5py_file(raw_video_h5py_filepath)

# print() call (not the Python-2 print statement) so the script also runs on Python 3
print('======================== Extract raw YUVs ==============================')

_, raw_yuvs = run_executors_in_parallel(
    DisYUVRawVideoExtractor,