import cv2
import numpy as np
from tensorpack import dataflow as df

# Davis, Kinetics and ImageProcess are project-local helpers; import them
# from wherever this repository defines them.


def test_davis_tensorpack_dataflow():
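    """Run the Davis dataset through a tensorpack pipeline: resize frames and
    annotations to 256x256, stack each 4-frame clip, batch 6 clips together,
    and check the resulting tensor shapes.
    """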
    ds = Davis('/data/public/rw/datasets/videos/davis/trainval', num_frames=4)

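    # Resize every frame (component 1) and every annotation (component 2) of a
    # clip to 256x256.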
    ds = df.MapDataComponent(
        ds,
        lambda images: [cv2.resize(image, (256, 256)) for image in images],
        index=1)
    ds = df.MapDataComponent(
        ds,
        lambda images: [cv2.resize(image, (256, 256)) for image in images],
        index=2)
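    # Stack each 4-frame clip into a single (4, 256, 256, 3) array so clips
    # can be batched below.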
    ds = df.MapDataComponent(ds,
                             lambda images: np.stack(images, axis=0),
                             index=1)
    ds = df.MapDataComponent(ds,
                             lambda images: np.stack(images, axis=0),
                             index=2)
    ds = df.BatchData(ds, 6)

    ds.reset_state()
    generator = ds.get_data()
    for _ in range(10):
        _, images, annotations = next(generator)
        assert images.shape == (6, 4, 256, 256, 3)
        assert annotations.shape == (6, 4, 256, 256, 3)


def test_davis():
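    """Check the reported size of the DAVIS trainval set and its first and
    last five video names.
    """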
    davis = Davis('/data/public/rw/datasets/videos/davis/trainval')
    assert davis.size() == 6000
    assert davis.names[:5] == [
        'bear', 'bmx-bumps', 'boat', 'boxing-fisheye', 'breakdance-flare'
    ]
    assert davis.names[-5:] == [
        'train', 'tuk-tuk', 'upside-down', 'varanus-cage', 'walking'
    ]


def dataflow(name='davis', scale=1, split='val'):
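    """Build a single-frame dataflow for the chosen dataset and split, yielding
    [index, resized frame, grayscale frame, resized mask, video name]
    datapoints prefetched in a background process.
    """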
    if name == 'davis':
        ds = Davis('/data/zubin/videos/davis', name=split, num_frames=1, shuffle=False)
    elif name == 'kinetics':
        ds = Kinetics('/data/public/rw/datasets/videos/kinetics', num_frames=1, skips=[0], shuffle=False)
    else:
        raise ValueError('unsupported dataset: %s' % name)

    if name != 'davis':
        # Kinetics provides no annotation mask; reuse the frame as the mask and
        # append a placeholder name so the downstream dp[3] access still works.
        ds = df.MapData(ds, lambda dp: [dp[0], dp[1], dp[1], name])

    ds = df.MapData(ds, lambda dp: [
        dp[0],  # index
        dp[1],  # original
        dp[2],  # mask
        dp[3],  # name
    ])
    feature_size = int(256 * scale)
    size = (feature_size, feature_size)

    ds = df.MapDataComponent(ds, ImageProcess.resize(small_axis=feature_size), index=1)
    ds = df.MapDataComponent(ds, lambda images: cv2.resize(images[0], size), index=2)

    ds = df.MapData(ds, lambda dp: [
        dp[0],  # index
        dp[1][0],  # original, small axis resized to 256 * scale
        cv2.cvtColor(cv2.resize(dp[1][0], size), cv2.COLOR_BGR2GRAY).reshape(
            (size[0], size[1], 1)),  # grayscale, (256 * scale) square
        dp[2],  # annotated mask, (256 * scale) x (256 * scale)
        dp[3],  # name
    ])
    ds = df.MultiProcessPrefetchData(ds, nr_prefetch=32, nr_proc=1)
    return ds


def test_davis_generator_with_num_frames():
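    """Draw 4-frame clips from the raw generator and verify clip length, frame
    shapes, and that the index restarts at 0 when the next video begins.
    """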
    davis = Davis('/data/public/rw/datasets/videos/davis/trainval')
    generator = davis.get_data(num_frames=4)
    idx, images, annotations = next(generator)
    assert idx == 0
    assert len(images) == 4
    assert [image.shape for image in images] == [(1080, 1920, 3)] * 4
    assert [image.shape for image in annotations] == [(1080, 1920, 3)] * 4

    for _ in range(81 // 4 - 2):
        next(generator)

    idx, images, annotations = next(generator)
    assert idx == 19
    assert len(images) == 4
    assert [image.shape for image in images] == [(1080, 1920, 3)] * 4
    assert [image.shape for image in annotations] == [(1080, 1920, 3)] * 4

    idx, images, annotations = next(generator)
    assert idx == 0
    assert len(images) == 4
    assert [image.shape for image in images] == [(1080, 1920, 3)] * 4
    assert [image.shape for image in annotations] == [(1080, 1920, 3)] * 4


def test_davis_generator():
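    """Draw single frames from the raw generator and verify frame shapes and
    that the index restarts at 0 once the first video is exhausted.
    """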
    davis = Davis('/data/public/rw/datasets/videos/davis/trainval')
    generator = davis.get_data()
    idx, images, annotations = next(generator)
    assert idx == 0
    assert len(images) == 1
    assert images[0].shape == (1080, 1920, 3)
    assert annotations[0].shape == (1080, 1920, 3)

    for _ in range(81 - 1):
        next(generator)

    idx, images, annotations = next(generator)
    assert idx == 81
    assert len(images) == 1
    assert images[0].shape == (1080, 1920, 3)
    assert annotations[0].shape == (1080, 1920, 3)

    idx, images, annotations = next(generator)
    assert idx == 0
    assert len(images) == 1
    assert images[0].shape == (1080, 1920, 3)
    assert annotations[0].shape == (1080, 1920, 3)


def dataflow(name='davis', scale=1):
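    """Build a single-frame dataflow yielding [index, resized frame, grayscale
    frame, resized mask] datapoints prefetched in a background process.
    """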
    if name == 'davis':
        ds = Davis('/data/public/rw/datasets/videos/davis/trainval',
                   num_frames=1,
                   shuffle=False)
    elif name == 'kinetics':
        ds = Kinetics('/data/public/rw/datasets/videos/kinetics',
                      num_frames=1,
                      skips=[0],
                      shuffle=False)
    else:
        raise ValueError('unsupported dataset: %s' % name)

    if name != 'davis':
        ds = df.MapData(ds, lambda dp: [dp[0], dp[1], dp[1]])

    ds = df.MapData(
        ds,
        lambda dp: [
            dp[0],  # index
            dp[1],  # original
            dp[2],  # mask
        ])
    feature_size = int(256 * scale)
    size = (feature_size, feature_size)

    ds = df.MapDataComponent(ds,
                             ImageProcess.resize(small_axis=feature_size),
                             index=1)
    ds = df.MapDataComponent(ds,
                             lambda images: cv2.resize(images[0], size),
                             index=2)

    ds = df.MapData(
        ds,
        lambda dp: [
            dp[0],  # index
            dp[1][0],  # original
            cv2.cvtColor(cv2.resize(dp[1][0], size), cv2.COLOR_BGR2GRAY).
            reshape((size[0], size[1], 1)),  # gray
            dp[2],  # mask
        ])
    ds = df.MultiProcessPrefetchData(ds, nr_prefetch=32, nr_proc=1)
    return ds
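

# A minimal usage sketch, assuming the dataset paths above are available:
# build the dataflow, start its prefetch process, and inspect one datapoint.
if __name__ == '__main__':
    ds = dataflow(name='davis', scale=1)
    ds.reset_state()  # must be called once before iterating; starts the prefetch process
    for index, image, gray, mask in ds.get_data():
        print(index, image.shape, gray.shape, mask.shape)
        break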