def test_pngs():
    """Check that luminance computed from PNG-round-tripped frames matches
    luminance computed directly from the video, at native and downsampled
    resolutions.

    NOTE(review): relies on module-level names not visible in this chunk —
    ``io`` (moten), ``np``, ``PIL``, ``video_file``, ``small_size`` and
    ``aspect_ratio`` — confirm they are defined at module scope.
    """
    images_luminance = io.video2luminance(video_file, nimages=100)
    images_luminance_resized = io.video2luminance(video_file,
                                                  size=small_size,
                                                  nimages=100)

    # Convert the first 100 video frames to PNGs
    video_buffer = io.video_buffer(video_file, nimages=100)

    # Store frames in a temporary directory that is removed automatically.
    # (The original used tempfile.mkdtemp(), which leaked the directory
    # and its PNGs on every test run.)
    with tempfile.TemporaryDirectory() as tmpdir:
        for frameidx, video_frame in enumerate(video_buffer):
            image_object = PIL.Image.fromarray(video_frame)
            # Zero-padded names keep lexicographic order == frame order.
            image_object.save(os.path.join(tmpdir, 'frame%08i.png' % frameidx))

        image_files = sorted(glob(os.path.join(tmpdir, '*.png')))
        files_luminance = io.load_image_luminance(image_files)

        files_luminance_resized = io.load_image_luminance(image_files,
                                                          vdim=96,
                                                          hdim=int(96 *
                                                                   aspect_ratio))

    assert np.allclose(files_luminance, images_luminance)
    assert np.allclose(files_luminance_resized, images_luminance_resized)
def test_video2luminance():
    """video2luminance must agree with per-frame luminance conversion.

    NOTE(review): relies on module-level ``io`` (moten), ``np`` and
    ``video_file`` — not visible in this chunk.
    """
    frame_count = 256
    ratio = 16 / 9.0
    target_size = (96, int(96 * ratio))

    # Reference: convert each buffered frame individually.
    frames = io.video_buffer(video_file, nimages=frame_count)
    expected = np.asarray(
        [io.imagearray2luminance(frame, size=target_size).squeeze()
         for frame in frames])

    # Batched conversion must reproduce the per-frame reference.
    batched = io.video2luminance(video_file,
                                 size=target_size,
                                 nimages=frame_count)
    assert np.allclose(expected, batched)
# Beispiel #3
def test_smoke_test():
    """Smoke test for localized motion-energy filters.

    Projects a movie through single filters placed near the four image
    corners, then repeats the projection on control movies in which the
    quadrant under each filter is blanked except for one bright "spike"
    frame at the temporal midpoint, and plots both responses side by side
    for visual comparison.

    NOTE(review): depends on module-level ``np``, ``io`` (moten),
    ``pyramids`` and ``plt`` imports that are not visible in this chunk.
    """

    # high accuracy for test
    DTYPE = np.float64
    temp_freq = 4.0  # temporal frequency of the single filter band

    video_file = 'http://anwarnunez.github.io/downloads/avsnr150s24fps_tiny.mp4'
    nimages = 200
    small_size = (72, 128)  # downsampled size (vdim, hdim) 16:9 aspect ratio
    # Download the video and convert its first `nimages` frames to
    # downsampled luminance images.
    luminance_images = io.video2luminance(video_file,
                                          size=small_size,
                                          nimages=nimages)
    #luminance_images = io.video2grey(video_file, size=small_size, nimages=nimages)
    # NOTE(review): rebinds `nimages` to the actual number of frames loaded.
    nimages, vdim, hdim = luminance_images.shape

    stimulus_fps = 24
    aspect_ratio = hdim / vdim

    # Pyramid restricted to a single spatial and temporal frequency so
    # only a handful of filters exist.
    pyramid = pyramids.MotionEnergyPyramid(
        stimulus_vhsize=(vdim, hdim),
        stimulus_fps=stimulus_fps,
        spatial_frequencies=[8],  # only small filters
        temporal_frequencies=[temp_freq],  # only one temporal freq
        filter_temporal_width=16)
    print(pyramid)

    # centered projection
    # vhsize=(0,0): top left
    # One filter per corner in normalized (v, h) position:
    # BL=bottom-left, BR=bottom-right, TL=top-left, TR=top-right.
    location_filters = {
        'BL': [pyramid.filters_at_vhposition(0.9, 0.1)[0]],
        'BR': [pyramid.filters_at_vhposition(0.9, 0.9 * aspect_ratio)[0]],
        'TL': [pyramid.filters_at_vhposition(0.1, 0.1)[0]],
        'TR': [pyramid.filters_at_vhposition(0.1, 0.9 * aspect_ratio)[0]]
    }

    # normal output
    output = {}
    for location, filters in sorted(location_filters.items()):
        output[location] = pyramid.raw_project_stimulus(luminance_images,
                                                        filters,
                                                        dtype=DTYPE)

    # NOTE(review): reuses (shadows) `vdim`/`hdim` with the downsampled size.
    vdim, hdim = small_size
    vhalf, hhalf = int(vdim / 2), int(hdim / 2)
    # vdim (vstart, vend)
    # hdim (hstart, hend)

    # Pixel slices selecting the quadrant under each corner filter.
    blank_squares = {
        'BL': ((-vhalf, None), (0, hhalf)),
        'BR': ((-vhalf, None), (hhalf, None)),
        'TL': ((0, vhalf), (0, hhalf)),
        'TR': ((0, vhalf), (hhalf, None))
    }

    tcenter = int(nimages / 2)  # temporal midpoint frame index
    controls = {}
    for location, filters in sorted(location_filters.items()):
        fig, ax = plt.subplots()
        # Control stimulus: normalized copy of the movie with the filter's
        # quadrant blanked, plus one bright frame at the midpoint.
        movie = luminance_images.copy()
        movie /= movie.max()
        (vstart, vend), (hstart, hend) = blank_squares[location]
        movie[:, vstart:vend, hstart:hend] = 0.0
        # insert spike in the middle
        movie[tcenter, vstart:vend, hstart:hend] = 100
        ax.matshow(movie[0])
        ax.set_title(location)

        filters = location_filters[location]
        controls[location] = pyramid.raw_project_stimulus(
            movie,
            filters,
            dtype=DTYPE,
        )
        pyramid.show_filter(filters[0])

    # Overlay normal (dotted) vs. control (solid) responses per location.
    fig, ax = plt.subplots(nrows=len(location_filters),
                           sharey=True,
                           sharex=True)
    time = np.arange(nimages) * (1. / stimulus_fps)  # frame times [sec]
    for idx, (location,
              filters) in enumerate(sorted(location_filters.items())):
        ax[idx].plot(time, output[location][0], ':')
        ax[idx].plot(time, controls[location][0], '-')

        ax[idx].set_title(location)
        # Vertical marker at the spike time.
        ax[idx].vlines(tcenter / stimulus_fps, -1000, 1000)
        ax[idx].set_ylim(-200, 200)
        ax[idx].grid(True)

    info = (tcenter / stimulus_fps, tcenter, temp_freq)
    fig.suptitle('spike at %0.02f [sec] (frame #%i) filter=%0.02f[Hz]' % info)
# Beispiel #4
    pyramids,
    utils,
    core,
    io,
)

DTYPE = np.float64  # high precision for the projections below

##############################
# preliminaries
##############################
# Download a small demo video and convert its first frames to
# downsampled luminance images.
video_file = 'http://anwarnunez.github.io/downloads/avsnr150s24fps_tiny.mp4'
nimages = 200
small_size = (72, 128)  # downsampled size (vdim, hdim) 16:9 aspect ratio
luminance_images = io.video2luminance(video_file,
                                      size=small_size,
                                      nimages=nimages)
# NOTE(review): rebinds `nimages` to the actual number of frames loaded.
nimages, vdim, hdim = luminance_images.shape
stimulus_fps = 24
aspect_ratio = hdim / vdim

# Motion-energy pyramid restricted to a single spatial and a single
# temporal frequency band.
pyramid = pyramids.MotionEnergyPyramid(
    stimulus_vhsize=(vdim, hdim),
    stimulus_fps=stimulus_fps,
    spatial_frequencies=[8],  # only small filters
    temporal_frequencies=[12],  # only one temporal freq
    filter_temporal_width=16)
print(pyramid)

# centered projection
# vhsize=(0,0): top left