Beispiel #1
0
def test_VideoStimulus_shift():
    """Shifting a video moves its bright pixels to the expected rows/cols.

    ``shift(x, y)`` shifts the frame content right by ``x`` pixels and down
    by ``y`` pixels (negative values shift left/up).
    """
    # Create a horizontal bar in the middle row of a 5x5, 3-frame video:
    shape = (5, 5, 3)
    ndarray = np.zeros(shape, dtype=np.uint8)
    ndarray[2, :, :] = 255
    stim = VideoStimulus(ndarray)
    # Shift up by 2: bar ends up in the top row of every frame.
    # (The original code computed `data` and then redundantly re-reshaped
    # `.data` inside the loop; use the precomputed view instead.)
    top = stim.shift(0, -2)
    data = top.data.reshape(top.vid_shape)
    for i in range(data.shape[-1]):
        npt.assert_almost_equal(data[0, :, i], 1)
        npt.assert_almost_equal(data[1:, :, i], 0)
    # Shift down by 2: bar ends up in the bottom row:
    bottom = stim.shift(0, 2)
    data = bottom.data.reshape(bottom.vid_shape)
    for i in range(data.shape[-1]):
        npt.assert_almost_equal(data[:4, :, i], 0)
        npt.assert_almost_equal(data[4, :, i], 1)
    # Shift right by 4 and down by 2: only the bottom-right pixel stays lit:
    corner = stim.shift(4, 2)
    data = corner.data.reshape(corner.vid_shape)
    for i in range(data.shape[-1]):
        npt.assert_almost_equal(data[4, 4, i], 1)
        npt.assert_almost_equal(data[:4, :, i], 0)
        npt.assert_almost_equal(data[:, :4, i], 0)
Beispiel #2
0
def test_VideoStimulus():
    """Loading a video from file yields the expected data, shape, metadata,
    time axis, and electrode grid; ``resize`` rescales the frames on load."""
    fname = 'test.mp4'
    shape = (10, 32, 48)
    frames = np.random.rand(*shape)
    mimwrite(fname, (255 * frames).astype(np.uint8), fps=1)
    stim = VideoStimulus(fname)
    n_pixels = np.prod(shape[1:])
    npt.assert_equal(stim.shape, (n_pixels, shape[0]))
    # Data is stored as (pixels, frames): each frame flattened per column.
    expected = frames.reshape((shape[0], -1)).transpose()
    npt.assert_almost_equal(stim.data, expected, decimal=1)
    npt.assert_equal(stim.metadata['source'], fname)
    npt.assert_equal(stim.metadata['source_size'], (shape[2], shape[1]))
    npt.assert_equal(stim.time, np.arange(shape[0]))
    npt.assert_equal(stim.electrodes, np.arange(n_pixels))
    os.remove(fname)

    # Resize on load:
    frames = np.ones(shape)
    mimwrite(fname, (255 * frames).astype(np.uint8), fps=1)
    resize = (16, 32)
    stim = VideoStimulus(fname, resize=resize)
    n_resized = np.prod(resize)
    npt.assert_equal(stim.shape, (n_resized, shape[0]))
    npt.assert_almost_equal(stim.data,
                            np.ones((n_resized, shape[0])),
                            decimal=1)
    npt.assert_equal(stim.metadata['source'], fname)
    npt.assert_equal(stim.metadata['source_size'], (shape[2], shape[1]))
    npt.assert_equal(stim.time, np.arange(shape[0]))
    npt.assert_equal(stim.electrodes, np.arange(n_resized))
    os.remove(fname)
Beispiel #3
0
def test_VideoStimulus_rotate():
    """Rotating a horizontal-bar video yields vertical and diagonal bars."""
    # Horizontal bar in the middle row of a 5x5, 3-frame video:
    shape = (5, 5, 3)
    frames = np.zeros(shape, dtype=np.uint8)
    frames[2, :, :] = 255
    stim = VideoStimulus(frames)
    # 90 degrees turns the horizontal bar into the center column:
    vert = stim.rotate(90, mode='constant')
    data = vert.data.reshape(vert.vid_shape)
    for i in range(data.shape[-1]):
        for col in range(5):
            npt.assert_almost_equal(data[:, col, i], 1 if col == 2 else 0)
    # 45 degrees: diagonal from bottom-left to top-right:
    diag = stim.rotate(45, mode='constant')
    data = diag.data.reshape(diag.vid_shape)
    for i in range(data.shape[-1]):
        for row, col in ((1, 3), (2, 2), (3, 1)):
            npt.assert_almost_equal(data[row, col, i], 1)
        for row, col in ((0, 0), (4, 4)):
            npt.assert_almost_equal(data[row, col, i], 0)
    # -45 degrees: diagonal from top-left to bottom-right:
    diag = stim.rotate(-45, mode='constant')
    data = diag.data.reshape(diag.vid_shape)
    for i in range(data.shape[-1]):
        for row, col in ((1, 1), (2, 2), (3, 3)):
            npt.assert_almost_equal(data[row, col, i], 1)
        for row, col in ((0, 4), (4, 0)):
            npt.assert_almost_equal(data[row, col, i], 0)
Beispiel #4
0
def test_ImageStimulus_center():
    """Centering is a no-op for a centered bar and undoes a vertical shift."""
    # Horizontal bar in the middle row:
    frames = np.zeros((5, 5, 3), dtype=np.uint8)
    frames[2, :, :] = 255
    stim = VideoStimulus(frames)
    # Already centered, so centering changes nothing:
    npt.assert_almost_equal(stim.data, stim.center().data)
    # Shifting down then re-centering restores the original:
    shifted = stim.shift(0, 2)
    npt.assert_almost_equal(stim.data, shifted.center().data)
Beispiel #5
0
def test_VideoStimulus_apply():
    """``apply`` maps a function over the stimulus data element-wise."""
    fname = 'test.mp4'
    shape = (10, 32, 48)
    gray = 129 / 255.0
    ndarray = np.ones(shape) * gray
    mimwrite(fname, (255 * ndarray).astype(np.uint8), fps=1)
    stim = VideoStimulus(fname, as_gray=True)

    applied = stim.apply(lambda x: 0.5 * x)
    npt.assert_almost_equal(applied.data, stim.data * 0.5)
    # Clean up the temporary video file (previously leaked; every sibling
    # test removes its fixture):
    os.remove(fname)
Beispiel #6
0
def test_VideoStimulus_invert():
    """``invert`` maps gray level g to 1 - g without mutating the original."""
    fname = 'test.mp4'
    gray = 129 / 255.0
    frames = np.ones((10, 32, 48, 3)) * gray
    mimwrite(fname, (255 * frames).astype(np.uint8), fps=1)
    stim = VideoStimulus(fname)
    npt.assert_almost_equal(stim.data, gray)
    inverted = stim.invert()
    npt.assert_almost_equal(inverted.data, 1 - gray)
    # Inversion returns a new object; the source stimulus is unchanged:
    npt.assert_almost_equal(stim.data, gray)
    os.remove(fname)
Beispiel #7
0
def test_ProsthesisSystem_reshape_stim(rot, gtype, n_frames):
    """Stimuli assigned to an implant are auto-reshaped to one value per
    electrode, and image orientation is preserved under device rotation."""
    grid = ElectrodeGrid((10, 10), 30, rot=rot, type=gtype)
    implant = ProsthesisSystem(grid)
    # Smoke test the automatic reshaping:
    n_px = 21
    implant.stim = ImageStimulus(np.ones((n_px, n_px, n_frames)).squeeze())
    npt.assert_equal(implant.stim.data.shape, (implant.n_electrodes, 1))
    npt.assert_equal(implant.stim.time, None)
    t_pts = 2 * np.arange(3 * n_frames)
    implant.stim = VideoStimulus(np.ones((n_px, n_px, 3 * n_frames)),
                                 time=t_pts)
    npt.assert_equal(implant.stim.data.shape,
                     (implant.n_electrodes, 3 * n_frames))
    npt.assert_equal(implant.stim.time, t_pts)

    # A horizontal stimulus must still appear horizontal in the percept,
    # even if the device itself is rotated:
    data = np.zeros((50, 50))
    data[20:-20, 10:-10] = 1
    implant.stim = ImageStimulus(data)
    model = ScoreboardModel(xrange=(-1, 1), yrange=(-1, 1), rho=30,
                            xystep=0.02)
    model.build()
    percept = label(model.predict_percept(implant).data.squeeze().T > 0.2)
    npt.assert_almost_equal(regionprops(percept)[0].orientation, 0, decimal=1)
Beispiel #8
0
def test_VideoStimulus_rgb2gray():
    """``rgb2gray`` drops the color channel and leaves the original intact."""
    fname = 'test.mp4'
    shape = (10, 32, 48, 3)
    gray = 129 / 255.0
    ndarray = np.ones(shape) * gray
    mimwrite(fname, (255 * ndarray).astype(np.uint8), fps=1)
    # (Removed an unused `VideoStimulus(fname, as_gray=True)` that loaded the
    # whole video and was never referenced.)

    # Gray levels are between 0 and 1, and can be inverted:
    stim_rgb = VideoStimulus(fname)
    stim_gray = stim_rgb.rgb2gray()
    npt.assert_almost_equal(stim_gray.data, gray)
    # Color channel collapsed: (height, width, time):
    npt.assert_equal(stim_gray.vid_shape, (shape[1], shape[2], shape[0]))
    # Original stim unchanged: (height, width, channels, time):
    npt.assert_equal(stim_rgb.vid_shape,
                     (shape[1], shape[2], shape[3], shape[0]))
    os.remove(fname)
Beispiel #9
0
def test_ImageStimulus_scale():
    """Scaling by 1 is a no-op; scaling down shrinks the bar toward the
    center pixel; a zero scale factor raises."""
    # Horizontal bar in the middle row:
    frames = np.zeros((5, 5, 3), dtype=np.uint8)
    frames[2, :, :] = 255
    stim = VideoStimulus(frames)
    npt.assert_almost_equal(stim.data, stim.scale(1).data)
    for i in range(stim.shape[-1]):
        shrunk = stim.scale(0.1)
        # Only the center pixel (flat index 12 of 25) remains lit:
        npt.assert_almost_equal(shrunk[12, i], 1)
        npt.assert_almost_equal(shrunk[:12, i], 0)
        npt.assert_almost_equal(shrunk[13:, i], 0)
    with pytest.raises(ValueError):
        stim.scale(0)
def test_VideoStimulus_encode_amp_range():
    """Amplitude-encode a video into pulse trains, default and custom ranges.

    NOTE: renamed from ``test_VideoStimulus_encode`` because a second test
    with that exact name is defined later in this module; the later
    definition shadowed this one, so pytest never collected or ran it.
    """
    stim = VideoStimulus(np.random.rand(4, 5, 6))

    # Amplitude encoding in default range:
    enc = stim.encode()
    npt.assert_almost_equal(enc.time[-1], 6000)
    npt.assert_almost_equal(enc.data[:, 4::7].min(), 0)
    npt.assert_almost_equal(enc.data[:, 4::7].max(), 50)

    # Amplitude encoding in custom range:
    enc = stim.encode(amp_range=(2, 43))
    npt.assert_almost_equal(enc.time[-1], 6000)
    npt.assert_almost_equal(enc.data[:, 4::7].min(), 2)
    npt.assert_almost_equal(enc.data[:, 4::7].max(), 43)

    # Pulse must be a valid pulse-type object:
    with pytest.raises(TypeError):
        stim.encode(pulse={'invalid': 1})
    with pytest.raises(ValueError):
        stim.encode(pulse=BostonTrain())
Beispiel #11
0
def test_VideoStimulus_resize():
    """Resizing with one dimension set to -1 infers it from the aspect
    ratio; both dimensions cannot be -1 at once."""
    fname = 'test.mp4'
    gray = 129 / 255.0
    frames = np.ones((10, 32, 48)) * gray
    mimwrite(fname, (255 * frames).astype(np.uint8), fps=1)
    # Gray levels are between 0 and 1:
    stim = VideoStimulus(fname)
    npt.assert_almost_equal(stim.data, gray)
    # Height given, width inferred from the 32x48 aspect ratio:
    npt.assert_equal(stim.resize((13, -1)).vid_shape, (13, 19, 3, 10))
    # Width given, height inferred:
    npt.assert_equal(stim.resize((-1, 24)).vid_shape, (16, 24, 3, 10))
    # At most one dimension may be inferred:
    with pytest.raises(ValueError):
        stim.resize((-1, -1))
    os.remove(fname)
Beispiel #12
0
def test_VideoStimulus_encode():
    """Each pixel's gray level is encoded as pulse amplitude over time."""
    stim = VideoStimulus(np.random.rand(4, 5, 6))

    # Default amplitude range (0, 50). The sampled positions depend on the
    # pulse encoding: the first sample of each pulse is always zero, the
    # second and third lie in the negative (cathodic) phase, etc.
    encoded = stim.encode()
    npt.assert_almost_equal(encoded.time[-1], 6000)
    npt.assert_almost_equal(np.abs(encoded.data[:, ::8]).min(), 0)
    npt.assert_almost_equal(encoded.data[:, 1::8].min(), -50)
    npt.assert_almost_equal(encoded.data[:, 4::8].max(), 50)

    # Custom amplitude range (2, 43):
    encoded = stim.encode(amp_range=(2, 43))
    npt.assert_almost_equal(encoded.time[-1], 6000)
    npt.assert_almost_equal(np.abs(encoded.data[:, ::8]).min(), 0)
    npt.assert_almost_equal(encoded.data[:, 1::8].min(), -43)
    npt.assert_almost_equal(encoded.data[:, 4::8].max(), 43)
    npt.assert_almost_equal(encoded.data[:, 4::8].min(), 2)

    # Pulse must be a valid pulse-type object:
    with pytest.raises(TypeError):
        stim.encode(pulse={'invalid': 1})
    with pytest.raises(ValueError):
        stim.encode(pulse=BostonTrain())
Beispiel #13
0
def fetch_han2021(videos=None,
                  resize=None,
                  as_gray=None,
                  data_path=None,
                  download_if_missing=True):
    """Load the original videos of outdoor scenes from [Han2021]_

    Download the original videos or simulated prosthetic vision of outdoor
    scenes described in [Han2021]_ from https://osf.io/pf2ja/ (303MB) to
    ``data_path``.
    By default, all datasets are stored in '~/pulse2percept_data/', but a
    different path can be specified.

    ===================   =====================
    Number of videos:                        20
    Number of frames:                125 or 126
    Frame size (px):                  320 x 180
    ===================   =====================

    Each :py:class:`~p2p.stimuli.VideoStimulus` object contains a `metadata`
    dict with the following fields:

    ====================  ================================================
    plugin                FFMPEG
    ffmpeg_version        FFMPEG version
    codec                 FFMPEG codec
    pix_fmt               pixel format
    nframes               Number of frames
    duration              Movie duration (seconds)
    fps                   Frame rate (frames per second)
    source                File name of original video (before downscaling)
    source_size           Original image size (before downscaling)
    source_shape          Original video shape (before downscaling)
    size                  Actual image size (after downscaling)
    rotate                Rotation angle
    ====================  ================================================

    .. versionadded:: 0.9

    Parameters
    ----------
    videos: str | list of strings | None, optional
        Video names you want to download. By default, all videos will be
        downloaded. Available names: 'sample1' - 'sample4', 'stim1' - 'stim16'
    resize : (height, width) or None, optional, default: None
        A tuple specifying the desired height and width of each video frame.
        The original size is 320x180 pixels.
    as_gray : bool, optional
        Flag whether to convert the image to grayscale.
        A four-channel image is interpreted as RGBA (e.g., a PNG), and the
        alpha channel will be blended with the color black.
    data_path: string, optional
        Specify another download and cache folder for the dataset. By default
        all pulse2percept data is stored in '~/pulse2percept_data' subfolders.
    download_if_missing : optional
        If False, raise an IOError if the data is not locally available
        instead of trying to download it from the source site.

    Returns
    -------
    data: dict of VideoStimulus
        VideoStimulus of the original videos in [Han2021]_

    """
    if not has_h5py:
        raise ImportError("You do not have h5py installed. "
                          "You can install it via $ pip install h5py.")
    if not has_pandas:
        raise ImportError("You do not have pandas installed. "
                          "You can install it via $ pip install pandas.")
    # Create the local data directory if it doesn't already exist:
    data_path = get_data_dir(data_path)

    # Download the dataset if it doesn't already exist:
    file_path = join(data_path, 'han2021.zip')
    if not isfile(file_path):
        if download_if_missing:
            url = 'https://osf.io/pf2ja/download'
            checksum = 'e31a74a6ac9decfa8d8b9eccd0c71da868f8dfa9f0475a4caca82085307d67b1'
            fetch_url(url, file_path, remote_checksum=checksum)
        else:
            raise IOError(f"No local file {file_path} found")

    # Downscaled frame size reported in the metadata:
    size = (320, 180) if resize is None else resize

    # Open the HDF5 file; close it even if loading a video raises:
    hf = h5py.File(file_path, 'r')
    try:
        if videos is None:
            # Load every video in the archive:
            keys = list(hf.keys())
        else:
            if isinstance(videos, str):
                videos = [videos]
            keys = [name + '.mp4' for name in videos]
            # Validate all requested names before loading anything:
            for key, name in zip(keys, videos):
                if key not in hf.keys():
                    raise ValueError(
                        f"[Han2021]'s original videos do not include '{name}'"
                        f". Available names: 'sample1', 'sample2', 'sample3', 'sample4', "
                        f"'stim1', 'stim2', 'stim3', 'stim4', 'stim5', 'stim6', 'stim7', "
                        f"'stim8', 'stim9', 'stim10', 'stim11', 'stim12', 'stim13', "
                        f"'stim14', 'stim15', 'stim16'")
        data = dict()
        for key in keys:
            vid = np.asarray(hf[key])
            # Mirror the metadata imageio's ffmpeg plugin reports for the
            # source files (previously duplicated in both branches):
            metadata = {
                'plugin': 'ffmpeg',
                'nframes': vid.shape[3],
                'ffmpeg_version': '4.2.2 built with gcc 9.2.1 (GCC) 20200122',
                'codec': 'h264',
                'pix_fmt': 'yuv420p(tv',
                'fps': 25.0,
                'source_size': (960, 540),
                'size': size,
                'rotate': 0,
                'duration': vid.shape[3] / 25.0,
                'source': key,
                'source_shape': (540, 960, 3, vid.shape[3])
            }
            # Strip the '.mp4' suffix for the returned dict key:
            data[key[:-4]] = VideoStimulus(vid,
                                           metadata=metadata,
                                           resize=resize,
                                           as_gray=as_gray)
        return data
    finally:
        hf.close()
Beispiel #14
0
def test_VideoStimulus_crop():
    """Cropping in time (``idx_time`` or ``front``/``back``) and in space
    (``idx_space`` or ``left``/``right``/``top``/``bottom``) selects the
    expected sub-video; invalid argument combinations raise."""
    fname = 'test.mp4'
    shape = (10, 48, 32)
    ndarray = np.random.rand(*shape)
    fps = 1
    mimwrite(fname, (255 * ndarray).astype(np.uint8), fps=fps)
    stim = VideoStimulus(fname, as_gray=True)
    # Crop frames [3, 9) and the spatial box with corners (6, 10)-(36, 30):
    stim_cropped = stim.crop(idx_time=[3, 9], idx_space=[6, 10, 36, 30])
    npt.assert_equal(stim_cropped.vid_shape, (30, 20, 6))
    # Cropped pixels map back to their originals, offset by the crop origin:
    npt.assert_equal(
        stim_cropped.data.reshape(stim_cropped.vid_shape)[3, 7, 2],
        stim.data.reshape(stim.vid_shape)[9, 17, 5])
    npt.assert_equal(
        stim_cropped.data.reshape(stim_cropped.vid_shape)[10, 18, 5],
        stim.data.reshape(stim.vid_shape)[16, 28, 8])
    npt.assert_equal(stim_cropped.time, stim.time[3:9])
    # Electrode labels follow the pixels they came from:
    npt.assert_equal(
        stim.electrodes.reshape(48, 32)[9, 17],
        stim_cropped.electrodes.reshape(30, 20)[3, 7])
    npt.assert_equal(
        stim.electrodes.reshape(48, 32)[16, 28],
        stim_cropped.electrodes.reshape(30, 20)[10, 18])

    # Crop expressed as margins (frames/pixels to trim off each side):
    stim_cropped2 = stim.crop(front=5,
                              back=2,
                              left=10,
                              right=8,
                              top=6,
                              bottom=7)
    npt.assert_equal(stim_cropped2.vid_shape, (35, 14, 3))
    npt.assert_equal(
        stim_cropped2.data.reshape(stim_cropped2.vid_shape)[3, 7, 2],
        stim.data.reshape(stim.vid_shape)[9, 17, 7])
    npt.assert_equal(
        stim_cropped2.data.reshape(stim_cropped2.vid_shape)[10, 9, 1],
        stim.data.reshape(stim.vid_shape)[16, 19, 6])
    npt.assert_equal(stim_cropped2.time, stim.time[5:8])

    # Crop indices (idx_time) and crop margins (front, back) cannot be given
    # at the same time:
    with pytest.raises(ValueError):
        stim.crop(idx_time=[0, 1], front=3)
    with pytest.raises(ValueError):
        stim.crop(idx_time=[3, 9], back=4)
    # idx_time must be [t1, t2], where t1 is the starting frame and t2 is
    # the ending frame:
    with pytest.raises(TypeError):
        stim.crop(idx_time=[0, 1, 2])
    with pytest.raises(ValueError):
        stim.crop(idx_time=[5, 4])
    # Crop margins (front, back) cannot be negative:
    with pytest.raises(ValueError):
        stim.crop(front=-1)
    with pytest.raises(ValueError):
        stim.crop(back=-1)
    # Combined crop margins (front + back) must not exceed the video length:
    with pytest.raises(ValueError):
        stim.crop(front=5, back=6)
    # Crop indices (idx_space) and crop margins (left, right, top, bottom)
    # cannot be given at the same time:
    with pytest.raises(Exception):
        stim.crop(idx_space=[5, 10, 25], left=10)
    with pytest.raises(Exception):
        stim.crop(idx_space=[5, 10, 25, 30], left=10)
    with pytest.raises(Exception):
        stim.crop(idx_space=[5, 10, 25, 30], right=8)
    with pytest.raises(Exception):
        stim.crop(idx_space=[5, 10, 25, 30], top=6)
    with pytest.raises(Exception):
        stim.crop(idx_space=[5, 10, 25, 30], bottom=7)
    # Crop margins (left, right, top, bottom) cannot be negative:
    with pytest.raises(ValueError):
        stim.crop(left=-1)
    with pytest.raises(ValueError):
        stim.crop(right=-1)
    with pytest.raises(ValueError):
        stim.crop(top=-1)
    with pytest.raises(ValueError):
        stim.crop(bottom=-1)
    # Combined crop margins must be smaller than the video frame dimensions:
    with pytest.raises(ValueError):
        stim.crop(left=14, right=20)
    with pytest.raises(ValueError):
        stim.crop(top=12, bottom=38)
    # Crop indices must lie on the video frame:
    with pytest.raises(ValueError):
        stim.crop(idx_space=[-1, 10, 25, 30])
    with pytest.raises(ValueError):
        stim.crop(idx_space=[5, -1, 25, 30])
    with pytest.raises(ValueError):
        stim.crop(idx_space=[5, 10, 50, 30])
    with pytest.raises(ValueError):
        stim.crop(idx_space=[5, 10, 25, 51])
    # idx_space must be [y1, x1, y2, x2], where (y1, x1) is the upper-left
    # and (y2, x2) is the bottom-right corner:
    with pytest.raises(ValueError):
        stim.crop(idx_space=[5, 10, 4, 30])
    with pytest.raises(ValueError):
        stim.crop(idx_space=[5, 10, 25, 9])
Beispiel #15
0
def test_VideoStimulus_play(n_frames):
    """``play`` returns a matplotlib animation with one entry per frame."""
    video = VideoStimulus(np.random.rand(2, 4, n_frames))
    animation = video.play()
    npt.assert_equal(isinstance(animation, FuncAnimation), True)
    npt.assert_equal(len(list(animation.frame_seq)), n_frames)
Beispiel #16
0
def test_VideoStimulus_filter():
    """Spatial filters preserve shape and axes on grayscale video; invalid
    filter names and RGB input are rejected."""
    fname = 'test.mp4'
    gray = 129 / 255.0
    frames = np.ones((10, 32, 48)) * gray
    mimwrite(fname, (255 * frames).astype(np.uint8), fps=1)
    stim = VideoStimulus(fname, as_gray=True)

    for filt_name in ('sobel', 'scharr', 'canny', 'median'):
        filtered = stim.filter(filt_name)
        npt.assert_equal(filtered.shape, stim.shape)
        npt.assert_equal(filtered.vid_shape, stim.vid_shape)
        npt.assert_equal(filtered.electrodes, stim.electrodes)
        npt.assert_equal(filtered.time, stim.time)

    # Filter must be given as a string...
    with pytest.raises(TypeError):
        stim.filter({'invalid'})
    # ...and must be a recognized filter name:
    with pytest.raises(ValueError):
        stim.filter('invalid')

    # Filtering is undefined for RGB video:
    frames = np.ones((10, 32, 48, 3)) * gray
    mimwrite(fname, (255 * frames).astype(np.uint8), fps=1)
    stim = VideoStimulus(fname)
    with pytest.raises(ValueError):
        stim.filter('sobel')

    os.remove(fname)