Code example #1
File: test_viz.py Project: nipy/PySurfer
def test_movie(tmpdir):
    """Test saving a movie of an MEG inverse solution."""
    import imageio
    if sys.version_info < (3,):
        raise SkipTest('imageio ffmpeg requires Python 3')
    # create and setup the Brain instance
    _set_backend()
    brain = Brain(*std_args)
    stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    stc = io.read_stc(stc_fname)
    data = stc['data']
    time = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin']
    brain.add_data(data, colormap='hot', vertices=stc['vertices'],
                   smoothing_steps=10, time=time, time_label='time=%0.2f ms')
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)

    # save movies with different options
    dst = str(tmpdir.join('test.mov'))
    # test the number of frames in the movie
    brain.save_movie(dst)
    frames = imageio.mimread(dst)
    assert len(frames) == 2
    brain.save_movie(dst, time_dilation=10)
    frames = imageio.mimread(dst)
    assert len(frames) == 7
    brain.save_movie(dst, tmin=0.081, tmax=0.102)
    frames = imageio.mimread(dst)
    assert len(frames) == 2
    brain.close()
Code example #2
File: test_dicom.py Project: KentChun33333/imageio
def test_different_read_modes():
    
    dname1, dname2, fname1, fname2 = _prepare()
    
    for fname, dname, n in [(fname1, dname1, 1), (fname2, dname2, 2)]:
        
        # Test imread()
        im = imageio.imread(fname)
        assert isinstance(im, np.ndarray)
        assert im.shape == (512, 512)
        
        # Test mimread()
        ims = imageio.mimread(fname)
        assert isinstance(ims, list)
        assert ims[0].shape == im.shape
        assert len(ims) > 1
        #    
        ims2 = imageio.mimread(dname)
        assert len(ims) == len(ims2)
        
        # Test volread()
        vol = imageio.volread(dname)
        assert vol.ndim == 3
        assert vol.shape[0] > 10
        assert vol.shape[1:] == (512, 512)
        #
        vol2 = imageio.volread(fname)  # fname works as well
        assert (vol == vol2).all()
        
        # Test mvolread()
        vols = imageio.mvolread(dname)
        assert isinstance(vols, list)
        assert len(vols) == n
        assert vols[0].shape == vol.shape
        assert sum([v.shape[0] for v in vols]) == len(ims)
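Example #2 above exercises imageio's four reading entry points side by side: imread returns a single ndarray, mimread a list of images, volread a 3-D volume, and mvolread a list of volumes. A minimal sketch of the same distinction, assuming imageio's bundled sample files (fetched on first use) instead of the DICOM test data:

import imageio

im = imageio.imread("imageio:chelsea.png")    # single image -> ndarray
ims = imageio.mimread("imageio:chelsea.png")  # image series -> list of ndarrays
vol = imageio.volread("imageio:stent.npz")    # volume -> 3-D ndarray
vols = imageio.mvolread("imageio:stent.npz")  # volume series -> list of volumes
print(im.shape, len(ims), vol.shape, len(vols))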
Code example #3
File: test_viz.py Project: mwaskom/PySurfer
def test_movie():
    """Test saving a movie of an MEG inverse solution."""
    import imageio

    # create and setup the Brain instance
    _set_backend()
    brain = Brain(*std_args)
    stc_fname = os.path.join(data_dir, 'meg_source_estimate-lh.stc')
    stc = io.read_stc(stc_fname)
    data = stc['data']
    time = np.arange(data.shape[1]) * stc['tstep'] + stc['tmin']
    brain.add_data(data, colormap='hot', vertices=stc['vertices'],
                   smoothing_steps=10, time=time, time_label='time=%0.2f ms')
    brain.scale_data_colormap(fmin=13, fmid=18, fmax=22, transparent=True)

    # save movies with different options
    tempdir = mkdtemp()
    try:
        dst = os.path.join(tempdir, 'test.mov')
        # test the number of frames in the movie
        brain.save_movie(dst)
        frames = imageio.mimread(dst)
        assert_equal(len(frames), 2)
        brain.save_movie(dst, time_dilation=10)
        frames = imageio.mimread(dst)
        assert_equal(len(frames), 7)
        brain.save_movie(dst, tmin=0.081, tmax=0.102)
        frames = imageio.mimread(dst)
        assert_equal(len(frames), 2)
    finally:
        # clean up
        if not (sys.platform == 'win32' and
                os.getenv('APPVEYOR', 'False') == 'True'):  # cleanup problems
            shutil.rmtree(tempdir)
    brain.close()
Code example #4
File: test_bsdf.py Project: imageio/imageio
def test_singleton():

    im1 = imageio.imread("imageio:chelsea.png")

    fname = os.path.join(test_dir, "chelsea.bsdf")
    imageio.imsave(fname, im1)

    # Does it look alright if we open it in bsdf without extensions?
    raw = bsdf.load(fname, [])
    assert isinstance(raw, dict)
    assert set(raw.keys()) == set(["meta", "array"])
    assert isinstance(raw["meta"], dict)
    assert isinstance(raw["array"], dict)
    assert raw["array"]["shape"] == list(im1.shape)
    assert isinstance(raw["array"]["data"], bytes)

    # Read singleton image as singleton
    im2 = imageio.imread(fname)
    assert np.all(im1 == im2)

    # Read singleton image as series
    ims = imageio.mimread(fname)
    assert len(ims) == 1 and np.all(im1 == ims[0])

    # Read + write back without image extensions
    bsdf.save(fname, bsdf.load(fname))
    im3 = imageio.mimread(fname)
    assert np.all(im1 == im3)
Code example #5
File: test_fits.py Project: imageio/imageio
def test_fits_reading():
    """ Test reading fits """

    need_internet()  # We keep the fits files in the imageio-binary repo

    if IS_PYPY:
        return  # no support for fits format :(

    simple = get_remote_file("images/simple.fits")
    multi = get_remote_file("images/multi.fits")

    # One image
    im = imageio.imread(simple)
    ims = imageio.mimread(simple)
    assert (im == ims[0]).all()
    assert len(ims) == 1

    # Multiple images
    ims = imageio.mimread(multi)
    assert len(ims) == 3

    R = imageio.read(multi)
    assert R.format.name == "FITS"
    ims = list(R)  # == [im for im in R]
    assert len(ims) == 3

    # Fail
    raises = pytest.raises
    raises(IndexError, R.get_data, -1)
    raises(IndexError, R.get_data, 3)
    raises(RuntimeError, R.get_meta_data, None)  # no meta data support
    raises(RuntimeError, R.get_meta_data, 0)  # no meta data support
Code example #6
File: test_bsdf.py Project: imageio/imageio
def test_series():

    im1 = imageio.imread("imageio:chelsea.png")
    ims1 = [im1, im1 * 0.8, im1 * 0.5]

    fname = os.path.join(test_dir, "chelseam.bsdf")
    imageio.mimsave(fname, ims1)

    # Does it look alright if we open it in bsdf without extensions?
    raw = bsdf.load(fname, [])
    assert isinstance(raw, list) and len(raw) == 3
    for r in raw:
        assert set(r.keys()) == set(["meta", "array"])
        assert isinstance(r["meta"], dict)
        assert isinstance(r["array"], dict)
        assert r["array"]["shape"] == list(im1.shape)
        assert isinstance(r["array"]["data"], bytes)

    # Read multi-image as singleton
    im2 = imageio.imread(fname)
    assert np.all(im1 == im2)

    # Read multi-image as series
    ims2 = imageio.mimread(fname)
    assert len(ims2) == 3 and all(np.all(ims1[i] == ims2[i]) for i in range(3))

    # Read + write back without image extensions
    bsdf.save(fname, bsdf.load(fname))
    ims3 = imageio.mimread(fname)
    assert len(ims3) == 3 and all(np.all(ims1[i] == ims3[i]) for i in range(3))
Code example #7
File: test_npz.py Project: KentChun33333/imageio
def test_npz_reading_writing():
    """ Test reading and saveing npz """
    
    if IS_PYPY:
        return  # no support for npz format :(
    
    im2 = np.ones((10, 10), np.uint8) * 2
    im3 = np.ones((10, 10, 10), np.uint8) * 3
    im4 = np.ones((10, 10, 10, 10), np.uint8) * 4
    
    filename1 = os.path.join(test_dir, 'test_npz.npz')

    # One image
    imageio.imsave(filename1, im2)
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 1
    
    # Multiple images
    imageio.mimsave(filename1, [im2, im2, im2])
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 3
    
    # Volumes
    imageio.mvolsave(filename1, [im3, im3])
    im = imageio.volread(filename1)
    ims = imageio.mvolread(filename1)
    assert (im == im3).all()
    assert len(ims) == 2
    
    # Mixed
    W = imageio.save(filename1)
    assert W.format.name == 'NPZ'
    W.append_data(im2)
    W.append_data(im3)
    W.append_data(im4)
    raises(RuntimeError, W.set_meta_data, {})  # no meta data support
    W.close()
    #
    R = imageio.read(filename1)
    assert R.format.name == 'NPZ'
    ims = list(R)  # == [im for im in R]
    assert (ims[0] == im2).all()
    assert (ims[1] == im3).all()
    assert (ims[2] == im4).all()
    # Fail
    raises(IndexError, R.get_data, -1)
    raises(IndexError, R.get_data, 3)
    raises(RuntimeError, R.get_meta_data, None)  # no meta data support
    raises(RuntimeError, R.get_meta_data, 0)  # no meta data support
Code example #8
File: test_tifffile.py Project: ghisvail/imageio
def test_tifffile_reading_writing():
    """ Test reading and saving tiff """
    
    need_internet()  # We keep a test image in the imageio-binary repo
    
    im2 = np.ones((10, 10, 3), np.uint8) * 2

    filename1 = os.path.join(test_dir, 'test_tiff.tiff')

    # One image
    imageio.imsave(filename1, im2)
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 1

    # Multiple images
    imageio.mimsave(filename1, [im2, im2, im2])
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 3, ims[0].shape

    # remote multipage rgb file
    filename2 = get_remote_file('images/multipage_rgb.tif')
    img = imageio.mimread(filename2)
    assert len(img) == 2
    assert img[0].shape == (3, 10, 10)

    # Mixed
    W = imageio.save(filename1)
    W.set_meta_data({'planarconfig': 'planar'})
    assert W.format.name == 'TIFF'
    W.append_data(im2)
    W.append_data(im2)
    W.close()
    #
    R = imageio.read(filename1)
    assert R.format.name == 'TIFF'
    ims = list(R)  # == [im for im in R]
    assert (ims[0] == im2).all()
    meta = R.get_meta_data()
    assert meta['orientation'] == 'top_left'
    # Fail
    raises(IndexError, R.get_data, -1)
    raises(IndexError, R.get_data, 3)

    # Ensure imwrite write works round trip
    filename3 = os.path.join(test_dir, 'test_tiff2.tiff')
    R = imageio.imread(filename1)
    imageio.imwrite(filename3, R)
    R2 = imageio.imread(filename3)
    assert (R == R2).all()
Code example #9
File: test_ffmpeg.py Project: imageio/imageio
def test_write_not_contiguous():

    R = imageio.read(get_remote_file("images/cockatoo.mp4"), "ffmpeg")
    assert R.format is imageio.formats["ffmpeg"]

    fname1 = get_remote_file("images/cockatoo.mp4", test_dir)
    fname2 = fname1[:-4] + ".out.mp4"

    # Read
    ims1 = []
    with imageio.read(fname1, "ffmpeg") as R:
        for i in range(10):
            im = R.get_next_data()
            ims1.append(im)

    # Save non contiguous data
    with imageio.save(fname2, "ffmpeg") as W:
        for im in ims1:
            # Don't slice the first dimension, since the result wouldn't be
            # a multiple of 16. That would cause the writer to expand the
            # data to make it fit, and we wouldn't be able to compare the
            # saved images against the originals.
            im = im[:, ::2]
            assert not im.flags.c_contiguous
            W.append_data(im)

    ims2 = imageio.mimread(fname2, "ffmpeg")

    # Check
    for im1, im2 in zip(ims1, ims2):
        diff = np.abs(im1[:, ::2].astype(np.float32) - im2.astype(np.float32))
        if IS_PYPY:
            assert (diff.sum() / diff.size) < 100
        else:
            assert diff.mean() < 2.5
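The comment in example #9 explains the constraint: the ffmpeg writer pads frames whose dimensions are not multiples of 16, which would break a pixel-wise comparison against the originals. When non-contiguous input is unwanted, the usual explicit fix is to copy each frame into contiguous memory before writing. A minimal sketch, assuming the imageio ffmpeg plugin is installed and using a placeholder output path:

import numpy as np
import imageio

# A strided view is not C-contiguous; 64x32 keeps both sides multiples of 16.
frames = [np.zeros((64, 64, 3), np.uint8)[:, ::2] for _ in range(5)]

with imageio.save("out.mp4", "ffmpeg") as W:  # "out.mp4" is a placeholder
    for frame in frames:
        if not frame.flags.c_contiguous:
            frame = np.ascontiguousarray(frame)  # copy into contiguous memory
        W.append_data(frame)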
Code example #10
File: test_freeimage.py Project: almarklein/imageio
def test_ico():
    
    for float in (False, True):
        for crop in (0, 1, 2):
            for colors in (0, 1, 3, 4):
                fname = fnamebase + '%i.%i.%i.ico' % (float, crop, colors)
                rim = get_ref_im(colors, crop, float)
                imageio.imsave(fname, rim)
                im = imageio.imread(fname)
                mul = 255 if float else 1
                assert_close(rim * mul, im, 0.1)  # lossless
    
    # Meta data
    R = imageio.read(fnamebase + '0.0.0.ico')
    assert isinstance(R.get_meta_data(0), dict)
    assert isinstance(R.get_meta_data(None), dict)  # But this prints a warning
    writer = imageio.save(fnamebase + 'I.ico')
    writer.set_meta_data({})
    writer.close()
    
    # Parameters. Note that with makealpha, RGBA images are read in incorrectly
    im = imageio.imread(fnamebase + '0.0.0.ico', makealpha=True)
    assert im.ndim == 3 and im.shape[-1] == 4
    
    # Parameter fail
    raises(TypeError, imageio.imread, fname, notavalidkwarg=True)
    raises(TypeError, imageio.imsave, fnamebase + '1.gif', im, notavalidk=True)
    
    # Multiple images
    im = get_ref_im(4, 0, 0)
    ims1 = im, np.column_stack([im, im]), np.row_stack([im, im])
    imageio.mimsave(fnamebase + 'I.ico', ims1)
    ims2 = imageio.mimread(fnamebase + 'I.ico')
    for im1, im2 in zip(ims1, ims2):
        assert_close(im1, im2, 0.1)
Code example #11
File: test_swf.py Project: imageio/imageio
def test_types():

    need_internet()

    fname1 = get_remote_file("images/stent.swf", test_dir)
    fname2 = fname1[:-4] + ".out3.swf"

    for dtype in [
        np.uint8,
        np.uint16,
        np.uint32,
        np.uint64,
        np.int8,
        np.int16,
        np.int32,
        np.int64,
        np.float16,
        np.float32,
        np.float64,
    ]:
        for shape in [(100, 1), (100, 3)]:
            # Repeats an identity matrix, just for testing
            im1 = np.dstack((np.identity(shape[0], dtype=dtype),) * shape[1])
            imageio.mimsave(fname2, [im1], "swf")
            im2 = imageio.mimread(fname2, "swf")[0]
            assert im2.shape == (100, 100, 4)
            assert im2.dtype == np.uint8
            if len(shape) == 3 and dtype == np.uint8:
                assert (im1[:, :, 0] == im2[:, :, 0]).all()
Code example #12
def MakeGlitchGifVSH(image, len_=60, blockSize=16, sigma=10, iterations=300, random_=True, Glitch_=False):
    im = Image.open(image)
    VSH = imageio.mimread('vsh.gif')
    VSH = extendgif(VSH, len_)
    nFrames = []

    glitchVar = 0

    path = '/'.join(image.split('/')[:-1])
    pathT = '/'.join(image.split('/')[:-1])
    name = image.split('/')[-1]
    fname = name.split('.')[0]
    path += '/glitch_' + fname + '.gif'

    frames = [im.copy() for a in range(len_)]
    # enumerate keeps i inside range(len_), so VSH[i] cannot run past the
    # extended GIF (the original 1-based counter overran by one frame)
    for i, frame in enumerate(frames):

        if random.randint(0, 15) >= 10 and glitchVar == 0:
            glitchVar = random.randint(1, sigma)
        if glitchVar != 0:
            frame = GlitchRet(frame.convert('RGB'), Glitch_=Glitch_, sigma=glitchVar, blockSize=blockSize,
                              iterations=iterations, random_=random_)
            glitchVar -= 1
        frame = ImageChops.multiply(frame, Image.fromarray(VSH[i]).resize(frame.size).convert('RGB'))
        nFrames.append(np.asarray(frame.convert('RGB')))
    imageio.mimwrite(path, nFrames)

    return path
Code example #13
def test_simpleitk_reading_writing():
    """ Test reading and saveing tiff """
    im2 = np.ones((10, 10, 3), np.uint8) * 2

    filename1 = os.path.join(test_dir, 'test_tiff.tiff')

    # One image
    imageio.imsave(filename1, im2, 'itk')
    im = imageio.imread(filename1, 'itk')
    ims = imageio.mimread(filename1, 'itk')
    assert (im == im2).all()
    assert len(ims) == 1

    # Mixed
    W = imageio.save(filename1, 'itk')
    raises(RuntimeError, W.set_meta_data, 1)
    assert W.format.name == 'ITK'
    W.append_data(im2)
    W.append_data(im2)
    W.close()
    #
    R = imageio.read(filename1, 'itk')
    assert R.format.name == 'ITK'
    ims = list(R)  # == [im for im in R]
    assert (ims[0] == im2).all()
    # Fail
    raises(IndexError, R.get_data, -1)
    raises(IndexError, R.get_data, 3)
    raises(RuntimeError, R.get_meta_data)
Code example #14
File: test_freeimage.py Project: rreilink/imageio
def test_animated_gif():
    
    # Get images
    im = get_ref_im(4, 0, 0)
    ims = []
    for i in range(10):
        im = im.copy()
        im[:, -5:, 0] = i * 20
        ims.append(im)
    
    # Store - animated GIF always poops out RGB
    for float in (False, True):
        for colors in (3, 4):
            ims1 = ims[:]
            if float:
                ims1 = [x.astype(np.float32) / 256 for x in ims1]
            ims1 = [x[:, :, :colors] for x in ims1]
            fname = fnamebase + '.animated.%i.gif' % colors
            imageio.mimsave(fname, ims1, duration=0.2)
            # Retrieve
            ims2 = imageio.mimread(fname)
            ims1 = [x[:, :, :3] for x in ims]  # fresh ref
            ims2 = [x[:, :, :3] for x in ims2]  # discard alpha
            for im1, im2 in zip(ims1, ims2):
                assert_close(im1, im2, 1.1)
    
    # We can also store grayscale
    fname = fnamebase + '.animated.%i.gif' % 1
    imageio.mimsave(fname, [x[:, :, 0] for x in ims], duration=0.2)
    imageio.mimsave(fname, [x[:, :, :1] for x in ims], duration=0.2)
    
    # Irregular durations. You probably want to check this manually (I did)
    duration = [0.1 for i in ims]
    for i in [2, 5, 7]:
        duration[i] = 0.5
    imageio.mimsave(fnamebase + '.animated_irr.gif', ims, duration=duration)
    
    # Other parameters
    imageio.mimsave(fnamebase + '.animated.loop2.gif', ims, loop=2)
    R = imageio.read(fnamebase + '.animated.loop2.gif')
    W = imageio.save(fnamebase + '.animated.palettes100.gif', palettesize=100)
    assert W._palettesize == 128
    # Fail
    raises(IndexError, R.get_meta_data, -1)
    raises(ValueError, imageio.mimsave, fname, ims, palettesize=300)
    raises(ValueError, imageio.mimsave, fname, ims, quantizer='foo')
    raises(ValueError, imageio.mimsave, fname, ims, duration='foo')
    
    # Test subrectangles
    imageio.mimsave(fnamebase + '.subno.gif', ims, subrectangles=False)
    imageio.mimsave(fnamebase + '.subyes.gif', ims, subrectangles=True)
    s1 = os.stat(fnamebase + '.subno.gif').st_size
    s2 = os.stat(fnamebase + '.subyes.gif').st_size
    assert s2 < s1
    
    # Meta (dummy, because always {})
    assert isinstance(imageio.read(fname).get_meta_data(), dict)
Code example #15
File: test_swf.py Project: ghisvail/imageio
def test_read_from_url():
    
    need_internet()
    
    burl = 'https://raw.githubusercontent.com/imageio/imageio-binaries/master/'
    url = burl + 'images/stent.swf'
    
    ims = imageio.mimread(url)
    assert len(ims) == 10
Code example #16
File: test_tifffile.py Project: suryakencana/imageio
def test_tifffile_reading_writing():
    """ Test reading and saveing tiff """
    im2 = np.ones((10, 10, 3), np.uint8) * 2

    filename1 = os.path.join(test_dir, "test_tiff.tiff")

    # One image
    imageio.imsave(filename1, im2)
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 1

    # Multiple images
    imageio.mimsave(filename1, [im2, im2, im2])
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 3, ims[0].shape

    # remote multipage rgb file
    filename2 = get_remote_file("images/multipage_rgb.tif")
    img = imageio.mimread(filename2)
    assert len(img) == 2
    assert img[0].shape == (3, 10, 10)

    # Mixed
    W = imageio.save(filename1)
    W.set_meta_data({"planarconfig": "planar"})
    assert W.format.name == "TIFF"
    W.append_data(im2)
    W.append_data(im2)
    W.close()
    #
    R = imageio.read(filename1)
    assert R.format.name == "TIFF"
    ims = list(R)  # == [im for im in R]
    assert (ims[0] == im2).all()
    meta = R.get_meta_data()
    assert meta["is_rgb"]
    # Fail
    raises(IndexError, R.get_data, -1)
    raises(IndexError, R.get_data, 3)
Code example #17
File: animation.py Project: Stargrazer82301/CAAPR
    def from_file(cls, path):

        """
        This function ...
        :param path:
        :return:
        """

        frames = imageio.mimread(path)
        return cls(frames)
Code example #18
File: animation.py Project: SKIRT/PTS
    def from_file(cls, path):

        """
        This function ...
        :param path:
        :return:
        """

        frames = imageio.mimread(path)
        animation = cls(frames)
        animation.path = path
        return animation
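Examples #17 and #18 show the same from_file classmethod from two forks of the same animation module; both assume a class whose constructor accepts the frame list returned by imageio.mimread. A minimal reconstruction of the surrounding class, with the name and attributes inferred from the snippets rather than taken from the actual PTS/CAAPR source:

import imageio

class Animation:
    # Minimal container inferred from the snippets above; the real class
    # carries more state and behavior.
    def __init__(self, frames):
        self.frames = frames  # list of ndarray frames
        self.path = None      # filled in by from_file (see example #18)

    @classmethod
    def from_file(cls, path):
        frames = imageio.mimread(path)
        animation = cls(frames)
        animation.path = path
        return animation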
Code example #19
File: test_swf.py Project: achennu/imageio
def test_types():
    fname1 = get_remote_file('images/stent.swf', test_dir)
    fname2 = fname1[:-4] + '.out3.swf'
    
    for dtype in [np.uint8, np.float32]:
        for shape in [(100, 100), (100, 100, 1), (100, 100, 3)]:
            im1 = np.empty(shape, dtype)  # empty is nice for testing nan
            imageio.mimsave(fname2, [im1], 'swf')
            im2 = imageio.mimread(fname2, 'swf')[0]
            assert im2.shape == (100, 100, 4)
            assert im2.dtype == np.uint8
            if len(shape) == 3 and dtype == np.uint8:
                assert (im1[:, :, 0] == im2[:, :, 0]).all()
Code example #20
File: test_spe.py Project: imageio/imageio
def test_spe_reading():
    need_internet()
    fname = get_remote_file("images/test_000_.SPE")

    fr1 = np.zeros((32, 32), np.uint16)
    fr2 = np.ones_like(fr1)

    # Test imread
    im = imageio.imread(fname)
    ims = imageio.mimread(fname)

    np.testing.assert_equal(im, fr1)
    np.testing.assert_equal(ims, [fr1, fr2])

    # Test volread
    vol = imageio.volread(fname)
    vols = imageio.mvolread(fname)

    np.testing.assert_equal(vol, [fr1, fr2])
    np.testing.assert_equal(vols, [[fr1, fr2]])

    # Test get_reader
    r = imageio.get_reader(fname)

    np.testing.assert_equal(r.get_data(1), fr2)
    np.testing.assert_equal(list(r), [fr1, fr2])
    pytest.raises(IndexError, r.get_data, -1)
    pytest.raises(IndexError, r.get_data, 2)

    # check metadata
    md = r.get_meta_data()
    assert md["ROIs"] == [
        {"top_left": [238, 187], "bottom_right": [269, 218], "bin": [1, 1]}
    ]
    cmt = [
        "OD 1.0 in r, g                                                    "
        "              ",
        "000200000000000004800000000000000000000000000000000000000000000000"
        "0002000001000X",
        "                                                                  "
        "              ",
        "                                                                  "
        "              ",
        "ACCI2xSEQU-1---10000010001600300EA                              SW"
        "0218COMVER0500",
    ]
    assert md["comments"] == cmt
    np.testing.assert_equal(md["frame_shape"], fr1.shape)
Code example #21
File: test_ffmpeg.py Project: rreilink/imageio
def test_read_and_write():
    
    fname1 = get_remote_file('images/cockatoo.mp4', test_dir)
    fname2 = fname1[:-4] + '.out.mp4'
    
    # Read
    ims1 = []
    with imageio.read(fname1, 'ffmpeg') as R:
        for i in range(10):
            im = R.get_next_data()
            ims1.append(im)
            assert im.shape == (720, 1280, 3)
        assert im.sum() > 0
    
        # Seek
        im = R.get_data(120)
        assert im.shape == (720, 1280, 3)
    
    # Save
    with imageio.save(fname2, 'ffmpeg') as W:
        for im in ims1:
            W.append_data(im)
    
    # Read the result
    ims2 = imageio.mimread(fname2, 'ffmpeg')
    assert len(ims1) == len(ims2)
    
    # Check
    for im1, im2 in zip(ims1, ims2):
        diff = np.abs(im1.astype(np.float32) - im2.astype(np.float32))
        assert diff.mean() < 2.0
    
    # Check loop
    R = imageio.read(fname2, 'ffmpeg', loop=True)
    im1 = R.get_next_data()
    for i in range(1, len(R)):
        R.get_next_data()
    im2 = R.get_next_data()
    im3 = R.get_data(0)
    im4 = R.get_data(2)  # touch skipping frames
    assert (im1 == im2).all()
    assert (im1 == im3).all()
    assert not (im1 == im4).all()
    R.close()
Code example #22
def test_ico():

    if os.getenv("TRAVIS", "") == "true" and sys.version_info >= (3, 4):
        skip("Freeimage ico is unstable for this Travis build")

    for float in (False, True):
        for crop in (0,):
            for colors in (1, 3, 4):
                fname = fnamebase + "%i.%i.%i.ico" % (float, crop, colors)
                rim = get_ref_im(colors, crop, float)
                rim = rim[:32, :32]  # ico needs nice size
                imageio.imsave(fname, rim)
                im = imageio.imread(fname)
                mul = 255 if float else 1
                assert_close(rim * mul, im, 0.1)  # lossless

    # Meta data
    R = imageio.read(fnamebase + "0.0.1.ico")
    assert isinstance(R.get_meta_data(0), dict)
    assert isinstance(R.get_meta_data(None), dict)  # But this prints a warning
    R.close()
    writer = imageio.save(fnamebase + "I.ico")
    writer.set_meta_data({})
    writer.close()

    # Parameters. Note that with makealpha, RGBA images are read in incorrectly
    im = imageio.imread(fnamebase + "0.0.1.ico", makealpha=True)
    assert im.ndim == 3 and im.shape[-1] == 4

    # Parameter fail
    raises(TypeError, imageio.imread, fname, notavalidkwarg=True)
    raises(TypeError, imageio.imsave, fnamebase + "1.gif", im, notavalidk=True)

    if sys.platform.startswith("win"):  # issue #21
        skip("Windows has a known issue with multi-icon files")

    # Multiple images
    im = get_ref_im(4, 0, 0)[:32, :32]
    ims = [np.repeat(np.repeat(im, i, 1), i, 0) for i in (1, 2)]  # SegF on win
    ims = im, np.column_stack((im, im)), np.row_stack((im, im))  # error on win
    imageio.mimsave(fnamebase + "I2.ico", ims)
    ims2 = imageio.mimread(fnamebase + "I2.ico")
    for im1, im2 in zip(ims, ims2):
        assert_close(im1, im2, 0.1)
Code example #23
File: test_bsdf.py Project: imageio/imageio
def test_not_an_image():

    fname = os.path.join(test_dir, "notanimage.bsdf")

    # Not an image not a list
    bsdf.save(fname, 1)
    with raises(RuntimeError):
        imageio.imread(fname)

    # A list with non-images
    bsdf.save(fname, [1])
    with raises(RuntimeError):
        imageio.imread(fname)

    # An empty list could work though
    bsdf.save(fname, [])
    with raises(IndexError):
        imageio.imread(fname)
    assert imageio.mimread(fname) == []
Code example #24
File: test_freeimage.py Project: rreilink/imageio
def test_ico():
    
    for float in (False, True):
        for crop in (0, 1, 2):
            for colors in (1, 3, 4):
                fname = fnamebase + '%i.%i.%i.ico' % (float, crop, colors)
                rim = get_ref_im(colors, crop, float)
                imageio.imsave(fname, rim)
                im = imageio.imread(fname)
                mul = 255 if float else 1
                assert_close(rim * mul, im, 0.1)  # lossless
    
    # Meta data
    R = imageio.read(fnamebase + '0.0.1.ico')
    assert isinstance(R.get_meta_data(0), dict)
    assert isinstance(R.get_meta_data(None), dict)  # But this prints a warning
    R.close()
    writer = imageio.save(fnamebase + 'I.ico')
    writer.set_meta_data({})
    writer.close()
    
    # Parameters. Note that with makealpha, RGBA images are read in incorrectly
    im = imageio.imread(fnamebase + '0.0.1.ico', makealpha=True)
    assert im.ndim == 3 and im.shape[-1] == 4
    
    # Parameter fail
    raises(TypeError, imageio.imread, fname, notavalidkwarg=True)
    raises(TypeError, imageio.imsave, fnamebase + '1.gif', im, notavalidk=True)

    if sys.platform.startswith('win'):  # issue #21
        skip('Windows has a known issue with multi-icon files')
    
    # Multiple images
    im = get_ref_im(4, 0, 0)
    ims = [np.repeat(np.repeat(im, i, 1), i, 0) for i in (1, 2)]  # SegF on win
    ims = im, np.column_stack((im, im)), np.row_stack((im, im))  # error on win
    imageio.mimsave(fnamebase + 'I2.ico', ims)
    ims2 = imageio.mimread(fnamebase + 'I2.ico')
    for im1, im2 in zip(ims, ims2):
        assert_close(im1, im2, 0.1)
Code example #25
File: test_ffmpeg.py Project: ghisvail/imageio
def test_read_and_write():
    need_internet()
    
    R = imageio.read(get_remote_file('images/cockatoo.mp4'), 'ffmpeg')
    assert R.format is imageio.formats['ffmpeg']
    
    fname1 = get_remote_file('images/cockatoo.mp4', test_dir)
    fname2 = fname1[:-4] + '.out.mp4'
    
    # Read
    ims1 = []
    with imageio.read(fname1, 'ffmpeg') as R:
        for i in range(10):
            im = R.get_next_data()
            ims1.append(im)
            assert im.shape == (720, 1280, 3)
            assert (im.sum() / im.size) > 0  # pypy mean is broken
        assert im.sum() > 0
    
        # Seek
        im = R.get_data(120)
        assert im.shape == (720, 1280, 3)
    
    # Save
    with imageio.save(fname2, 'ffmpeg') as W:
        for im in ims1:
            W.append_data(im)
    
    # Read the result
    ims2 = imageio.mimread(fname2, 'ffmpeg')
    assert len(ims1) == len(ims2)
    
    # Check
    for im1, im2 in zip(ims1, ims2):
        diff = np.abs(im1.astype(np.float32) - im2.astype(np.float32))
        if IS_PYPY:
            assert (diff.sum() / diff.size) < 100
        else:
            assert diff.mean() < 2.5
Code example #26
def recursive_copy_files(source_path, destination_path, override=False):
    """
    Recursive copies files from source  to destination directory.
    :param source_path: source directory
    :param destination_path: destination directory
    :param override if True all files will be overridden otherwise skip if file exist
    :return: count of copied files
    """
    startTime = time.time()
    totalFiles = sum([len(files) for r, d, files in os.walk(source_path)])
    files_count = 0

    if not os.path.exists(destination_path):
        os.mkdir(destination_path)
    items = glob.glob(source_path + SEPARATOR + '*')
    for item in items:
        try:
            if os.path.isdir(item):
                path = os.path.join(destination_path,
                                    item.split(SEPARATOR)[-1])
                files_count += recursive_copy_files(source_path=item,
                                                    destination_path=path,
                                                    override=override)
            else:
                file = os.path.join(destination_path,
                                    item.split(SEPARATOR)[-1])
                if not os.path.exists(file) or override:
                    print("Input " + item)
                    print("Output " + file)
                    base_file, ext = os.path.splitext(file)
                    if ext == ".gif":
                        gif = imageio.mimread(item)
                        #for number, image in enumerate(gif):
                        img = Image.fromarray(gif[len(gif) - 1])
                        outputFileName = base_file + ".png"
                        img.save(outputFileName)
                        img = loadImageToArray(outputFileName)
                        img = rescaleImage(img)
                        img = Image.fromarray(img)
                        os.remove(outputFileName)
                        img.save(base_file + ".png")
                    if ext == ".mp4":
                        vid = imageio.get_reader(item, 'ffmpeg')
                        vid_length = vid.get_length()
                        nums = [
                            int(vid_length / 8),
                            int(vid_length / 4),
                            int(vid_length / 2),
                            int(vid_length / 1.5)
                        ]
                        for num in nums:
                            image = vid.get_data(num)
                            outputFileName = base_file + str(num) + ".png"
                            imageio.imwrite(outputFileName, image)
                            img = loadImageToArray(outputFileName)
                            img = rescaleImage(img)
                            img = Image.fromarray(img)
                            os.remove(outputFileName)
                            #print(base_file, ext)
                            img.save(base_file + "_" + str(num) + ".png")
                    else:
                        img = loadImageToArray(item)
                        img = rescaleImage(img)
                        img = Image.fromarray(img)

                        #print(base_file, ext)
                        img.save(base_file + ".png")
                        #shutil.copyfile(item, file)
        except Exception as e:
            print("Exception in recursive_copy_files(): " + str(e))
        files_count += 1
        timeElapsed = time.time() - startTime
        timeLeft = (totalFiles / files_count * timeElapsed) - timeElapsed
        sys.stdout.write("\rTotal time left - " + str(timeLeft).split(".")[0] +
                         " s\n")
        sys.stdout.flush()

    return files_count
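The .mp4 branch above samples frames at fixed fractions of the clip with reader.get_data. A condensed sketch of just that sampling step, with a placeholder path; note that vid.get_length() used above is the older call, and newer imageio releases expose count_frames() on the ffmpeg reader instead:

import imageio

reader = imageio.get_reader("input.mp4", "ffmpeg")  # placeholder path
n = reader.count_frames()  # older code uses reader.get_length()
for frac in (8, 4, 2, 1.5):
    idx = int(n / frac)
    imageio.imwrite("frame_%d.png" % idx, reader.get_data(idx))
reader.close()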
Code example #27
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage.transform import resize
#from IPython.display import HTML
import warnings
warnings.filterwarnings("ignore")

source_image = imageio.imread('C:/Users/Sharan Babu/Desktop/02.png')
driving_video = imageio.mimread('C:/Users/Sharan Babu/Desktop/00.mp4')


#Resize image and video to 256x256

source_image = resize(source_image, (256, 256))[..., :3]
driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]

def display(source, driving, generated=None):
    fig = plt.figure(figsize=(8 + 4 * (generated is not None), 6))

    ims = []
    for i in range(len(driving)):
        cols = [source]
        cols.append(driving[i])
        if generated is not None:
            cols.append(generated[i])
        im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
        plt.axis('off')
        ims.append([im])

    # The source was truncated here; a typical completion for this demo
    # assembles the collected artists into an animation (interval values assumed)
    ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=1000)
    plt.close()
    return ani
Code example #28
    newImg = np.zeros((row, col), np.uint8)
    for x in range(0, 255):
        H = (cdf[x] * 255 - cdf[np.min(img)] * 255) / (cdf[254] -
                                                       cdf[np.min(img)])
        H = round(H)
        newImg[img == x] = H

    return newImg


"""
--------------------------------------------------------------------------------------------
"""
img1 = cv2.imread('1.tiff', 0)
img2 = cv2.imread('2.png', 0)
gif = imageio.mimread('3.gif')
img3 = cv2.cvtColor(gif[0], cv2.COLOR_BGR2GRAY)
img4 = cv2.imread('4.jpg', 0)
img5 = cv2.imread('5.bmp', 0)
"""
showing image 
"""
plt.subplot(151), plt.imshow(
    img1, cmap="gray"), plt.axis("off"), plt.title("format:TIFF")
plt.subplot(152), plt.imshow(
    img2, cmap="gray"), plt.axis("off"), plt.title("format:PNG")
plt.subplot(153), plt.imshow(
    img3, cmap="gray"), plt.axis("off"), plt.title("format:GIF")
plt.subplot(154), plt.imshow(
    img4, cmap="gray"), plt.axis("off"), plt.title("format:JPG")
plt.subplot(155), plt.imshow(
    img5, cmap="gray"), plt.axis("off"), plt.title("format:BMP")
plt.show()  # the source was truncated mid-call; completed following the pattern above
Code example #29
File: test_core.py Project: almarklein/imageio
def test_functions():
    """ Test the user-facing API functions """
    
    # Test help(); it prints stuff, so we just check that it runs ok
    imageio.help()  # should print overview
    imageio.help('PNG')  # should print about PNG
    
    fname1 = get_remote_file('images/chelsea.png', test_dir)
    fname2 = fname1[:-3] + 'jpg'
    fname3 = fname1[:-3] + 'notavalidext'
    open(fname3, 'wb')
    
    # Test read()
    R1 = imageio.read(fname1)
    R2 = imageio.read(fname1, 'png')
    assert R1.format is R2.format
    # Fail
    raises(ValueError, imageio.read, fname3)  # existing but not readable
    raises(IOError, imageio.read, 'notexisting.barf')
    raises(IndexError, imageio.read, fname1, 'notexistingformat')
    
    # Test save()
    W1 = imageio.save(fname2)
    W2 = imageio.save(fname2, 'JPG')
    assert W1.format is W2.format
    # Fail
    raises(ValueError, imageio.save, 'wtf.notexistingfile')
    
    # Test imread()
    im1 = imageio.imread(fname1)
    im2 = imageio.imread(fname1, 'png')
    assert im1.shape[2] == 3
    assert np.all(im1 == im2)
    
    # Test imsave()
    if os.path.isfile(fname2):
        os.remove(fname2)
    assert not os.path.isfile(fname2)
    imageio.imsave(fname2, im1[:, :, 0])
    imageio.imsave(fname2, im1)
    assert os.path.isfile(fname2)
    
    # Test mimread()
    fname3 = get_remote_file('images/newtonscradle.gif', test_dir)
    ims = imageio.mimread(fname3)
    assert isinstance(ims, list)
    assert len(ims) > 1
    assert ims[0].ndim == 3
    assert ims[0].shape[2] in (1, 3, 4)
    
    if IS_PYPY:
        return  # no support for npz format :(
    
    # Test mimsave()
    fname5 = fname3[:-4] + '2.npz'
    if os.path.isfile(fname5):
        os.remove(fname5)
    assert not os.path.isfile(fname5)
    imageio.mimsave(fname5, [im[:, :, 0] for im in ims])
    imageio.mimsave(fname5, ims)
    assert os.path.isfile(fname5)
    
    # Test volread()
    fname4 = get_remote_file('images/stent.npz', test_dir)
    vol = imageio.volread(fname4)
    assert vol.ndim == 3
    assert vol.shape[0] == 256
    assert vol.shape[1] == 128
    assert vol.shape[2] == 128
    
    # Test volsave()
    volc = np.zeros((10, 10, 10, 3), np.uint8)  # color volume
    fname6 = fname4[:-4] + '2.npz'
    if os.path.isfile(fname6):
        os.remove(fname6)
    assert not os.path.isfile(fname6)
    imageio.volsave(fname6, volc)
    imageio.volsave(fname6, vol)
    assert os.path.isfile(fname6)
    
    # Test mvolread()
    vols = imageio.mvolread(fname4)
    assert isinstance(vols, list)
    assert len(vols) == 1
    assert vols[0].shape == vol.shape
    
    # Test mvolsave()
    if os.path.isfile(fname6):
        os.remove(fname6)
    assert not os.path.isfile(fname6)
    imageio.mvolsave(fname6, [volc, volc])
    imageio.mvolsave(fname6, vols)
    assert os.path.isfile(fname6)
    
    # Fail for save functions
    raises(ValueError, imageio.imsave, fname2, np.zeros((100, 100, 5)))
    raises(ValueError, imageio.imsave, fname2, 42)
    raises(ValueError, imageio.mimsave, fname5, [np.zeros((100, 100, 5))])
    raises(ValueError, imageio.mimsave, fname5, [42])
    raises(ValueError, imageio.volsave, fname4, np.zeros((100, 100, 100, 40)))
    raises(ValueError, imageio.volsave, fname4, 42)
    raises(ValueError, imageio.mvolsave, fname4, [np.zeros((90, 90, 90, 40))])
    raises(ValueError, imageio.mvolsave, fname4, [42])
Code example #30
File: test_tifffile.py Project: imageio/imageio
def test_tifffile_reading_writing():
    """ Test reading and saving tiff """
    
    need_internet()  # We keep a test image in the imageio-binary repo
    
    im2 = np.ones((10, 10, 3), np.uint8) * 2

    filename1 = os.path.join(test_dir, 'test_tiff.tiff')

    # One image
    imageio.imsave(filename1, im2)
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert im.shape == im2.shape
    assert (im == im2).all()
    assert len(ims) == 1
    
    # Multiple images
    imageio.mimsave(filename1, [im2, im2, im2])
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert im.shape == im2.shape
    assert (im == im2).all()  # note: this does not imply that the shapes match!
    assert len(ims) == 3
    for i in range(3):
        assert ims[i].shape == im2.shape
        assert (ims[i] == im2).all()

    # Read all planes as one array - we call it a volume for clarity
    vol = imageio.volread(filename1)
    vols = imageio.mvolread(filename1)
    assert vol.shape == (3, ) + im2.shape
    assert len(vols) == 1 and vol.shape == vols[0].shape
    for i in range(3):
        assert (vol[i] == im2).all()
    
    # remote multipage rgb file
    filename2 = get_remote_file('images/multipage_rgb.tif')
    img = imageio.mimread(filename2)
    assert len(img) == 2
    assert img[0].shape == (3, 10, 10)

    # Mixed
    W = imageio.save(filename1)
    W.set_meta_data({'planarconfig': 'SEPARATE'})  # was "planar"
    assert W.format.name == 'TIFF'
    W.append_data(im2)
    W.append_data(im2)
    W.close()
    #
    R = imageio.read(filename1)
    assert R.format.name == 'TIFF'
    ims = list(R)  # == [im for im in R]
    assert (ims[0] == im2).all()
    # meta = R.get_meta_data()
    # assert meta['orientation'] == 'top_left'  # not there in later version
    # Fail
    raises(IndexError, R.get_data, -1)
    raises(IndexError, R.get_data, 3)
    
    # Ensure imread + imwrite works round trip
    filename3 = os.path.join(test_dir, 'test_tiff2.tiff')
    im1 = imageio.imread(filename1)
    imageio.imwrite(filename3, im1)
    im3 = imageio.imread(filename3)
    assert im1.ndim == 3
    assert im1.shape == im3.shape
    assert (im1 == im3).all()
    
    # Ensure imread + imwrite works round trip - volume like
    filename3 = os.path.join(test_dir, 'test_tiff2.tiff')
    im1 = imageio.volread(filename1)
    imageio.volwrite(filename3, im1)
    im3 = imageio.volread(filename3)
    assert im1.ndim == 4
    assert im1.shape == im3.shape
    assert (im1 == im3).all()

    # Read metadata
    md = imageio.get_reader(filename2).get_meta_data()
    assert md['is_imagej'] is None
    assert md['description'] == 'shape=(2,3,10,10)'
    assert md['description1'] == ''
    assert md['datetime'] == datetime.datetime(2015, 5, 9, 9, 8, 29)
    assert md['software'] == 'tifffile.py'

    # Write metadata
    dt = datetime.datetime(2018, 8, 6, 15, 35, 5)
    w = imageio.get_writer(filename1, software='testsoftware')
    w.append_data(np.zeros((10, 10)), meta={'description': 'test desc',
                                            'datetime': dt})
    w.close()
    r = imageio.get_reader(filename1)
    md = r.get_meta_data()
    assert 'datetime' in md
    assert md['datetime'] == dt
    assert 'software' in md
    assert md['software'] == 'testsoftware'
    assert 'description' in md
    assert md['description'] == 'test desc'
Code example #31
File: test_pillow.py Project: imageio/imageio
def test_animated_gif():

    # Read newton's cradle
    ims = imageio.mimread("imageio:newtonscradle.gif")
    assert len(ims) == 36
    for im in ims:
        assert im.shape == (150, 200, 4)
        assert im.min() > 0
        assert im.max() <= 255

    # Get images
    im = get_ref_im(4, 0, 0)
    ims = []
    for i in range(10):
        im = im.copy()
        im[:, -5:, 0] = i * 20
        ims.append(im)

    # Store - animated GIF always poops out RGB
    for isfloat in (False, True):
        for colors in (3, 4):
            ims1 = ims[:]
            if isfloat:
                ims1 = [x.astype(np.float32) / 256 for x in ims1]
            ims1 = [x[:, :, :colors] for x in ims1]
            fname = fnamebase + ".animated.%i.gif" % colors
            imageio.mimsave(fname, ims1, duration=0.2)
            # Retrieve
            print("fooo", fname, isfloat, colors)
            ims2 = imageio.mimread(fname)
            ims1 = [x[:, :, :3] for x in ims]  # fresh ref
            ims2 = [x[:, :, :3] for x in ims2]  # discard alpha
            for im1, im2 in zip(ims1, ims2):
                assert_close(im1, im2, 1.1)

    # We can also store grayscale
    fname = fnamebase + ".animated.%i.gif" % 1
    imageio.mimsave(fname, [x[:, :, 0] for x in ims], duration=0.2)
    imageio.mimsave(fname, [x[:, :, :1] for x in ims], duration=0.2)

    # Irregular durations. You probably want to check this manually (I did)
    duration = [0.1 for i in ims]
    for i in [2, 5, 7]:
        duration[i] = 0.5
    imageio.mimsave(fnamebase + ".animated_irr.gif", ims, duration=duration)

    # Other parameters
    imageio.mimsave(fnamebase + ".animated.loop2.gif", ims, loop=2, fps=20)
    R = imageio.read(fnamebase + ".animated.loop2.gif")
    W = imageio.save(fnamebase + ".animated.palettes100.gif", palettesize=100)
    assert W._writer.opt_palette_size == 128
    # Fail
    assert raises(IndexError, R.get_meta_data, -1)
    assert raises(ValueError, imageio.mimsave, fname, ims, palettesize=300)
    assert raises(ValueError, imageio.mimsave, fname, ims, quantizer="foo")
    assert raises(ValueError, imageio.mimsave, fname, ims, duration="foo")

    # Add one duplicate image to ims to exercise subrectangles with no change
    ims.append(ims[-1])

    # Test subrectangles
    imageio.mimsave(fnamebase + ".subno.gif", ims, subrectangles=False)
    imageio.mimsave(fnamebase + ".subyes.gif", ims, subrectangles=True)
    s1 = os.stat(fnamebase + ".subno.gif").st_size
    s2 = os.stat(fnamebase + ".subyes.gif").st_size
    assert s2 < s1

    # Meta (dummy, because always {})
    imageio.mimsave(fname, [x[:, :, 0] for x in ims], duration=0.2)
    assert isinstance(imageio.read(fname).get_meta_data(), dict)
Code example #32
File: demo.py Project: johndpope/face_animation
    parser.set_defaults(adapt_scale=False)

    opt = parser.parse_args()
    print("TEST!!")
    print(opt.relative)
    print(opt.adapt_scale)
    print(opt.cpu)
    print(opt.find_best_frame)
    print(opt.best_frame)
    print(opt.result_video)

    source_image = imageio.imread(opt.source_image)
    reader = imageio.get_reader(opt.driving_video)
    fps = reader.get_meta_data()['fps']
    reader.close()
    driving_video = imageio.mimread(opt.driving_video, memtest=False)

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [
        resize(frame, (256, 256))[..., :3] for frame in driving_video
    ]
    generator, kp_detector = load_checkpoints(config_path=opt.config,
                                              checkpoint_path=opt.checkpoint,
                                              cpu=opt.cpu)

    if opt.find_best_frame or opt.best_frame is not None:
        i = opt.best_frame if opt.best_frame is not None else find_best_frame(
            source_image, driving_video, cpu=opt.cpu)
        print("Best frame: " + str(i))
        driving_forward = driving_video[i:]
        driving_backward = driving_video[:(i + 1)][::-1]
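This example (like #36 below) passes memtest=False to imageio.mimread. mimread guards against decoding an unexpectedly large file into memory and raises once an internal size limit is exceeded, so the guard has to be disabled for long driving videos. A two-line sketch with a placeholder path:

import imageio

# memtest=False disables mimread's memory-size guard; every frame is still
# decoded into RAM at once, so this is a deliberate trade-off for long videos.
frames = imageio.mimread("driving.mp4", memtest=False)  # placeholder path
print(len(frames))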
Code example #33
                Z_ = W * (1/(2+rho)*(2*X + rho*temp)) + (1-W)*temp
                converged_2 = torch.norm(Z-Z_)/torch.norm(Z) < eps or j>limit
                print((torch.norm(Z-Z_)/torch.norm(Z)).item())
                Z = Z_[:]
                j+=1
            temp = (Z - U @ V.transpose(0,1))
            print(torch.norm(temp))
            Y = Y + rho*temp
            rho = min(rho*mu, 1e20)
            converged = torch.norm(temp)<eps or i>limit
            i+=1
imshow(U @ V.transpose(0,1))
imshow(Z)

# %%
gif = imageio.mimread('mit_logo_2.gif')
gif = gif[:75]
imageio.mimwrite('mit_logo_2_short.mp4', gif)

# %%

# with open('netflix-data/combined_data_1.txt') as f:
#     txt = f.read()
#
# lines = txt.split("\n")
# user_id_to_idx = {}
# idx = 0
# t = tqdm(lines)
# for line in t:
#     if len(line)==0:
#         continue
Code example #34
File: test_swf.py Project: marine008/imageio
def test_reading_saving():

    need_internet()

    fname1 = get_remote_file("images/stent.swf", test_dir)
    fname2 = fname1[:-4] + ".out.swf"
    fname3 = fname1[:-4] + ".compressed.swf"
    fname4 = fname1[:-4] + ".out2.swf"

    # Read
    R = imageio.read(fname1)
    assert len(R) == 10
    assert R.get_meta_data() == {}  # always empty dict
    ims1 = []
    for im in R:
        assert im.shape == (657, 451, 4)
        assert mean(im) > 0
        ims1.append(im)
    # Seek
    assert (R.get_data(3) == ims1[3]).all()
    # Fails
    raises(IndexError, R.get_data, -1)  # No negative index
    raises(IndexError, R.get_data, 10)  # Out of bounds
    R.close()

    # Test loop
    R = imageio.read(fname1, loop=True)
    assert (R.get_data(10) == ims1[0]).all()

    # setting meta data is ignored
    W = imageio.save(fname2)
    W.set_meta_data({"foo": 3})
    W.close()

    # Just make sure mimread works
    assert len(imageio.mimread(fname1)) == 10

    # I'm not sure why, but the below does not work on pypy, which is weird,
    # because the file *is* closed, but somehow it's not flushed? Ah well ...
    if IS_PYPY:
        return

    # Write and re-read, now without loop, and with html page
    imageio.mimsave(fname2, ims1, loop=False, html=True)
    ims2 = imageio.mimread(fname2)

    # Check images. We can expect exact match, since
    # SWF is lossless.
    assert len(ims1) == len(ims2)
    for im1, im2 in zip(ims1, ims2):
        assert (im1 == im2).all()

    # Test compressed
    imageio.mimsave(fname3, ims2, compress=True)
    ims3 = imageio.mimread(fname3)
    assert len(ims1) == len(ims3)
    for im1, im3 in zip(ims1, ims3):
        assert (im1 == im3).all()

    # Test conventional use. Bonus: we don't officially support this.
    _swf = imageio.plugins.swf.load_lib()
    _swf.write_swf(fname4, ims1)
    ims4 = _swf.read_swf(fname4)
    assert len(ims1) == len(ims4)
    for im1, im4 in zip(ims1, ims4):
        assert (im1 == im4).all()

    # We want to manually validate that this file plays in 3d party tools
    # So we write a small HTML5 doc that we can load
    html = """<!DOCTYPE html>
            <html>
            <body>
            Original:
            <embed src="%s">
            <br ><br >
            Written:
            <embed src="%s">
            <br ><br >
            Compressed:
            <embed src="%s">
            <br ><br >
            Written 2:
            <embed src="%s">
            </body>
            </html>
            """ % (
        fname1,
        fname2,
        fname3,
        fname4,
    )

    with open(os.path.join(test_dir, "test_swf.html"), "wb") as f:
        for line in html.splitlines():
            f.write(line.strip().encode("utf-8") + b"\n")
Code example #35
File: test_ffmpeg.py Project: tvajtay/imageio
def test_read_and_write():

    R = imageio.read(get_remote_file("images/cockatoo.mp4"), "ffmpeg")
    assert R.format is imageio.formats["ffmpeg"]

    fname1 = get_remote_file("images/cockatoo.mp4", test_dir)
    fname2 = fname1[:-4] + ".out.mp4"

    frame1, frame2, frame3 = 41, 131, 227

    # Read
    ims1 = []
    with imageio.read(fname1, "ffmpeg") as R:
        for i in range(10):
            im = R.get_next_data()
            ims1.append(im)
            assert im.shape == (720, 1280, 3)
            assert (im.sum() / im.size) > 0  # pypy mean is broken
        assert im.sum() > 0

        # Seek to reference frames in steps. OUR code will skip steps
        im11 = R.get_data(frame1)
        im12 = R.get_data(frame2)
        im13 = R.get_data(frame3)

        # Now go backwards, seek will kick in
        R.get_next_data()
        im23 = R.get_data(frame3)
        im22 = R.get_data(frame2)
        im21 = R.get_data(frame1)

        # Also use set_image_index
        R.set_image_index(frame2)
        im32 = R.get_next_data()
        R.set_image_index(frame3)
        im33 = R.get_next_data()
        R.set_image_index(frame1)
        im31 = R.get_next_data()

        for im in (im11, im12, im13, im21, im22, im23, im31, im32, im33):
            assert im.shape == (720, 1280, 3)

        assert (im11 == im21).all() and (im11 == im31).all()
        assert (im12 == im22).all() and (im12 == im32).all()
        assert (im13 == im23).all() and (im13 == im33).all()

        assert not (im11 == im12).all()
        assert not (im11 == im13).all()

    # Save
    with imageio.save(fname2, "ffmpeg") as W:
        for im in ims1:
            W.append_data(im)

    # Read the result
    ims2 = imageio.mimread(fname2, "ffmpeg")
    assert len(ims1) == len(ims2)
    for im in ims2:
        assert im.shape == (720, 1280, 3)

    # Check
    for im1, im2 in zip(ims1, ims2):
        diff = np.abs(im1.astype(np.float32) - im2.astype(np.float32))
        if IS_PYPY:
            assert (diff.sum() / diff.size) < 100
        else:
            assert diff.mean() < 2.5
Code example #36
    parser.add_argument("--swap_index", default="1,2,5", type=lambda x: list(map(int, x.split(','))),
                        help='index of swaped parts')
    parser.add_argument("--hard", action="store_true", help="use hard segmentation labels for blending")
    parser.add_argument("--use_source_segmentation", action="store_true", help="use source segmentation for swaping")
    parser.add_argument("--first_order_motion_model", action="store_true", help="use first order model for alignment")
    parser.add_argument("--supervised", action="store_true",
                        help="use supervised segmentation labels for blending. Only for faces.")

    parser.add_argument("--cpu", action="store_true", help="cpu mode")


    opt = parser.parse_args()

    source_image = imageio.imread(opt.source_image)

    target_video = imageio.mimread(opt.target_video, memtest=False)
    source_image = resize(source_image, (256, 256))[..., :3]
    target_video = [resize(frame, (256, 256))[..., :3] for frame in target_video]

    blend_scale = (256 / 4) / 512 if opt.supervised else 1
    reconstruction_module, segmentation_module = load_checkpoints(opt.config, opt.checkpoint, blend_scale=blend_scale, 
                                                                  first_order_motion_model=opt.first_order_motion_model, cpu=opt.cpu)

    if opt.supervised:
        face_parser = load_face_parser(opt.cpu)
    else:
        face_parser = None
    predictions = make_video(opt.swap_index, source_image, target_video, reconstruction_module, segmentation_module,
                             face_parser, hard=opt.hard, use_source_segmentation=opt.use_source_segmentation, cpu=opt.cpu)

    # Read fps of the target video and save result with the same fps
Code example #37
File: test_freeimage.py Project: atknin/imageio
def test_animated_gif():

    if sys.platform.startswith('darwin'):
        skip('On OSX quantization of freeimage is unstable')

    # Get images
    im = get_ref_im(4, 0, 0)
    ims = []
    for i in range(10):
        im = im.copy()
        im[:, -5:, 0] = i * 20
        ims.append(im)

    # Store - animated GIF always poops out RGB
    for isfloat in (False, True):
        for colors in (3, 4):
            ims1 = ims[:]
            if isfloat:
                ims1 = [x.astype(np.float32) / 256 for x in ims1]
            ims1 = [x[:, :, :colors] for x in ims1]
            fname = fnamebase + '.animated.%i.gif' % colors
            imageio.mimsave(fname, ims1, duration=0.2)
            # Retrieve
            ims2 = imageio.mimread(fname)
            ims1 = [x[:, :, :3] for x in ims]  # fresh ref
            ims2 = [x[:, :, :3] for x in ims2]  # discard alpha
            for im1, im2 in zip(ims1, ims2):
                assert_close(im1, im2, 1.1)

    # We can also store grayscale
    fname = fnamebase + '.animated.%i.gif' % 1
    imageio.mimsave(fname, [x[:, :, 0] for x in ims], duration=0.2)
    imageio.mimsave(fname, [x[:, :, :1] for x in ims], duration=0.2)

    # Irregular durations. You probably want to check this manually (I did)
    duration = [0.1 for i in ims]
    for i in [2, 5, 7]:
        duration[i] = 0.5
    imageio.mimsave(fnamebase + '.animated_irr.gif', ims, duration=duration)

    # Other parameters
    imageio.mimsave(fnamebase + '.animated.loop2.gif', ims, loop=2, fps=20)
    R = imageio.read(fnamebase + '.animated.loop2.gif')
    W = imageio.save(fnamebase + '.animated.palettes100.gif', palettesize=100)
    assert W._palettesize == 128
    # Fail
    raises(IndexError, R.get_meta_data, -1)
    raises(ValueError, imageio.mimsave, fname, ims, palettesize=300)
    raises(ValueError, imageio.mimsave, fname, ims, quantizer='foo')
    raises(ValueError, imageio.mimsave, fname, ims, duration='foo')

    # Add one duplicate image to ims to exercise subrectangles with no change
    ims.append(ims[-1])

    # Test subrectangles
    imageio.mimsave(fnamebase + '.subno.gif', ims, subrectangles=False)
    imageio.mimsave(fnamebase + '.subyes.gif', ims, subrectangles=True)
    s1 = os.stat(fnamebase + '.subno.gif').st_size
    s2 = os.stat(fnamebase + '.subyes.gif').st_size
    assert s2 < s1

    # Meta (dummy, because always {})
    assert isinstance(imageio.read(fname).get_meta_data(), dict)
Code example #38

# GAME = 'MsPacman-v0'
# mspacman_color = np.array([210, 164, 74]).mean()
#
# # Preprocess the image input
# def preprocess(obs):
#     img = obs[1:176:2, ::2]                # crop and downsize
#     img = img.sum(axis=2)                  # to greyscale
#     img[img == mspacman_color] = 0         # Improve contrast
#     img = (img // 3 - 128).astype(np.int64)
#     img = img.reshape(88, 80, 1)
#     return img
#
#
# env = gym.make(GAME)
# s = env.reset()
# prev_img = preprocess(s)
# s_, r, done, _ = env.step(1)
# img = preprocess(s_)
# im = Image.fromarray(img - prev_img, 'RGB')
# im.show()

import imageio

gif_original = 'results/random.gif'
gif_speed_up = 'results/speed_up.gif'

gif = imageio.mimread(gif_original)

imageio.mimsave(gif_speed_up, gif, fps=30)
Code example #39

######          Loading pictures and video   #######

if selection == 2:
    # Import GIF
    url = "https://media.giphy.com/media/3bb5jcIADH9ewHnpl9/giphy.gif"
    fname = "tmp.gif"

    ## Read the gif from the web, save to the disk
    imdata = urllib.request.urlopen(url).read()
    imbytes = bytearray(imdata)
    open(fname,"wb+").write(imdata)

    ## Read the gif from disk to `RGB`s using `imageio.mimread`
    gif = imageio.mimread(fname)
    nums = len(gif)


    # convert from RGB to BGR
    augmentation_gif = [cv.cvtColor(img, cv.COLOR_RGB2BGR) for img in gif]
    # Create the padded augmentation gif
    height_gif, width_gif, cc_gif = augmentation_gif[0].shape
    original = cv.VideoCapture('img/video_outside.avi')
else:
    augmentation_layer = cv.imread('img/AugmentedLayerCyanLab.png')
    original = cv.VideoCapture('img/original_short.avi')



fourcc = cv.VideoWriter_fourcc(*"DIVX")
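The snippet ends right after creating the FourCC code. A minimal sketch of how the VideoWriter would typically be opened and fed, where the output path is an assumption and the fps and frame size are taken from the opened capture:

fps = original.get(cv.CAP_PROP_FPS) or 25  # fall back if the container reports 0
width = int(original.get(cv.CAP_PROP_FRAME_WIDTH))
height = int(original.get(cv.CAP_PROP_FRAME_HEIGHT))
out = cv.VideoWriter('img/output.avi', fourcc, fps, (width, height))  # hypothetical path
while True:
    ok, frame = original.read()
    if not ok:
        break
    out.write(frame)
out.release()
original.release()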
コード例 #40
0
ファイル: test_pillow.py プロジェクト: yyrjl/imageio
def test_animated_gif():

    # Read newton's cradle
    ims = imageio.mimread("imageio:newtonscradle.gif")
    assert len(ims) == 36
    for im in ims:
        assert im.shape == (150, 200, 4)
        assert im.min() > 0
        assert im.max() <= 255

    # Get images
    im = get_ref_im(4, 0, 0)
    ims = []
    for i in range(10):
        im = im.copy()
        im[:, -5:, 0] = i * 20
        ims.append(im)

    # Store - animated GIF always poops out RGB
    for isfloat in (False, True):
        for colors in (3, 4):
            ims1 = ims[:]
            if isfloat:
                ims1 = [x.astype(np.float32) / 256 for x in ims1]
            ims1 = [x[:, :, :colors] for x in ims1]
            fname = fnamebase + ".animated.%i.gif" % colors
            imageio.mimsave(fname, ims1, duration=0.2)
            # Retrieve
            print("fooo", fname, isfloat, colors)
            ims2 = imageio.mimread(fname)
            ims1 = [x[:, :, :3] for x in ims]  # fresh ref
            ims2 = [x[:, :, :3] for x in ims2]  # discard alpha
            for im1, im2 in zip(ims1, ims2):
                assert_close(im1, im2, 1.1)

    # We can also store grayscale
    fname = fnamebase + ".animated.%i.gif" % 1
    imageio.mimsave(fname, [x[:, :, 0] for x in ims], duration=0.2)
    imageio.mimsave(fname, [x[:, :, :1] for x in ims], duration=0.2)

    # Irregular duration. You probably want to check this manually (I did)
    duration = [0.1 for i in ims]
    for i in [2, 5, 7]:
        duration[i] = 0.5
    imageio.mimsave(fnamebase + ".animated_irr.gif", ims, duration=duration)

    # Other parameters
    imageio.mimsave(fnamebase + ".animated.loop2.gif", ims, loop=2, fps=20)
    R = imageio.read(fnamebase + ".animated.loop2.gif")
    W = imageio.save(fnamebase + ".animated.palettes100.gif", palettesize=100)
    assert W._writer.opt_palette_size == 128
    # Fail
    assert raises(IndexError, R.get_meta_data, -1)
    assert raises(ValueError, imageio.mimsave, fname, ims, palettesize=300)
    assert raises(ValueError, imageio.mimsave, fname, ims, quantizer="foo")
    assert raises(ValueError, imageio.mimsave, fname, ims, duration="foo")

    # Add one duplicate image to ims to exercise the subrectangle path with no change
    ims.append(ims[-1])

    # Test subrectangles
    imageio.mimsave(fnamebase + ".subno.gif", ims, subrectangles=False)
    imageio.mimsave(fnamebase + ".subyes.gif", ims, subrectangles=True)
    s1 = os.stat(fnamebase + ".subno.gif").st_size
    s2 = os.stat(fnamebase + ".subyes.gif").st_size
    assert s2 < s1

    # Meta (dummy, because always {})
    imageio.mimsave(fname, [x[:, :, 0] for x in ims], duration=0.2)
    assert isinstance(imageio.read(fname).get_meta_data(), dict)
コード例 #41
0
net.to(device)
net_path = os.path.join(model_path,
                        '{}_epoch-HairMatteNet.ckpt'.format(resume_epochs))
net.load_state_dict(
    torch.load(net_path, map_location=lambda storage, loc: storage))
net.eval()

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

############ Main block ############
start = time.time()
video = imageio.mimread(path_input, memtest=False)
video = [frame[..., :3] for frame in video]
old_size = video[0].shape
batch_size = len(video)
print('Number of frames: ', batch_size)

data = Generator(video, transform=transform)
dataloader = DataLoader(data,
                        shuffle=False,
                        batch_size=batch_size,
                        num_workers=1)

result = []
for i, data in enumerate(dataloader, 0):
    image = data.to(device)
    with torch.no_grad():
コード例 #42
0
def main(args):
    if not os.path.isfile(args.input):
        print("Error : {} file doesn't exist".format(args.input),
              file=sys.stderr)
        exit(1)
    start = time.time()

    gpu_ids = args.gpu

    prefs = {
        "titsize": args.bsize,
        "aursize": args.asize,
        "nipsize": args.nsize,
        "vagsize": args.vsize,
        "hairsize": args.hsize
    }

    if args.cpu:
        gpu_ids = None
    elif gpu_ids is None:
        gpu_ids = [0]

    if not args.gif:
        # Read image
        file = open(args.input, "rb")
        image_bytes = bytearray(file.read())
        np_image = np.asarray(image_bytes, dtype=np.uint8)
        image = cv2.imdecode(np_image, cv2.IMREAD_COLOR)

        # See if image loaded correctly
        if image is None:
            print("Error : {} file is not valid".format(args.input),
                  file=sys.stderr)
            exit(1)

        # Preprocess
        if args.overlay:
            original_image = image.copy()
            image = utils.crop_input(image, args.overlay[0], args.overlay[1],
                                     args.overlay[2], args.overlay[3])
        elif args.auto_resize:
            image = utils.resize_input(image)
        elif args.auto_resize_crop:
            image = utils.resize_crop_input(image)
        elif args.auto_rescale:
            image = utils.rescale_input(image)

        # See if image has the correct shape after preprocessing
        if image.shape != (512, 512, 3):
            print("Error : image is not 512 x 512, got shape: {}".format(
                image.shape),
                  file=sys.stderr)
            exit(1)

        # Process
        if args.n_runs is None or args.n_runs == 1:
            result = process(image, gpu_ids, prefs)

            if args.overlay:
                result = utils.overlay_original_img(original_image, result,
                                                    args.overlay[0],
                                                    args.overlay[1],
                                                    args.overlay[2],
                                                    args.overlay[3])

            cv2.imwrite(args.output, result)
        else:
            base_output_filename = utils.strip_file_extension(
                args.output, ".png")

            def process_one_image(i):
                result = process(image, gpu_ids, prefs)

                if args.overlay:
                    result = utils.overlay_original_img(
                        original_image, result, args.overlay[0],
                        args.overlay[1], args.overlay[2], args.overlay[3])
                cv2.imwrite(base_output_filename + "%03d.png" % i, result)

            if args.cpu:
                pool = ThreadPool(args.n_cores)
                pool.map(process_one_image, range(args.n_runs))
                pool.close()
                pool.join()
            else:
                for i in range(args.n_runs):
                    process_one_image(i)
    else:
        # Read images
        gif_imgs = imageio.mimread(args.input)
        print("Total {} frames in the gif!".format(len(gif_imgs)))

        # Preprocess
        if args.auto_resize:
            gif_imgs = [utils.resize_input(img) for img in gif_imgs]
        elif args.auto_resize_crop:
            gif_imgs = [utils.resize_crop_input(img) for img in gif_imgs]
        elif args.auto_rescale:
            gif_imgs = [utils.rescale_input(img) for img in gif_imgs]

        # Process
        if args.n_runs is None or args.n_runs == 1:
            process_gif_wrapper(
                gif_imgs,
                args.output if args.output != "output.png" else "output.gif",
                gpu_ids, prefs, args.n_cores)
        else:
            base_output_filename = utils.strip_file_extension(
                args.output,
                ".gif") if args.output != "output.png" else "output"
            for i in range(args.n_runs):
                process_gif_wrapper(gif_imgs,
                                    base_output_filename + "%03d.gif" % i,
                                    gpu_ids, prefs, args.n_cores)

    end = time.time()
    duration = end - start

    # Done
    print("Done! We have taken", round(duration, 2), "seconds")

    # Exit
    sys.exit()
コード例 #43
0
import glob
import numpy as np
import imageio

vids = sorted(glob.glob("*.mp4"))

all_vids = []
for v in range(len(vids)):
    if v == 0:
        all_vids = imageio.mimread(vids[v], memtest=False)[:100]
    else:
        co = imageio.mimread(vids[v], memtest=False)
        try:
            composite = []
            for f in range(100):
                frame = np.concatenate((all_vids[f], co[f]), axis=0)
                composite.append(frame)
            all_vids = composite
        except:
            # Skip clips whose frame size or count doesn't match
            pass

imageio.mimsave("all_comp_xiph.mp4", all_vids, fps=15)
コード例 #44
0
ファイル: gifstack.py プロジェクト: ihattan/BlockCipherModes
import numpy as np
import imageio as im

ecb = im.mimread('C:\\Users\\Ian\\Desktop\\blockcipher\\animated\\an_ecb.gif')
ctr = im.mimread('C:\\Users\\Ian\\Desktop\\blockcipher\\animated\\an_ctr.gif')

# Stack the two animations frame by frame, one above the other
# (both GIFs must have the same frame count, width and channels).
stack = np.hstack((ecb, ctr))
im.mimwrite('C:\\Users\\Ian\\Desktop\\blockcipher\\animated\\stack.gif',
            stack,
            duration=0.01)
コード例 #45
0
ファイル: untitled0.py プロジェクト: ys-lee/addframetophoto
import sys

import imageio
import numpy as np
from PIL import Image, ImageDraw, ImageFont, ImageSequence

gif = imageio.mimread("pp.gif")
nums = len(gif)

im = Image.fromarray(np.uint8(gif[1]))
im = im.convert('RGB')
im.thumbnail((300, 300))
im.show()
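ImageSequence is imported above but never used; with Pillow it can walk the frames directly instead of going through imageio. A minimal sketch under that assumption:

with Image.open("pp.gif") as gif_img:
    for i, frame in enumerate(ImageSequence.Iterator(gif_img)):
        frame.convert("RGB").save("frame_%03d.png" % i)  # hypothetical output names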
コード例 #46
0
    '''
    # Get user supplied values
    imagePath = sys.argv[1]
    facePath = sys.argv[2]

    if len(sys.argv) < 4:
        outputPath = "output"
    else:
        outputPath = sys.argv[3]

    print("arguments given", imagePath, facePath, outputPath)

    # if gif
    if imagePath.endswith(".gif"):
        # Read in images
        images = imageio.mimread(imagePath, format="GIF-PIL")
        face = imageio.imread(facePath)
        output = []

        for image in images:
            image = find_and_replace(image, face)
            output.append(image)

            if DEBUG:
                cv2.imshow("BGR", output[-1])
                cv2.waitKey(0)

                cv2.imshow("RGB", output[-1][:, :, ::-1])
                cv2.waitKey(0)

        if DEBUG:
コード例 #47
0
ファイル: crop.py プロジェクト: LandesLab/KNOT
# The footer for information about how these images were cropped #
footer = '_(%i,%i)_(%ix%i)_(f%i-%i)' % (*Roi_cen, *Roi_size, *Frames)

#%% LOAD AND CROP %%#
print("Starting...")
for f in files:
    filename = f.split('\\')[-1]
    f_name = FOLD_EXP + filename if (FOLD_EXP not in f) else f

    # Skip over already cropped files #
    if (filename[:3] == 'roi'): continue
    print(filename, end='\t')

    # Loading - KEEP IN MIND: THIS IS FOR SINGLE PLANE, MONOCHROMATIC IMAGES #
    data = np.array(imageio.mimread(f_name, memtest=False))
    while (len(np.shape(data)) > 3):
        data = data[0, ...]
    print('| Loaded |', end='\t')

    # Visualize before cropping #
    if (VISUAL):
        plt.figure(figsize=(12, 6))
        ax = plt.axes(position=[0, 0, 0.5, 1])
        ax.imshow(data[Frames[0], :, :], cmap='gray')
        ax.plot([rng_x[0], rng_x[0], rng_x[-1], rng_x[-1], rng_x[0]],
                [rng_y[0], rng_y[-1], rng_y[-1], rng_y[0], rng_y[0]],
                c='r')
        ax.text(10, 20, "Frame %i" % (Frames[0]), color='w')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
コード例 #48
0
# Load an image using OpenCV
cv_img = cv2.cvtColor(cv2.imread("chatbot-image.png"), cv2.COLOR_BGR2RGB)

# Get the image dimensions (OpenCV stores image data as NumPy ndarray)
height, width, no_channels = cv_img.shape

# Create a canvas that can fit the above image
canvas = tkinter.Canvas(window, width=width, height=height)
canvas.pack()

# Use PIL (Pillow) to convert the NumPy ndarray to a PhotoImage
photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img))

# Add a PhotoImage to the Canvas
canvas.create_image(0, 0, image=photo, anchor=tkinter.NW)

gif = imageio.mimread("chatgif.gif")
nums = len(gif)
print("Total {} frames in the gif!".format(nums))

imgs = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in gif]

btn_blur = tkinter.Button(window,
                          text="Transition",
                          width=50,
                          command=transition)
btn_blur.pack(anchor=tkinter.CENTER, expand=True)

# Run the window loop
window.mainloop()
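The button's command refers to a transition function that is not shown. A minimal sketch of what such a callback could look like, assuming it should play the GIF frames once on the existing canvas (the ~40 ms delay is an assumption); in the actual script this definition would have to appear before the Button that references it:

def transition():
    # Hypothetical callback: draw each GIF frame on the canvas in turn.
    global photo
    for img in imgs:
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(rgb))
        canvas.create_image(0, 0, image=photo, anchor=tkinter.NW)
        window.update()
        window.after(40)  # roughly 25 fps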
コード例 #49
0
    color_list = os.listdir(root)
    a = 0
    for color in color_list:
        # if "拼色" in color or "花色" in color:
        #     continue
        file_names = os.listdir(os.path.join(root, color))
        for file_name in file_names:
            file_path = os.path.join(root, color, file_name)
            file_path = file_path.encode('utf-8').decode('utf-8')
            # Load the sample image, as in sklearn's example
            # china = cv2.imread(file_path)
            # file_path = "hua2.jpg"
            china = cv2.imdecode(np.fromfile(file_path, dtype=np.uint8), -1)
            ## imdecode reads RGB; if OpenCV processing is needed later, convert to BGR (the image colours will shift after conversion)
            if china is None:
                tmp = imageio.mimread(file_path)
                if tmp is not None:
                    imt = np.array(tmp)
                    imt = imt[0]
                    china = imt[:, :, 0:3]

            china = cv2.cvtColor(china, cv2.COLOR_RGB2BGR)

            n_colors = get_n_color(china)
            # cv2.imwrite(f"image2/{n_colors}_{a}.jpg", china)
            a += 1
            # cv2.imshow("image", china)
            # cv2.waitKey(800)
            # n_colors = 5
            china = np.array(china, dtype=np.float64) / 255
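The imdecode-plus-mimread fallback above is the interesting pattern: cv2.imdecode(np.fromfile(...)) handles non-ASCII paths, and imageio.mimread catches formats (such as GIF) for which imdecode returns None. A hedged helper isolating it (robust_imread_bgr is a hypothetical name):

def robust_imread_bgr(file_path):
    # Decode with OpenCV first; fall back to imageio's first frame.
    img = cv2.imdecode(np.fromfile(file_path, dtype=np.uint8), -1)
    if img is None:
        frames = imageio.mimread(file_path)
        if not frames:
            return None
        img = np.array(frames[0])[:, :, :3]
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # imageio yields RGB
    return img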
コード例 #50
0
def loadFrame(args):
    mean = np.asarray([0.485, 0.456, 0.406], np.float32)
    std = np.asarray([0.229, 0.224, 0.225], np.float32)

    curr_w = 320
    curr_h = 240
    height = width = 224
    (filename, augment) = args

    data = np.zeros((3, height, width), dtype=np.float32)

    try:
        ### load video frames (the HDF5 variant is kept commented out)
        # filename = filename.replace('.avi','.hdf5')
        # filename = filename.replace('UCF-101','UCF-101-hdf5')
        # h = h5py.File(filename,'r')
        h = imageio.mimread(filename)
        nFrames = len(h) - 1  # mimread returns a list of frames
        frame_index = np.random.randint(nFrames)
        frame = h[frame_index]

        if (augment == True):
            ## RANDOM CROP - crop 70-100% of original size
            ## don't maintain aspect ratio
            if (np.random.randint(2) == 0):
                resize_factor_w = 0.3 * np.random.rand() + 0.7
                resize_factor_h = 0.3 * np.random.rand() + 0.7
                w1 = int(curr_w * resize_factor_w)
                h1 = int(curr_h * resize_factor_h)
                w = np.random.randint(curr_w - w1)
                h = np.random.randint(curr_h - h1)
                frame = frame[h:(h + h1), w:(w + w1)]

            ## FLIP
            if (np.random.randint(2) == 0):
                frame = cv2.flip(frame, 1)

            frame = cv2.resize(frame, (width, height))
            frame = frame.astype(np.float32)

            ## Brightness +/- 15
            brightness = 30
            random_add = np.random.randint(brightness + 1) - brightness / 2.0
            frame += random_add
            frame[frame > 255] = 255.0
            frame[frame < 0] = 0.0

        else:
            # don't augment
            frame = cv2.resize(frame, (width, height))
            frame = frame.astype(np.float32)

        ## resnet model was trained on images with mean subtracted
        frame = frame / 255.0
        frame = (frame - mean) / std
        frame = frame.transpose(2, 0, 1)
        data[:, :, :] = frame
    except:
        print("Exception: " + filename)
        data = np.array([])
    return data
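loadFrame receives its inputs as a single (filename, augment) tuple, which suggests it is meant to be mapped over a worker pool. A minimal usage sketch, with hypothetical file names:

from multiprocessing import Pool

filenames = ["clip_000.avi", "clip_001.avi"]  # hypothetical paths
with Pool(4) as pool:
    batch = pool.map(loadFrame, [(name, True) for name in filenames])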
コード例 #51
0
        raise argparse.ArgumentTypeError('Boolean value expected.')

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Start Image Animation')
    parser.add_argument("--image_path", type=str, default='./data/images/02.png')
    parser.add_argument("--video_path", type=str, default='./data/videos/04.mp4')
    parser.add_argument("--use_relative",type=str2bool, nargs='?',
                        const=True, default=True)
    #parser.add_argument("--exclude_pattern", nargs="+", default=[""])
    args = parser.parse_args()


    image_path = args.image_path
    video_path = args.video_path
    source_image = imageio.imread(image_path)
    driving_video = imageio.mimread(video_path, memtest=False)


    #Resize image and video to 256x256

    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = np.array([resize(frame, (256, 256))[..., :3] for frame in driving_video])

    imageio.imwrite('./generated/downscaled_image.png', source_image)
    generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml',
                                checkpoint_path='./models/vox-cpk.pth.tar')

    predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=args.use_relative)
    #predictions = []
    #orig_img = driving_video[0]
    #for i in tqdm(range(np.shape(driving_video)[0]-1)):
コード例 #52
0
# Just for lulz.

# REQUIRE: pip install fpdf2 imageio
# USAGE: ./gif2pdf.py $gif_filepath

import sys
from fpdf import FPDF
from imageio import mimread
from PIL import Image

in_filepath = sys.argv[1]
if not in_filepath.endswith(".gif"):
    print("Input file must be a GIF", file=sys.stderr)
    sys.exit(1)

imgs = mimread(in_filepath)
size = (imgs[0].shape[1], imgs[0].shape[0])

pdf = FPDF(format=size)
pdf.set_margin(0)
duration_in_secs = 0
for img in imgs:
    pdf.add_page(duration=duration_in_secs)
    # Converting the numpy.ndarray to a PIL.Image:
    pdf.image(Image.frombytes("RGBA", size, img.tobytes()), w=pdf.epw)
    duration_in_secs = img.meta["duration"] / 1000 or 0.04

out_filepath = in_filepath.replace(".gif", ".pdf")
pdf.output(out_filepath)
print(f'You can now open "{out_filepath}" in Adobe Acrobat Reader,')
print("press CTRL+L to launch presentation mode,")
コード例 #53
0
def vidcreategif(inputvideo, outputgif, fps, colors):
    videofile = imageio.mimread(inputvideo)
    imageio.mimsave(outputgif, videofile, fps=fps, palettesize=int(colors))
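A usage sketch, with hypothetical file names:

vidcreategif("clip.mp4", "clip.gif", fps=15, colors=64)  # 64-colour palette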
コード例 #54
0
ファイル: image.py プロジェクト: zshipko/imagepy
def imread_frames(filename, *args, **kw):
    '''reads multiple frames'''
    try:
        return [Image(i) for i in mimread(filename, *args, **kw)]
    except ValueError:
        return None
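A usage sketch, again with a hypothetical file name; note the function returns None when mimread raises ValueError rather than propagating the error:

frames = imread_frames("animation.gif")
if frames is not None:
    print(len(frames), "frames read")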
コード例 #55
0
def plot(x, self, n_sample_per_video=5, max_num_video=4):
    opt = self.opt
    x = [x_ for x_ in x]
    if opt.n_past + opt.n_eval > len(x):
        for _ in range(opt.n_past + opt.n_eval - len(x)):
            x.append(x[-1])
    gen_seq = [[] for i in range(n_sample_per_video)]
    gt_seq = [x[i] for i in range(len(x))]
    recon_seq = [x[i] for i in range(len(x))]

    # get reconstruction
    frame_predictor_hidden = None
    posterior_hidden = None
    prior_hidden = None

    x_in = x[0]
    batch_size = x_in.shape[0]
    for i in range(1, opt.n_eval):
        h = self.encoder(x_in)
        if opt.last_frame_skip or i < opt.n_past + 1:
            h, skip = h
        else:
            h, _ = h

        h_target = self.encoder(x[i])
        h_target = h_target[0]

        _, z_t, _, posterior_hidden = self.posterior(h_target,
                                                     posterior_hidden)
        h, frame_predictor_hidden = self.frame_predictor(
            torch.cat([h, z_t], 1), frame_predictor_hidden)
        x_pred = self.decoder([h, skip]).argmax(dim=1, keepdim=True)
        recon_seq[i] = x_pred
        x_in = x[i]

    # get prediction
    for s in range(n_sample_per_video):
        frame_predictor_hidden = None
        posterior_hidden = None
        prior_hidden = None

        gen_seq[s].append(x[0])
        x_in = x[0]
        for i in range(1, opt.n_eval):
            h = self.encoder(x_in)
            if opt.last_frame_skip or i < opt.n_past + 1:
                h, skip = h
            else:
                h, _ = h
            if i < opt.n_past:
                h_target = self.encoder(x[i])
                h_target = h_target[0]
                z_t, _, _, posterior_hidden = self.posterior(
                    h_target, posterior_hidden)
                _, _, _, prior_hidden = self.prior(h, prior_hidden)
                _, frame_predictor_hidden = self.frame_predictor(
                    torch.cat([h, z_t], 1), frame_predictor_hidden)
                x_in = x[i]
            else:
                z_t, _, _, prior_hidden = self.prior(h, prior_hidden)
                h, frame_predictor_hidden = self.frame_predictor(
                    torch.cat([h, z_t], 1), frame_predictor_hidden)
                x_in = self.decoder([h, skip]).argmax(dim=1, keepdim=True)

            gen_seq[s].append(x_in)

    gifs = [[] for t in range(opt.n_eval)]
    nrow = min(batch_size, max_num_video)
    for i in range(nrow):
        # ground truth sequence
        row = []
        for t in range(opt.n_eval):
            row.append(gt_seq[t][i])

        s_list = list(range(n_sample_per_video))
        for ss in range(len(s_list)):
            s = s_list[ss]
            for t in range(opt.n_eval):
                row.append(gen_seq[s][t][i])
        for t in range(opt.n_eval):
            n_pad = 2
            if t < opt.n_past:
                canvas_color = green = [0., 1., 0.]  # green
            elif opt.n_past <= t < opt.n_past + opt.n_future:
                canvas_color = yellow = [1., 1., 0.]  # yellow
            else:
                canvas_color = red = [1., 0., 0.]  # red

            row = []

            gen = self.colorize(gt_seq[t][i].unsqueeze(0)).float().div(255)
            h, w = gen.shape[-2:]
            canvas = torch.tensor(green, device=gen.device,
                                  dtype=gen.dtype).view(3, 1, 1).repeat(
                                      1, h + n_pad * 2, w + n_pad * 2)
            canvas[:, n_pad:n_pad + h, n_pad:n_pad + w] = gen
            row.append(canvas)

            recon = self.colorize(
                recon_seq[t][i].unsqueeze(0)).float().div(255)
            h, w = recon.shape[-2:]
            canvas_recon = torch.tensor(green,
                                        device=recon.device,
                                        dtype=recon.dtype).view(
                                            3, 1,
                                            1).repeat(1, h + n_pad * 2,
                                                      w + n_pad * 2)
            canvas_recon[:, n_pad:n_pad + h, n_pad:n_pad + w] = recon
            row.append(canvas_recon)

            for ss in range(len(s_list)):
                s = s_list[ss]

                gen = self.colorize(
                    gen_seq[s][t][i].unsqueeze(0)).float().div(255)
                h, w = gen.shape[-2:]
                canvas = torch.tensor(canvas_color,
                                      device=gen.device,
                                      dtype=gen.dtype).view(3, 1, 1).repeat(
                                          1, h + n_pad * 2, w + n_pad * 2)
                canvas[:, n_pad:n_pad + h, n_pad:n_pad + w] = gen
                row.append(canvas)

            gifs[t].append(row)

    fname = '%s/sample_%d.gif' % (self.sample_dir, self.global_iter)
    save_gif(fname, gifs)

    # Read the GIF back as (T, H, W, C) and permute to (T, C, H, W) for add_video
    result = torch.from_numpy(np.array(imageio.mimread(
        fname, memtest=False))).transpose(2, 3).transpose(1, 2)
    self.writer.add_video('video_pred', result.unsqueeze(0), self.global_iter)
コード例 #56
0
import imageio
import numpy as np
from skimage.transform import resize
import warnings

warnings.filterwarnings("ignore")

source_image = imageio.imread('Adolf-Hitler-1933.jpg')
driving_video = imageio.mimread('jadoo.mp4', memtest=False)

#Resize image and video to 256x256

source_image = resize(source_image, (256, 256))[..., :3]
driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]

from demo import load_checkpoints

generator, kp_detector = load_checkpoints(
    config_path='config/vox-256.yaml', checkpoint_path='vox-adv-cpk.pth.tar')

from demo import make_animation
from skimage import img_as_ubyte

predictions = make_animation(source_image,
                             driving_video,
                             generator,
                             kp_detector,
                             relative=True)

#save resulting video
imageio.mimsave('generated.mp4',
コード例 #57
0
ファイル: test_swf.py プロジェクト: ghisvail/imageio
def test_reading_saving():
    
    need_internet()
    
    fname1 = get_remote_file('images/stent.swf', test_dir)
    fname2 = fname1[:-4] + '.out.swf'
    fname3 = fname1[:-4] + '.compressed.swf'
    fname4 = fname1[:-4] + '.out2.swf'
    
    # Read
    R = imageio.read(fname1)
    assert len(R) == 10
    assert R.get_meta_data() == {}  # always empty dict
    ims1 = []
    for im in R:
        assert im.shape == (657, 451, 4)
        assert mean(im) > 0
        ims1.append(im)
    # Seek
    assert (R.get_data(3) == ims1[3]).all()
    # Fails
    raises(IndexError, R.get_data, -1)  # No negative index
    raises(IndexError, R.get_data, 10)  # Out of bounds
    R.close()
    
    # Test loop
    R = imageio.read(fname1, loop=True)
    assert (R.get_data(10) == ims1[0]).all()
    
    # setting meta data is ignored
    W = imageio.save(fname2)
    W.set_meta_data({'foo': 3})
    W.close()
    
    # Write and re-read, now without loop, and with html page
    imageio.mimsave(fname2, ims1, loop=False, html=True)
    ims2 = imageio.mimread(fname2)
    
    # Check images. We can expect exact match, since
    # SWF is lossless.
    assert len(ims1) == len(ims2)
    for im1, im2 in zip(ims1, ims2):
        assert (im1 == im2).all()

    # Test compressed
    imageio.mimsave(fname3, ims2, compress=True)
    ims3 = imageio.mimread(fname3)
    assert len(ims1) == len(ims3)
    for im1, im3 in zip(ims1, ims3):
        assert (im1 == im3).all()
    
    # Test conventional use. Bonus: we don't officially support this.
    _swf = imageio.plugins.swf.load_lib()
    _swf.write_swf(fname4, ims1)
    ims4 = _swf.read_swf(fname4)
    assert len(ims1) == len(ims4)
    for im1, im4 in zip(ims1, ims4):
        assert (im1 == im4).all()
    
    # We want to manually validate that this file plays in third-party tools,
    # so we write a small HTML5 doc that we can load
    html = """<!DOCTYPE html>
            <html>
            <body>
            
            Original:
            <embed src="%s">
            <br ><br >
            Written:
            <embed src="%s">
            <br ><br >
            Compressed:
            <embed src="%s">
            <br ><br >
            Written 2:
            <embed src="%s">
            </body>
            </html>
            """ % (fname1, fname2, fname3, fname4)
    
    with open(os.path.join(test_dir, 'test_swf.html'), 'wb') as f:
        for line in html.splitlines():
            f.write(line.strip().encode('utf-8') + b'\n')
コード例 #58
0
ファイル: test_core.py プロジェクト: simoneperazzoli/imageio
def test_functions():
    """ Test the user-facing API functions """

    # Test help(), it prints stuff, so we just check whether that goes ok
    imageio.help()  # should print overview
    imageio.help('PNG')  # should print about PNG

    fname1 = get_remote_file('images/chelsea.png', test_dir)
    fname2 = fname1[:-3] + 'jpg'
    fname3 = fname1[:-3] + 'notavalidext'
    open(fname3, 'wb')

    # Test read()
    R1 = imageio.read(fname1)
    R2 = imageio.read(fname1, 'png')
    assert R1.format is R2.format
    # Fail
    raises(ValueError, imageio.read, fname3)  # existing but not readable
    raises(FileNotFoundError, imageio.read, 'notexisting.barf')
    raises(IndexError, imageio.read, fname1, 'notexistingformat')

    # Test save()
    W1 = imageio.save(fname2)
    W2 = imageio.save(fname2, 'JPG')
    W1.close()
    W2.close()
    assert W1.format is W2.format
    # Fail
    raises(FileNotFoundError, imageio.save,
           '~/dirdoesnotexist/wtf.notexistingfile')

    # Test imread()
    im1 = imageio.imread(fname1)
    im2 = imageio.imread(fname1, 'png')
    assert im1.shape[2] == 3
    assert np.all(im1 == im2)

    # Test imsave()
    if os.path.isfile(fname2):
        os.remove(fname2)
    assert not os.path.isfile(fname2)
    imageio.imsave(fname2, im1[:, :, 0])
    imageio.imsave(fname2, im1)
    assert os.path.isfile(fname2)

    # Test mimread()
    fname3 = get_remote_file('images/newtonscradle.gif', test_dir)
    ims = imageio.mimread(fname3)
    assert isinstance(ims, list)
    assert len(ims) > 1
    assert ims[0].ndim == 3
    assert ims[0].shape[2] in (1, 3, 4)
    # Test protection
    with raises(RuntimeError):
        imageio.mimread('imageio:chelsea.png', 'dummy', length=np.inf)

    if IS_PYPY:
        return  # no support for npz format :(

    # Test mimsave()
    fname5 = fname3[:-4] + '2.npz'
    if os.path.isfile(fname5):
        os.remove(fname5)
    assert not os.path.isfile(fname5)
    imageio.mimsave(fname5, [im[:, :, 0] for im in ims])
    imageio.mimsave(fname5, ims)
    assert os.path.isfile(fname5)

    # Test volread()
    fname4 = get_remote_file('images/stent.npz', test_dir)
    vol = imageio.volread(fname4)
    assert vol.ndim == 3
    assert vol.shape[0] == 256
    assert vol.shape[1] == 128
    assert vol.shape[2] == 128

    # Test volsave()
    volc = np.zeros((10, 10, 10, 3), np.uint8)  # color volume
    fname6 = os.path.join(test_dir, 'images', 'stent2.npz')
    if os.path.isfile(fname6):
        os.remove(fname6)
    assert not os.path.isfile(fname6)
    imageio.volsave(fname6, volc)
    imageio.volsave(fname6, vol)
    assert os.path.isfile(fname6)

    # Test mvolread()
    vols = imageio.mvolread(fname4)
    assert isinstance(vols, list)
    assert len(vols) == 1
    assert vols[0].shape == vol.shape

    # Test mvolsave()
    if os.path.isfile(fname6):
        os.remove(fname6)
    assert not os.path.isfile(fname6)
    imageio.mvolsave(fname6, [volc, volc])
    imageio.mvolsave(fname6, vols)
    assert os.path.isfile(fname6)

    # Fail for save functions
    raises(ValueError, imageio.imsave, fname2, np.zeros((100, 100, 5)))
    raises(ValueError, imageio.imsave, fname2, 42)
    raises(ValueError, imageio.mimsave, fname5, [np.zeros((100, 100, 5))])
    raises(ValueError, imageio.mimsave, fname5, [42])
    raises(ValueError, imageio.volsave, fname6, np.zeros((100, 100, 100, 40)))
    raises(ValueError, imageio.volsave, fname6, 42)
    raises(ValueError, imageio.mvolsave, fname6, [np.zeros((90, 90, 90, 40))])
    raises(ValueError, imageio.mvolsave, fname6, [42])
コード例 #59
0
ファイル: functions.py プロジェクト: mountain-viewer/VK-Hack
def generate_gifs(image, match_info, choice):
    if ((int(choice) == 1) and
        (match_info["home_score"] > match_info["away_score"])) or (
            (int(choice) == 2) and
            (match_info["home_score"] <= match_info["away_score"])):
        GIF_FOLDERS = ["win"]
    else:
        GIF_FOLDERS = ["noooo"]
    GIF_FOLDERS += ['goal', '1_0', 'woman_red_card']  #, 'noooo', 'win' 'lost']
    SCALE_FACTORS = {
        'woman_red_card': (2, 2),
        'goal': (1, 1),
        '1_0': (5, 5),
        'noooo': (2, 2),
        'win': (2, 2)
    }  #, 'lost': (2, 2)}
    COORDS = {
        'woman_red_card': (0.6, 0.8),
        'goal': (0, 0.3),
        '1_0': (0.1, 0.85),
        'noooo': (0, 0.8),
        'win': (0.1, 0.75)
    }  #, 'lost': (0,0.7)}
    NUM_FRAMES = {
        'woman_red_card': 45,
        'goal': 24,
        '1_0': 10,
        'noooo': 8,
        'win': 2
    }  #, 'lost': 2}
    DURATION = {
        'woman_red_card': 50,
        'goal': 100,
        '1_0': 150,
        'noooo': 100,
        'win': 100
    }  #, 'lost': 100}

    STATIC_IMAGE = cv2.resize(image, dsize=(480, 640))

    for i, gif_name in enumerate(GIF_FOLDERS):
        static_image = STATIC_IMAGE
        if gif_name == 'win' or gif_name == 'noooo':
            versus_sticker = create_sticker_with_info(match_info=match_info)
            versus_img = versus_sticker[:, :, :3]
            versus_img_mask = versus_sticker[:, :, 3]
            static_image = overlay_image(bg=static_image,
                                         fg=versus_img,
                                         fgMask=versus_img_mask,
                                         coords=(static_image.shape[1] // 4,
                                                 20))
        gif = []
        for img_name in sorted(glob.glob(f'./Data/{gif_name}/frame_*.gif')):
            curr_frame = imageio.mimread(img_name)[0]
            gif.append(
                cv2.resize(
                    curr_frame,
                    dsize=(curr_frame.shape[1] // SCALE_FACTORS[gif_name][0],
                           curr_frame.shape[0] // SCALE_FACTORS[gif_name][1])))
        gif_array = []
        for idx, gif_frame in enumerate(gif):
            gif_img = gif_frame[:, :, :3]
            gif_img_mask = gif_frame[:, :, 3]
            overlayed_img = overlay_image(
                bg=static_image,
                fg=gif_img,
                fgMask=gif_img_mask,
                coords=(int(static_image.shape[1] * COORDS[gif_name][0]),
                        int(static_image.shape[1] * COORDS[gif_name][1])))
            gif_array.append(Image.fromarray(overlayed_img))
        # GIF version
        gif_array[0].save(f'./static/gifs/{i}.gif',
                          save_all=True,
                          append_images=gif_array[:NUM_FRAMES[gif_name]],
                          duration=DURATION[gif_name],
                          loop=0)
        optimize(f'./static/gifs/{i}.gif')
        # VIDEO version


#         dump_video(
#             filename=f'./static/gifs/{i}.mp4',
#             clip=gif_array
#         )

    return [
        f'95.213.37.132:5000/static/gifs/{i}.gif'
        for i in range(len(GIF_FOLDERS))
    ]
コード例 #60
0
ファイル: atlas_brain_matching.py プロジェクト: bf777/MesoNet
def atlasBrainMatch(
    brain_img_dir,
    sensory_img_dir,
    coords_input,
    sensory_match,
    mat_save,
    threshold,
    git_repo_base,
    region_labels,
    landmark_arr_orig,
    use_unet,
    use_dlc,
    atlas_to_brain_align,
    model,
    olfactory_check,
    plot_landmarks,
    align_once,
    original_label,
    use_voxelmorph,
    exist_transform,
    voxelmorph_model="motif_model_atlas.h5",
    vxm_template_path="templates",
    dlc_template_path="dlc_templates",
    flow_path="",
):
    """
    Align and overlap brain atlas onto brain image based on four landmark locations in the brain image and the atlas.
    :param brain_img_dir: The directory containing each brain image to be used.
    :param sensory_img_dir: The directory containing each sensory image to be used (if you are aligning each brain
    image using a sensory map).
    :param coords_input: Predicted locations of the four landmarks on the brain image from the file generated by
    DeepLabCut.
    :param sensory_match: Whether or not a sensory map is to be used.
    :param mat_save: Whether or not to export each brain region to a .mat file in applyMask, which is called at the end
    of this function.
    :param threshold: The threshold for the cv2.opening operation carried out in applyMask, which is called at the end
    of this function.
    :param git_repo_base: The path to the base git repository containing necessary resources for MesoNet (reference
    atlases, DeepLabCut config files, etc.)
    :param region_labels: Choose whether or not to attempt to label each region with its name from the Allen Institute
    Mouse Brain Atlas.
    :param landmark_arr_orig: The original array of landmarks from DeepLabCut (to be distinguished from any automatic
    exclusions to landmark array based on prediction quality).
    :param use_unet: Choose whether or not to identify the borders of the cortex using a U-net model.
    :param atlas_to_brain_align: If True, registers the atlas to each brain image. If False, registers each brain image
    to the atlas.
    :param model: The name of the U-net model (for passthrough to mask_functions.py)
    :param olfactory_check: If True, draws olfactory bulb contours on the brain image.
    :param plot_landmarks: If True, plots DeepLabCut landmarks (large circles) and original alignment landmarks (small
    circles) on final brain image.
    :param align_once: if True, carries out all alignments based on the alignment of the first atlas and brain. This can
    save time if you have many frames of the same brain with a fixed camera position.
    :param original_label: if True, uses a brain region labelling approach that attempts to automatically sort brain
    regions in a consistent order (left to right by hemisphere, then top to bottom for vertically aligned regions). This
    approach may be more flexible if you're using a custom brain atlas (i.e. not one in which each region is filled with a
    unique number).
    :param exist_transform: if True, uses an existing voxelmorph transformation field for all data instead of predicting
    a new transformation.
    :param voxelmorph_model: the name of a .h5 model located in the models folder of the git repository for MesoNet,
    generated using voxelmorph and containing weights for a voxelmorph local deformation model.
    :param vxm_template_path: the path to a template atlas (.npy or .mat) to which the brain image will be aligned in
    voxelmorph.
    :param flow_path: the path to a voxelmorph transformation field that will be used to transform all data instead of
    predicting a new transformation if exist_transform is True.
    """
    # load brain images folder
    brain_img_arr = []
    dlc_img_arr = []
    peak_arr = []
    atlas_label_list = []
    dst_list = []
    vxm_template_list = []
    br_list = []

    voxelmorph_model_path = os.path.join(
        git_repo_base, "models", "voxelmorph", voxelmorph_model
    )

    # Prepare template for VoxelMorph
    convert_to_png(vxm_template_path)
    vxm_template_orig = cv2.imread(
        glob.glob(os.path.join(git_repo_base, "atlases", vxm_template_path, "*.png"))[0]
    )

    # Prepare template for DeepLabCut + VoxelMorph
    # convert_to_png(dlc_template_path)
    # dlc_template = cv2.imread(
    #     glob.glob(os.path.join(git_repo_base, "atlases", dlc_template_path, "*.png"))[0]
    # )
    # dlc_template = np.uint8(dlc_template)
    # dlc_template = cv2.resize(dlc_template, (512, 512))

    # Prepare output folder
    cwd = os.getcwd()
    output_mask_path = os.path.join(cwd, "../output_mask")
    # Output folder for transparent masks and masks overlaid onto brain image
    output_overlay_path = os.path.join(cwd, "../output_overlay")
    if not os.path.isdir(output_mask_path):
        os.mkdir(output_mask_path)
    if not os.path.isdir(output_overlay_path):
        os.mkdir(output_overlay_path)

    if not atlas_to_brain_align:
        im = cv2.imread(
            os.path.join(git_repo_base, "atlases/Atlas_workflow2_binary.png")
        )
    else:
        if use_voxelmorph and not use_dlc:
            im = cv2.imread(
                os.path.join(git_repo_base, "atlases/Atlas_for_Voxelmorph_binary.png")
            )
        else:
            im = cv2.imread(
                os.path.join(git_repo_base, "atlases/Atlas_workflow1_binary.png")
            )
        im_left = cv2.imread(os.path.join(git_repo_base, "atlases/left_hemi.png"))
        ret, im_left = cv2.threshold(im_left, 5, 255, cv2.THRESH_BINARY_INV)
        im_right = cv2.imread(os.path.join(git_repo_base, "atlases/right_hemi.png"))
        ret, im_right = cv2.threshold(im_right, 5, 255, cv2.THRESH_BINARY_INV)
        im_left = np.uint8(im_left)
        im_right = np.uint8(im_right)
        im = np.uint8(im)
    # im = atlas_from_mat(os.path.join(git_repo_base, 'atlases/atlas_ROIs.mat'))
    atlas = im
    # FOR ALIGNING BRAIN TO ATLAS
    # im_binary = np.uint8(im)

    for num, file in enumerate(os.listdir(cwd)):
        if fnmatch.fnmatch(file, "*.png") and "mask" not in file:
            dlc_img_arr.append(os.path.join(cwd, file))
    for num, file in enumerate(os.listdir(brain_img_dir)):
        if fnmatch.fnmatch(file, "*.png"):
            brain_img_arr.append(os.path.join(brain_img_dir, file))
            brain_img_arr.sort(key=natural_sort_key)
        elif fnmatch.fnmatch(file, "*.tif"):
            tif_stack = imageio.mimread(os.path.join(brain_img_dir, file))
            for tif_im in tif_stack:
                brain_img_arr.append(tif_im)
    # i_coord, j_coord = np.array([(100, 256, 413, 256), (148, 254, 148, 446)])
    # https://www.pyimagesearch.com/2014/07/21/detecting-circles-images-using-opencv-hough-circles/
    coord_circles_img = cv2.imread(
        os.path.join(
            git_repo_base, "atlases", "multi_landmark", "landmarks_new_binary.png"
        ),
        cv2.IMREAD_GRAYSCALE,
    )
    coord_circles_img = np.uint8(coord_circles_img)
    # detect the landmark circles in the image (as contours)
    circles, hierarchy = cv2.findContours(
        coord_circles_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE
    )[-2:]
    # ensure at least some circles were found
    if circles is not None:
        # convert the (x, y) coordinates and radius of the circles to integers
        atlas_arr = np.array(
            [
                (
                    int(cv2.moments(circle)["m10"] / cv2.moments(circle)["m00"]),
                    int(cv2.moments(circle)["m01"] / cv2.moments(circle)["m00"]),
                )
                for circle in circles
            ]
        )

    atlas_arr = np.array(
        [
            (102, 148),
            (166, 88),
            (214, 454),
            (256, 88),
            (256, 256),
            (256, 428),
            (410, 148),
            (346, 88),
            (298, 454),
        ]
    )

    peak_arr_flat = []
    peak_arr_total = []

    if sensory_match:
        for num, file in enumerate(brain_img_arr):
            img_name = str(os.path.splitext(os.path.basename(file))[0])
            sensory_img_for_brain = os.path.join(sensory_img_dir, img_name)
            print(img_name)
            print(sensory_img_for_brain)
            if glob.glob(sensory_img_for_brain):
                sensory_img_for_brain_dir = os.listdir(sensory_img_for_brain)
                sensory_img_for_brain_dir.sort(key=natural_sort_key)
                for num_im, file_im in enumerate(sensory_img_for_brain_dir):
                    sensory_im = io.imread(
                        os.path.join(sensory_img_dir, img_name, file_im)
                    )
                    sensory_im = trans.resize(sensory_im, (512, 512))
                    io.imsave(
                        os.path.join(sensory_img_dir, img_name, file_im), sensory_im
                    )
                    peak = find_peaks(os.path.join(sensory_img_dir, img_name, file_im))
                    peak_arr.append(peak)
            for x in peak_arr:
                for y in x:
                    peak_arr_flat.append(y)
            peak_arr_total.append(peak_arr_flat)
            peak_arr_flat = []
            peak_arr = []

    dlc_pts = []
    atlas_pts = []
    sensory_peak_pts = []
    sensory_atlas_pts = []
    sub_dlc_pts = []
    sub_atlas_pts = []
    sub_sensory_peak_pts = []
    sub_sensory_atlas_pts = []

    bregma_index_list = []
    bregma_list = []
    if use_dlc:
        bregma_present = True
    else:
        bregma_present = False

    coords = pd.read_csv(coords_input)
    x_coord = coords.iloc[2:, 1::3]
    y_coord = coords.iloc[2:, 2::3]
    accuracy = coords.iloc[2:, 3::3]
    acc_left_total = accuracy.iloc[:, 0:5]
    acc_right_total = accuracy.iloc[:, 3:8]
    landmark_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8]  # [0, 3, 2, 1]
    for arr_index, i in enumerate(range(0, len(x_coord))):
        landmark_arr = landmark_arr_orig
        x_coord_flat = x_coord.iloc[i].values.astype("float32")
        y_coord_flat = y_coord.iloc[i].values.astype("float32")
        x_coord_flat = x_coord_flat[landmark_arr]
        y_coord_flat = y_coord_flat[landmark_arr]
        dlc_list = []
        atlas_list = []
        for (coord_x, coord_y) in zip(x_coord_flat, y_coord_flat):
            dlc_coord = (coord_x, coord_y)
            dlc_list.append(dlc_coord)
        for coord_atlas in atlas_arr:
            atlas_coord = (coord_atlas[0], coord_atlas[1])
            atlas_list.append(atlas_coord)
        atlas_list = [atlas_list[i] for i in landmark_arr]
        # Initialize result as max value

        landmark_indices = landmark_indices[0 : len(landmark_arr)]

        # atlas_indices = min_landmark_arr
        atlas_indices = landmark_arr

        # print('atlas indices: {}'.format(atlas_indices))
        # print('landmark indices: {}'.format(landmark_indices))
        # print('x coords: {}'.format(x_coord_flat))

        pts_dist = np.absolute(
            np.asarray(atlas_list) - np.asarray((im.shape[0] / 2, im.shape[1] / 2))
        )
        pts_avg_dist = [np.mean(v) for v in pts_dist]
        # print("bregma dist: {}".format(pts_avg_dist))
        bregma_index = np.argmin(np.asarray(pts_avg_dist))
        # print("bregma index: {}".format(bregma_index))

        for j in landmark_indices:
            sub_dlc_pts.append([x_coord_flat[j], y_coord_flat[j]])
        for j in atlas_indices:
            sub_atlas_pts.append([atlas_arr[j][0], atlas_arr[j][1]])

        dlc_pts.append(sub_dlc_pts)
        atlas_pts.append(sub_atlas_pts)
        coords_to_mat(
            sub_dlc_pts, i, output_mask_path, bregma_present, bregma_index, landmark_arr
        )
        bregma_index_list.append(bregma_index)
        sub_dlc_pts = []
        sub_atlas_pts = []
    if sensory_match:
        k_coord, m_coord = np.array([(189, 323, 435, 348), (315, 315, 350, 460)])
        coords_peak = peak_arr_total
        for img_num, img in enumerate(brain_img_arr):
            for j in [1, 0, 3, 2]:  # Get peak values from heatmaps
                sub_sensory_peak_pts.append(
                    [coords_peak[img_num][j][0], coords_peak[img_num][j][1]]
                )
            for j in [0, 1, 2, 3]:  # Get circle locations
                sub_sensory_atlas_pts.append([k_coord[j], m_coord[j]])
            sensory_peak_pts.append(sub_sensory_peak_pts)
            sensory_atlas_pts.append(sub_sensory_atlas_pts)
            sensory_to_mat(
                sub_sensory_peak_pts, dlc_pts[img_num][3], img_num, output_mask_path
            )
            sub_sensory_peak_pts = []
            sub_sensory_atlas_pts = []
        sensory_peak_pts, sensory_atlas_pts = (
            np.asarray(sensory_peak_pts).astype("float32"),
            np.asarray(sensory_atlas_pts).astype("float32"),
        )

    for (n, br) in enumerate(brain_img_arr):
        vxm_template = np.uint8(vxm_template_orig)
        vxm_template = cv2.resize(vxm_template, (512, 512))

        align_val = n
        if atlas_to_brain_align:
            im = np.uint8(im)
            br = cv2.imread(br)
            br = np.uint8(br)
            br = cv2.resize(br, (512, 512))
        else:
            # FOR ALIGNING BRAIN TO ATLAS
            if ".png" in br:
                im = cv2.imread(br)
            else:
                im = br
            im = np.uint8(im)
            im = cv2.resize(im, (512, 512))

        if atlas_to_brain_align:
            # atlas_mask_dir = os.path.join(git_repo_base, "atlases/Atlas_workflow1_smooth_binary.png")
            if use_voxelmorph and not use_dlc:
                atlas_mask_dir = os.path.join(
                    git_repo_base, "atlases/Atlas_for_Voxelmorph_border.png"
                )
            else:
                atlas_mask_dir = os.path.join(
                    git_repo_base, "atlases/atlas_smooth2_binary.png"
                )
            atlas_mask_dir_left = os.path.join(
                git_repo_base, "atlases/left_hemisphere_smooth.png"
            )
            atlas_mask_dir_right = os.path.join(
                git_repo_base, "atlases/right_hemisphere_smooth.png"
            )
            atlas_label_mask_dir = os.path.join(
                git_repo_base, "atlases/diff_colour_regions/Common_atlas.mat"
            )
            atlas_label_mask_dir_left = os.path.join(
                git_repo_base, "atlases/diff_colour_regions/atlas_left_hemisphere.csv"
            )
            atlas_label_mask_dir_right = os.path.join(
                git_repo_base, "atlases/diff_colour_regions/atlas_right_hemisphere.csv"
            )
            # atlas_label_mask_left = atlas_from_mat(atlas_label_mask_dir_left, [])
            # atlas_label_mask_right = atlas_from_mat(atlas_label_mask_dir_right, [])
            atlas_label_mask_left = np.genfromtxt(
                atlas_label_mask_dir_left, delimiter=","
            )
            atlas_label_mask_right = np.genfromtxt(
                atlas_label_mask_dir_right, delimiter=","
            )
            atlas_mask_left = cv2.imread(atlas_mask_dir_left, cv2.IMREAD_UNCHANGED)
            atlas_mask_left = cv2.resize(atlas_mask_left, (im.shape[0], im.shape[1]))
            atlas_mask_left = np.uint8(atlas_mask_left)
            atlas_mask_right = cv2.imread(atlas_mask_dir_right, cv2.IMREAD_UNCHANGED)
            atlas_mask_right = cv2.resize(atlas_mask_right, (im.shape[0], im.shape[1]))
            atlas_mask_right = np.uint8(atlas_mask_right)
        else:
            atlas_mask_dir = os.path.join(
                git_repo_base, "atlases/atlas_smooth2_binary.png"
            )
        atlas_mask = cv2.imread(atlas_mask_dir, cv2.IMREAD_UNCHANGED)
        atlas_mask = cv2.resize(atlas_mask, (im.shape[0], im.shape[1]))
        atlas_mask = np.uint8(atlas_mask)
        mask_dir = os.path.join(cwd, "../output_mask/{}.png".format(n))

        print("Performing first transformation of atlas {}...".format(n))

        mask_warped_path = os.path.join(
            output_mask_path, "{}_mask_warped.png".format(str(n))
        )
        if use_dlc:
            # First alignment of brain atlas using three cortical landmarks and standard affine transform
            atlas_pts_for_input = np.array([atlas_pts[n][0 : len(dlc_pts[n])]]).astype(
                "float32"
            )
            pts_for_input = np.array([dlc_pts[n]]).astype("float32")

            if align_once:
                align_val = 0
            else:
                align_val = n

            if len(atlas_pts_for_input[0]) == 2:
                atlas_pts_for_input = np.append(atlas_pts_for_input[0], [[0, 0]], axis=0)
                pts_for_input = np.append(pts_for_input[0], [[0, 0]], axis=0)
            if len(atlas_pts_for_input[0]) <= 2:
                warp_coords = cv2.estimateAffinePartial2D(
                    atlas_pts_for_input, pts_for_input
                )[0]
                if atlas_to_brain_align:
                    atlas_warped_left = cv2.warpAffine(im_left, warp_coords, (512, 512))
                    atlas_warped_right = cv2.warpAffine(im_right, warp_coords, (512, 512))
                    atlas_warped = cv2.bitwise_or(atlas_warped_left, atlas_warped_right)
                    ret, atlas_warped = cv2.threshold(
                        atlas_warped, 5, 255, cv2.THRESH_BINARY_INV
                    )
                    atlas_left_transform_path = os.path.join(
                        output_mask_path, "{}_atlas_left_transform.png".format(str(n))
                    )
                    atlas_right_transform_path = os.path.join(
                        output_mask_path, "{}_atlas_right_transform.png".format(str(n))
                    )
                    io.imsave(atlas_left_transform_path, atlas_warped_left)
                    io.imsave(atlas_right_transform_path, atlas_warped_right)
                else:
                    atlas_warped = cv2.warpAffine(im, warp_coords, (512, 512))
            elif len(atlas_pts_for_input[0]) == 3:
                warp_coords = cv2.getAffineTransform(atlas_pts_for_input, pts_for_input)
                atlas_warped = cv2.warpAffine(im, warp_coords, (512, 512))
            elif len(atlas_pts_for_input[0]) >= 4:
                im_final_size = (512, 512)

                left = acc_left_total.iloc[n, :].values.astype("float32").tolist()
                right = acc_right_total.iloc[n, :].values.astype("float32").tolist()
                left = np.argsort(left).tolist()
                right = np.argsort(right).tolist()
                right = [x + 1 for x in right]
                if set([1, 3, 5, 7]).issubset(landmark_arr):
                    left = [1, 3, 5]
                    right = [3, 5, 7]
                else:
                    left = [x for x in landmark_indices if x in range(0, 6)][0:2]
                    right = [x for x in landmark_indices if x in range(3, 9)][0:2]

                try:
                    atlas_pts_left = np.array(
                        [
                            atlas_pts[align_val][left[0]],
                            atlas_pts[align_val][left[1]],
                            atlas_pts[align_val][left[2]],
                        ],
                        dtype=np.float32,
                    )
                    atlas_pts_right = np.array(
                        [
                            atlas_pts[align_val][right[0]],
                            atlas_pts[align_val][right[1]],
                            atlas_pts[align_val][right[2]],
                        ],
                        dtype=np.float32,
                    )
                    dlc_pts_left = np.array(
                        [
                            dlc_pts[align_val][left[0]],
                            dlc_pts[align_val][left[1]],
                            dlc_pts[align_val][left[2]],
                        ],
                        dtype=np.float32,
                    )
                    dlc_pts_right = np.array(
                        [
                            dlc_pts[align_val][right[0]],
                            dlc_pts[align_val][right[1]],
                            dlc_pts[align_val][right[2]],
                        ],
                        dtype=np.float32,
                    )

                except:
                    atlas_pts_left = np.array(
                        [
                            atlas_pts[align_val][0],
                            atlas_pts[align_val][2],
                            atlas_pts[align_val][3],
                        ],
                        dtype=np.float32,
                    )
                    atlas_pts_right = np.array(
                        [
                            atlas_pts[align_val][1],
                            atlas_pts[align_val][2],
                            atlas_pts[align_val][3],
                        ],
                        dtype=np.float32,
                    )
                    dlc_pts_left = np.array(
                        [
                            dlc_pts[align_val][0],
                            dlc_pts[align_val][2],
                            dlc_pts[align_val][3],
                        ],
                        dtype=np.float32,
                    )
                    dlc_pts_right = np.array(
                        [
                            dlc_pts[align_val][1],
                            dlc_pts[align_val][2],
                            dlc_pts[align_val][3],
                        ],
                        dtype=np.float32,
                    )

                warp_coords_left = cv2.getAffineTransform(atlas_pts_left, dlc_pts_left)
                warp_coords_right = cv2.getAffineTransform(atlas_pts_right, dlc_pts_right)
                if atlas_to_brain_align:
                    atlas_warped_left = cv2.warpAffine(
                        im_left, warp_coords_left, im_final_size
                    )
                    atlas_warped_right = cv2.warpAffine(
                        im_right, warp_coords_right, im_final_size
                    )
                    atlas_warped = cv2.bitwise_or(atlas_warped_left, atlas_warped_right)
                    ret, atlas_warped = cv2.threshold(
                        atlas_warped, 5, 255, cv2.THRESH_BINARY_INV
                    )
                    if not original_label:
                        atlas_label_left = cv2.warpAffine(
                            atlas_label_mask_left, warp_coords_left, im_final_size
                        )
                        atlas_label_right = cv2.warpAffine(
                            atlas_label_mask_right, warp_coords_right, im_final_size
                        )
                        atlas_label = cv2.bitwise_or(atlas_label_left, atlas_label_right)

                else:
                    pts_np = np.array(
                        [
                            dlc_pts[align_val][0],
                            dlc_pts[align_val][1],
                            dlc_pts[align_val][2],
                        ],
                        dtype=np.float32,
                    )
                    atlas_pts_np = np.array(
                        [
                            atlas_pts[align_val][0],
                            atlas_pts[align_val][1],
                            atlas_pts[align_val][2],
                        ],
                        dtype=np.float32,
                    )
                    warp_coords = cv2.getAffineTransform(pts_np, atlas_pts_np)
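                    # Unlike the atlas-to-brain branch above, the transform is
                    # fitted from DLC brain landmarks to atlas landmarks, so
                    # warpAffine moves the raw brain image into atlas space.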
                    atlas_warped = cv2.warpAffine(im, warp_coords, (512, 512))
                    print("Affine warp matrix for atlas {}:\n{}".format(str(n), warp_coords))
                    # vxm_template = cv2.warpAffine(vxm_template, warp_coords, (512, 512))
                    # try:
                    #    atlas_warped = niftyreg_align(git_repo_base, atlas_warped, output_mask_path, n)
                    # except:
                    #    print("ERROR: could not use niftyreg to warp atlas {}! Please check inputs.".format(str(n)))

            if atlas_to_brain_align:
                if len(atlas_pts_for_input[0]) == 2:
                    atlas_mask_left_warped = cv2.warpAffine(
                        atlas_mask_left, warp_coords, (512, 512)
                    )
                    atlas_mask_right_warped = cv2.warpAffine(
                        atlas_mask_right, warp_coords, (512, 512)
                    )
                    atlas_mask_warped = cv2.bitwise_or(
                        atlas_mask_left_warped, atlas_mask_right_warped
                    )
                elif len(atlas_pts_for_input[0]) == 3:
                    atlas_mask_warped = cv2.warpAffine(atlas_mask, warp_coords, (512, 512))
                elif len(atlas_pts_for_input[0]) >= 4:
                    atlas_mask_left_warped = cv2.warpAffine(
                        atlas_mask_left, warp_coords_left, (512, 512)
                    )
                    atlas_mask_right_warped = cv2.warpAffine(
                        atlas_mask_right, warp_coords_right, (512, 512)
                    )
                    atlas_mask_warped = cv2.bitwise_or(
                        atlas_mask_left_warped, atlas_mask_right_warped
                    )
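                # Cast to uint8 so later OpenCV bitwise/threshold calls accept
                # the merged mask.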
                atlas_mask_warped = np.uint8(atlas_mask_warped)

            # Second alignment of brain atlas using cortical landmarks and piecewise affine transform
            print("Performing second transformation of atlas {}...".format(n))

            atlas_first_transform_path = os.path.join(
                output_mask_path, "{}_atlas_first_transform.png".format(str(n))
            )

            dst = atlas_warped

            io.imsave(atlas_first_transform_path, dst)
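            # The first-pass alignment is saved both for inspection and, when a
            # sensory map is supplied, as a contour source for getMaskContour
            # below.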

            # If a sensory map of the brain is provided, do a third alignment of the brain atlas using up to
            # four peaks of sensory activity
            if sensory_match:
                original_label = True
                # When aligning the brain to the atlas instead, comment out the
                # getMaskContour refinement below (its contour sources are
                # mask_dir and atlas_first_transform_path).
                if atlas_to_brain_align:
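                    # Third alignment: refine the warped atlas and mask against
                    # the sensory peak -> sensory atlas landmark pairs via
                    # getMaskContour.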
                    dst = getMaskContour(
                        mask_dir,
                        atlas_warped,
                        sensory_peak_pts[align_val],
                        sensory_atlas_pts[align_val],
                        cwd,
                        align_val,
                        False,
                    )
                    atlas_mask_warped = getMaskContour(
                        atlas_first_transform_path,
                        atlas_mask_warped,
                        sensory_peak_pts[align_val],
                        sensory_atlas_pts[align_val],
                        cwd,
                        align_val,
                        False,
                    )
                    # cv2.resize expects dsize as (width, height)
                    atlas_mask_warped = cv2.resize(
                        atlas_mask_warped, (im.shape[1], im.shape[0])
                    )
                else:
                    dst = atlas_warped
        else:
            # If we're not using DeepLabCut...
            if atlas_to_brain_align and not use_voxelmorph:
                dst = cv2.bitwise_or(im_left, im_right)
                ret, dst = cv2.threshold(
                    dst, 5, 255, cv2.THRESH_BINARY_INV
                )
            else:
                dst = im
            dst = np.uint8(dst)

        # if use_voxelmorph:
        #     if atlas_to_brain_align:
        #         _, flow = voxelmorph_align(
        #             voxelmorph_model_path, br_vxm, vxm_template, exist_transform, flow_path
        #         )
        #     else:
        #         dst, flow = voxelmorph_align(
        #             voxelmorph_model_path, dst, vxm_template, exist_transform, flow_path
        #         )
        #     flow_path_after = os.path.join(output_mask_path, "{}_flow.npy".format(str(n)))
        #     np.save(flow_path_after, flow)
        #     if not exist_transform:
        #         if atlas_to_brain_align:
        #             dst_gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
        #             dst = vxm_transform(dst_gray, flow_path_after)
        #             ret, dst = cv2.threshold(
        #                 dst, 5, 255, cv2.THRESH_BINARY
        #             )
        #             dst = np.uint8(dst)

        if use_dlc:
            if atlas_to_brain_align:
                io.imsave(mask_warped_path, atlas_mask_warped)
            else:
                io.imsave(mask_warped_path, atlas_mask)
        else:
            # mask_warped_path is written once, below, after atlas_mask_warped
            # has been computed.
            if atlas_to_brain_align:
                if use_voxelmorph:
                    atlas_mask_warped = atlas_mask
                else:
                    atlas_mask_warped = cv2.bitwise_or(
                        atlas_mask_left, atlas_mask_right
                    )
                atlas_mask_warped = cv2.cvtColor(atlas_mask_warped, cv2.COLOR_BGR2GRAY)
                # atlas_mask_warped = vxm_transform(atlas_mask_warped, flow_path_after)
                ret, atlas_mask_warped = cv2.threshold(
                    atlas_mask_warped, 5, 255, cv2.THRESH_BINARY
                )
                atlas_mask_warped = np.uint8(atlas_mask_warped)
                original_label = True
            else:
                atlas_mask_warped = atlas_mask
                # atlas_mask_warped = vxm_transform(atlas_mask_warped, flow_path_after)
            io.imsave(mask_warped_path, atlas_mask_warped)
        # Resize the result back to the size of the input image (512x512 in
        # this pipeline); cv2.resize takes dsize as (width, height)
        dst = cv2.resize(dst, (im.shape[1], im.shape[0]))
        atlas_path = os.path.join(output_mask_path, "{}_atlas.png".format(str(n)))

        if atlas_to_brain_align:
            io.imsave(atlas_path, dst)
            br_list.append(br)
        else:
            brain_warped_path = os.path.join(
                output_mask_path, "{}_brain_warp.png".format(str(n))
            )
            vxm_template_output_path = os.path.join(
                output_mask_path, "{}_vxm_template.png".format(str(n))
            )
            dst_list.append(dst)
            if use_voxelmorph:
                vxm_template_list.append(vxm_template)
                io.imsave(vxm_template_output_path, vxm_template_list[n])
            io.imsave(brain_warped_path, dst)
            io.imsave(atlas_path, atlas)

        if atlas_to_brain_align:
            if original_label:
                atlas_label = []
            atlas_label = atlas_to_mask(
                atlas_path,
                mask_dir,
                mask_warped_path,
                output_mask_path,
                n,
                use_unet,
                atlas_to_brain_align,
                git_repo_base,
                olfactory_check,
                atlas_label
            )
            atlas_label_list.append(atlas_label)
        elif not use_dlc:
            io.imsave(os.path.join(output_mask_path, "{}.png".format(n)), dst)
        if bregma_present:
            bregma_val = int(bregma_index_list[n])
            bregma_list.append(dlc_pts[n][bregma_val])

    # Carries out VoxelMorph on each motif-based functional map (MBFM) that has been aligned to a raw brain image
    if use_dlc and use_voxelmorph and align_once:
        for (n_post, dst_post), vxm_template_post in zip(enumerate(dst_list), vxm_template_list):
            _, flow_post = voxelmorph_align(
                voxelmorph_model_path, dst_post, vxm_template_post, exist_transform, flow_path
            )
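            # Save the dense deformation (flow) field so the same non-linear
            # transform can be re-applied to other images via vxm_transform.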
            flow_path_after = os.path.join(output_mask_path, "{}_flow.npy".format(str(n_post)))
            np.save(flow_path_after, flow_post)
            if not exist_transform:
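                # Apply the learned flow to a grayscale copy of the atlas, then
                # re-binarize (pixels above 5 -> 255) and cast to uint8.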
                dst_gray = cv2.cvtColor(atlas, cv2.COLOR_BGR2GRAY)
                dst_post = vxm_transform(dst_gray, flow_path_after)
                ret, dst_post = cv2.threshold(
                    dst_post, 5, 255, cv2.THRESH_BINARY
                )
                dst_post = np.uint8(dst_post)

            mask_warped_path = os.path.join(
                output_mask_path, "{}_mask_warped.png".format(str(n_post))
            )

            atlas_first_transform_path_post = os.path.join(
                output_mask_path, "{}_atlas_first_transform.png".format(str(n_post))
            )

            io.imsave(atlas_first_transform_path_post, dst_post)

            atlas_path = os.path.join(output_mask_path, "{}_atlas.png".format(str(n_post)))

            brain_warped_path = os.path.join(
                output_mask_path, "{}_brain_warp.png".format(str(n_post))
            )
            mask_dir = os.path.join(cwd, "../output_mask/{}.png".format(n_post))
            # cv2.resize expects dsize as (width, height)
            dst_post = cv2.resize(dst_post, (im.shape[1], im.shape[0]))
            if not atlas_to_brain_align:
                atlas_to_brain_align = True
                original_label = True
            # atlas_to_brain_align is now guaranteed True, so the warped
            # result is always saved as an atlas image.
            io.imsave(atlas_path, dst_post)
            if original_label:
                atlas_label = []
            atlas_label = atlas_to_mask(
                atlas_path,
                mask_dir,
                mask_warped_path,
                output_mask_path,
                n_post,
                use_unet,
                atlas_to_brain_align,
                git_repo_base,
                olfactory_check,
                atlas_label
            )
            atlas_label_list.append(atlas_label)


    # Converts the transformed brain atlas into a segmentation template for the original brain image
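    # Note: output_overlay_path is passed for two consecutive applyMask
    # arguments.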
    applyMask(
        brain_img_dir,
        output_mask_path,
        output_overlay_path,
        output_overlay_path,
        mat_save,
        threshold,
        git_repo_base,
        bregma_list,
        atlas_to_brain_align,
        model,
        dlc_pts,
        atlas_pts,
        olfactory_check,
        use_unet,
        use_dlc,
        plot_landmarks,
        align_once,
        atlas_label_list,
        region_labels,
        original_label,
    )