Example #1
def test_ico():
    
    for float in (False, True):
        for crop in (0, 1, 2):
            for colors in (0, 1, 3, 4):
                fname = fnamebase + '%i.%i.%i.ico' % (float, crop, colors)
                rim = get_ref_im(colors, crop, float)
                imageio.imsave(fname, rim)
                im = imageio.imread(fname)
                mul = 255 if float else 1
                assert_close(rim * mul, im, 0.1)  # lossless
    
    # Meta data
    R = imageio.read(fnamebase + '0.0.0.ico')
    assert isinstance(R.get_meta_data(0), dict)
    assert isinstance(R.get_meta_data(None), dict)  # But this prints a warning
    writer = imageio.save(fnamebase + 'I.ico')
    writer.set_meta_data({})
    writer.close()
    
    # Parameters. Note that with makealpha, RGBA images are read in incorrectly
    im = imageio.imread(fnamebase + '0.0.0.ico', makealpha=True)
    assert im.ndim == 3 and im.shape[-1] == 4
    
    # Parameter fail
    raises(TypeError, imageio.imread, fname, notavalidkwarg=True)
    raises(TypeError, imageio.imsave, fnamebase + '1.gif', im, notavalidk=True)
    
    # Multiple images
    im = get_ref_im(4, 0, 0)
    ims1 = im, np.column_stack([im, im]), np.row_stack([im, im])
    imageio.mimsave(fnamebase + 'I.ico', ims1)
    ims2 = imageio.mimread(fnamebase + 'I.ico')
    for im1, im2 in zip(ims1, ims2):
        assert_close(im1, im2, 0.1)
Example #2
 def imsave(self, out_fn=None):
     if out_fn is None:
         out_fn = 'vphas-{0}-{1}.jpg'.format(self.offset, self.ccd)
     log.info('Writing {0}'.format(out_fn))
     #mimg.imsave(out_fn, np.rot90(self.as_array()), origin='lower')
     img = np.rot90(self.as_array())
     imageio.imsave(out_fn, img, quality=90, optimize=True)
Example #3
def imsave(fn, im, **kwargs):
    """Wrapper around various libraries that haven't got their act together.

    For TIFF, uses scikit-image's wrapper around tifffile.py. For other
    formats, uses imageio.

    Parameters
    ----------
    fn : string
        The filename to save to.
    im : array, shape (M, N[, 3])
        The image to save.
    kwargs : dict, optional
        Keyword arguments to the save function. Format dependent. For example,
        JPEG images take a ``quality`` (int) argument in [1, 95], while
        TIFF images take a ``compress`` (int) argument in [0, 9].

    Notes
    -----
    The ``fn`` and ``im`` arguments can be swapped -- the function will
    determine which to use by testing for string types.
    """
    if isinstance(im, str):
        fn, im = im, fn
    if fn.endswith('.tif'):
        io.imsave(fn, im, plugin='tifffile', **kwargs)
    else:
        iio.imsave(fn, im, **kwargs)
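A hypothetical usage sketch for the wrapper above (the array and filenames are illustrative, not from the original source); it exercises the format routing, the format-dependent kwargs, and the swapped-argument handling described in the docstring:

import numpy as np

im = np.zeros((64, 64, 3), np.uint8)  # illustrative image
imsave('out.tif', im, compress=6)     # routed to scikit-image's tifffile plugin
imsave('out.jpg', im, quality=90)     # routed to imageio
imsave(im, 'also_ok.jpg')             # swapped arguments are detected and corrected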
Example #4
def test_singleton():

    im1 = imageio.imread("imageio:chelsea.png")

    fname = os.path.join(test_dir, "chelsea.bsdf")
    imageio.imsave(fname, im1)

    # Does it look alright if we open it in bsdf without extensions?
    raw = bsdf.load(fname, [])
    assert isinstance(raw, dict)
    assert set(raw.keys()) == set(["meta", "array"])
    assert isinstance(raw["meta"], dict)
    assert isinstance(raw["array"], dict)
    assert raw["array"]["shape"] == list(im1.shape)
    assert isinstance(raw["array"]["data"], bytes)

    # Read singleton image as singleton
    im2 = imageio.imread(fname)
    assert np.all(im1 == im2)

    # Read singleton image as series
    ims = imageio.mimread(fname)
    assert len(ims) == 1 and np.all(im1 == ims[0])

    # Read + write back without image extensions
    bsdf.save(fname, bsdf.load(fname))
    im3 = imageio.mimread(fname)
    assert np.all(im1 == im3)
Example #5
def test_jpg_more():
    need_internet()

    # Test broken JPEG
    fname = fnamebase + "_broken.jpg"
    open(fname, "wb").write(b"this is not an image")
    raises(Exception, imageio.imread, fname)
    #
    bb = imageio.imsave(imageio.RETURN_BYTES, get_ref_im(3, 0, 0), "JPEG")
    with open(fname, "wb") as f:
        f.write(bb[:400])
        f.write(b" ")
        f.write(bb[400:])
    raises(Exception, imageio.imread, fname)

    # Test EXIF stuff
    fname = get_remote_file("images/rommel.jpg")
    im = imageio.imread(fname)
    assert im.shape[0] > im.shape[1]
    im = imageio.imread(fname, exifrotate=False)
    assert im.shape[0] < im.shape[1]
    im = imageio.imread(fname, exifrotate=2)  # Rotation in Python
    assert im.shape[0] > im.shape[1]
    # Write the jpg and check that exif data is maintained
    if sys.platform.startswith("darwin"):
        return  # segfaults on my osx VM, why?
    imageio.imsave(fnamebase + "rommel.jpg", im)
    im = imageio.imread(fname)
    assert im.meta.EXIF_MAIN
Example #6
def test_simpleitk_reading_writing():
    """ Test reading and saveing tiff """
    im2 = np.ones((10, 10, 3), np.uint8) * 2

    filename1 = os.path.join(test_dir, 'test_tiff.tiff')

    # One image
    imageio.imsave(filename1, im2, 'itk')
    im = imageio.imread(filename1, 'itk')
    ims = imageio.mimread(filename1, 'itk')
    assert (im == im2).all()
    assert len(ims) == 1

    # Mixed
    W = imageio.save(filename1, 'itk')
    raises(RuntimeError, W.set_meta_data, 1)
    assert W.format.name == 'ITK'
    W.append_data(im2)
    W.append_data(im2)
    W.close()
    #
    R = imageio.read(filename1, 'itk')
    assert R.format.name == 'ITK'
    ims = list(R)  # == [im for im in R]
    assert (ims[0] == im2).all()
    # Fail
    raises(IndexError, R.get_data, -1)
    raises(IndexError, R.get_data, 3)
    raises(RuntimeError, R.get_meta_data)
Example #7
def reflect(filename):
    # Get filenames
    base, ext = os.path.splitext(filename)
    filename2 = base + "_reflected" + ext
    # Get images
    im1 = imageio.imread(filename)
    im2 = reflect_image(im1)
    # Write back
    imageio.imsave(filename2, im2)
Example #8
def save(path, im):
    # both imageio and skimage currently save uint16 images with 180deg rotation
    # as they both use freeimage and this has some weird internal formats
    # see https://github.com/scikit-image/scikit-image/issues/1101
    # and https://github.com/imageio/imageio/issues/3
    from distutils.version import StrictVersion
    if im.dtype == np.uint16 and StrictVersion(imageio.__version__) <= StrictVersion('0.5.1'):
        im = im[::-1,::-1]
    imageio.imsave(path, im)
Example #9
 def publishCR2(self, file):
     print("publishCR2", file)
     raw = rawpy.imread(file)
     #rgb = raw.postprocess(gamma=(1,1), no_auto_bright=True, output_bps=16)
     rgb = raw.postprocess()
     imageio.imsave(file + '.tiff', rgb)
     print("save")
     bayer = raw.raw_image
     print("bayer")
     self.pub_image.publish(self.bridge.cv2_to_imgmsg(cv2.fromarray(bayer), "bgr8"))
Example #10
def imwrite(filename, image, format=None):
    """ imwrite(filename, image, format=None)
    
    Write image (numpy array) to file, requires imageio or PIL. 
    
    Parameters
    ----------
    filename : string
        The name of the file to store the screenshot to. If filename is None, 
        the interpolated image is returned as a numpy array.
    image : numpy array
        The image to write.
    format : string
        The format for the image to be saved in. If not given, the
        format is deduced from the filename.
    
    Notes
    -----
      * For floating point images, 0 is considered black and 1 is white.
      * For integer types, 0 is considered black and 255 is white.
    
    """
    
    if imageio is None and PIL is None:
        raise RuntimeError("visvis.imwrite requires the imageio or PIL package.")
    
    # check image
    if len(image.shape) == 2:
        pass # grayscale
    elif len(image.shape) == 3:
        if image.shape[2] in [3, 4]:
            pass # RGB or RGBA
        else:
            raise ValueError("Cannot write image: Too many values in third dim.")
    else:
        raise ValueError("Cannot write image: Invalid number of dimensions.")
    
    # check type -> convert
    if image.dtype.name == 'uint8':
        pass # ok
    elif image.dtype.name in ['float32', 'float64']:
        image = image.copy()
        image[image<0] = 0
        image[image>1] = 1
        image = (image*255).astype(np.uint8)
    else:
        image = image.astype(np.uint8)
    
    # write image
    if imageio:
        imageio.imsave(filename, image, format)
    elif PIL:
        pim = PIL.Image.fromarray(image)
        pim.save(filename, format)
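A hypothetical usage sketch for imwrite above (filenames are illustrative): float images are clipped to [0, 1] and rescaled to uint8, while uint8 data passes through unchanged, per the Notes section:

import numpy as np

gray = np.random.rand(64, 64)           # float64 in [0, 1]; 0 is black, 1 is white
imwrite('screenshot.png', gray)         # clipped, scaled by 255, saved as uint8
rgba = np.zeros((64, 64, 4), np.uint8)  # uint8 RGBA is written as-is
imwrite('overlay.png', rgba)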
Example #11
def run_feeimage_test_suite():
    """ Run freeimage test suite.
    Lots of images. Better done locally and then checking the result.
    Not so much suited for CI, I think.
    """
    
    if not os.path.isdir(TESTDIR):
        os.mkdir(TESTDIR)
    if not os.path.isdir(ZIPDIR):
        os.mkdir(ZIPDIR)
    
    for name in names:
        fname = os.path.join(ZIPDIR, name+'.zip')
        # Make sure that the file is there
        if not os.path.isfile(fname):
            print('Downloading %s.zip' % name)
            f1 = urlopen(ulr+name+'.zip')
            f2 = open(fname, 'wb')
            shutil.copyfileobj(f1, f2)
            f1.close()
            f2.close()
        
        # Check contents
        zf = zipfile.ZipFile(fname, 'r')
        subnames = zf.namelist()
        zf.extractall(TESTDIR)
        zf.close()
        
        # Read and write each one
        for subname in subnames:
            if subname in FAILS:
                continue
            fname_zip = fname+'/%s' % subname
            subname_, ext = os.path.splitext(subname)
            fname_dst1 = os.path.join(TESTDIR, subname+'_1'+ext)
            fname_dst2 = os.path.join(TESTDIR, subname+'_2'+ext)
            if os.path.splitext(subname)[1].lower() in NOT_WRITABLE:
                fname_dst1 += '.png'
                fname_dst2 += '.png'
            print('Reading+saving %s' % subname)
            try:
                # Read from zip, save to file
                im = imageio.imread(fname_zip)
                imageio.imsave(fname_dst1, im)
                # Read from file, save to file
                im = imageio.imread(fname_dst1)
                imageio.imsave(fname_dst2, im)
            except Exception:
                e_type, e_value, e_tb = sys.exc_info()
                del e_tb
                err = str(e_value)
                print('woops! ' + fname_zip)
                print('  ' + err)
Example #12
def test_npz_reading_writing():
    """ Test reading and saveing npz """
    
    if IS_PYPY:
        return  # no support for npz format :(
    
    im2 = np.ones((10, 10), np.uint8) * 2
    im3 = np.ones((10, 10, 10), np.uint8) * 3
    im4 = np.ones((10, 10, 10, 10), np.uint8) * 4
    
    filename1 = os.path.join(test_dir, 'test_npz.npz')

    # One image
    imageio.imsave(filename1, im2)
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 1
    
    # Multiple images
    imageio.mimsave(filename1, [im2, im2, im2])
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 3
    
    # Volumes
    imageio.mvolsave(filename1, [im3, im3])
    im = imageio.volread(filename1)
    ims = imageio.mvolread(filename1)
    assert (im == im3).all()
    assert len(ims) == 2
    
    # Mixed
    W = imageio.save(filename1)
    assert W.format.name == 'NPZ'
    W.append_data(im2)
    W.append_data(im3)
    W.append_data(im4)
    raises(RuntimeError, W.set_meta_data, {})  # no meta data support
    W.close()
    #
    R = imageio.read(filename1)
    assert R.format.name == 'NPZ'
    ims = list(R)  # == [im for im in R]
    assert (ims[0] == im2).all()
    assert (ims[1] == im3).all()
    assert (ims[2] == im4).all()
    # Fail
    raises(IndexError, R.get_data, -1)
    raises(IndexError, R.get_data, 3)
    raises(RuntimeError, R.get_meta_data, None)  # no meta data support
    raises(RuntimeError, R.get_meta_data, 0)  # no meta data support
Example #13
def test_tifffile_reading_writing():
    """ Test reading and saving tiff """
    
    need_internet()  # We keep a test image in the imageio-binary repo
    
    im2 = np.ones((10, 10, 3), np.uint8) * 2

    filename1 = os.path.join(test_dir, 'test_tiff.tiff')

    # One image
    imageio.imsave(filename1, im2)
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 1

    # Multiple images
    imageio.mimsave(filename1, [im2, im2, im2])
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 3, ims[0].shape

    # remote multipage rgb file
    filename2 = get_remote_file('images/multipage_rgb.tif')
    img = imageio.mimread(filename2)
    assert len(img) == 2
    assert img[0].shape == (3, 10, 10)

    # Mixed
    W = imageio.save(filename1)
    W.set_meta_data({'planarconfig': 'planar'})
    assert W.format.name == 'TIFF'
    W.append_data(im2)
    W.append_data(im2)
    W.close()
    #
    R = imageio.read(filename1)
    assert R.format.name == 'TIFF'
    ims = list(R)  # == [im for im in R]
    assert (ims[0] == im2).all()
    meta = R.get_meta_data()
    assert meta['orientation'] == 'top_left'
    # Fail
    raises(IndexError, R.get_data, -1)
    raises(IndexError, R.get_data, 3)

    # Ensure imwrite write works round trip
    filename3 = os.path.join(test_dir, 'test_tiff2.tiff')
    R = imageio.imread(filename1)
    imageio.imwrite(filename3, R)
    R2 = imageio.imread(filename3)
    assert (R == R2).all()
Example #14
    def make_frames(z):
        files = []
        tmpdir = tempfile.mkdtemp()

        if verbose:
            print('Saving sequence ' + filename + ' as a ' +  vext + ' format')
        for frame in range(N_frame):
            fname = 'frame%06d.png' % frame
            full_fname = os.path.join(tmpdir, fname)
            image = np.rot90(z[..., frame])
            imageio.imsave(full_fname, (image*255).astype(np.uint8), compression=0, quantize=256)
            files.append(fname)
        return tmpdir, files
Example #15
def scandirs(path):
    for currentFile in glob.glob( os.path.join(path, '*') ):
        if os.path.isdir(currentFile):
            print('got a directory: ' + currentFile)
            scandirs(currentFile)

        filename, file_extension = os.path.splitext(currentFile)
        file_extension = file_extension.lower()
        if file_extension in rawextens:
            print("processing file: " + currentFile)
            imageFilename = "{0}.jpg".format(filename)
            raw = rawpy.imread(currentFile)
            rgb = raw.postprocess()
            imageio.imsave(imageFilename, rgb)
Example #16
def test_jpg():

    for float in (False, True):
        for crop in (0, 1, 2):
            for colors in (0, 1, 3):
                fname = fnamebase + "%i.%i.%i.jpg" % (float, crop, colors)
                rim = get_ref_im(colors, crop, float)
                imageio.imsave(fname, rim)
                im = imageio.imread(fname)
                mul = 255 if float else 1
                assert_close(rim * mul, im, 1.1)  # lossy

    # No alpha in JPEG
    raises(Exception, imageio.imsave, fname, im4)

    # Parameters
    imageio.imsave(fnamebase + ".jpg", im3, progressive=True, optimize=True, baseline=True)

    # Parameter fail
    raises(TypeError, imageio.imread, fnamebase + ".jpg", notavalidkwarg=True)
    raises(TypeError, imageio.imsave, fnamebase + ".jpg", im, notavalidk=True)

    # Compression
    imageio.imsave(fnamebase + "1.jpg", im3, quality=10)
    imageio.imsave(fnamebase + "2.jpg", im3, quality=90)
    s1 = os.stat(fnamebase + "1.jpg").st_size
    s2 = os.stat(fnamebase + "2.jpg").st_size
    assert s2 > s1
    raises(ValueError, imageio.imsave, fnamebase + ".jpg", im, quality=120)
Example #17
def test_jpg():
    
    for isfloat in (False, True):
        for crop in (0, 1, 2):
            for colors in (0, 1, 3):
                fname = fnamebase + '%i.%i.%i.jpg' % (isfloat, crop, colors)
                rim = get_ref_im(colors, crop, isfloat)
                imageio.imsave(fname, rim)
                im = imageio.imread(fname)
                mul = 255 if isfloat else 1
                assert_close(rim * mul, im, 1.1)  # lossy
    
    # No alpha in JPEG
    fname = fnamebase + '.jpg'
    raises(Exception, imageio.imsave, fname, im4)
    
    # Parameters
    imageio.imsave(fnamebase + '.jpg', im3, progressive=True, optimize=True, 
                   baseline=True)
    
    # Parameter fail - We let Pillow kwargs through
    # raises(TypeError, imageio.imread, fnamebase + '.jpg', notavalidkwarg=1)
    # raises(TypeError, imageio.imsave, fnamebase + '.jpg', im, notavalidk=1)
    
    # Compression
    imageio.imsave(fnamebase + '1.jpg', im3, quality=10)
    imageio.imsave(fnamebase + '2.jpg', im3, quality=90)
    s1 = os.stat(fnamebase + '1.jpg').st_size
    s2 = os.stat(fnamebase + '2.jpg').st_size
    assert s2 > s1 
    raises(ValueError, imageio.imsave, fnamebase + '.jpg', im, quality=120)
Example #18
def test_pnm():

    for useAscii in (True, False):
        for crop in (0, 1, 2):
            for colors in (0, 1, 3):
                fname = fnamebase
                fname += "%i.%i.%i.ppm" % (useAscii, crop, colors)
                rim = get_ref_im(colors, crop, isfloat=False)
                imageio.imsave(fname, rim, use_ascii=useAscii)
                im = imageio.imread(fname)
                assert_close(rim, im, 0.1)  # lossless

                # Parameter fail
                raises(TypeError, imageio.imread, fname, notavalidkwarg=True)
                raises(TypeError, imageio.imsave, fname, im, notavalidk=True)
Example #19
def main(srcDir):
	srcNames = sorted(os.listdir(srcDir))
	nr = 0
	for srcName in srcNames:
		nr += 1
		srcPath = os.path.join(srcDir, srcName)
		rgb = tifffile.imread(srcPath)
		if nr == 1:
			rgbMerge = rgb
		else:
			rgbMerge = np.concatenate((rgbMerge, rgb), axis=1)
		countLog = str(nr) + '/' + str(len(srcNames)) + ' >'
		print(countLog, srcName, 'merged')
	destPath = os.path.join(srcDir, '..', '3d360_merge__' + os.path.basename(srcDir) + '.tif')
	imageio.imsave(destPath, rgbMerge)
	print('finished!')
Example #20
def test_ico():

    if os.getenv("TRAVIS", "") == "true" and sys.version_info >= (3, 4):
        skip("Freeimage ico is unstable for this Travis build")

    for float in (False, True):
        for crop in (0,):
            for colors in (1, 3, 4):
                fname = fnamebase + "%i.%i.%i.ico" % (float, crop, colors)
                rim = get_ref_im(colors, crop, float)
                rim = rim[:32, :32]  # ico needs nice size
                imageio.imsave(fname, rim)
                im = imageio.imread(fname)
                mul = 255 if float else 1
                assert_close(rim * mul, im, 0.1)  # lossless

    # Meta data
    R = imageio.read(fnamebase + "0.0.1.ico")
    assert isinstance(R.get_meta_data(0), dict)
    assert isinstance(R.get_meta_data(None), dict)  # But this prints a warning
    R.close()
    writer = imageio.save(fnamebase + "I.ico")
    writer.set_meta_data({})
    writer.close()

    # Parameters. Note that with makealpha, RGBA images are read in incorrectly
    im = imageio.imread(fnamebase + "0.0.1.ico", makealpha=True)
    assert im.ndim == 3 and im.shape[-1] == 4

    # Parameter fail
    raises(TypeError, imageio.imread, fname, notavalidkwarg=True)
    raises(TypeError, imageio.imsave, fnamebase + "1.gif", im, notavalidk=True)

    if sys.platform.startswith("win"):  # issue #21
        skip("Windows has a known issue with multi-icon files")

    # Multiple images
    im = get_ref_im(4, 0, 0)[:32, :32]
    ims = [np.repeat(np.repeat(im, i, 1), i, 0) for i in (1, 2)]  # SegF on win
    ims = im, np.column_stack((im, im)), np.row_stack((im, im))  # error on win
    imageio.mimsave(fnamebase + "I2.ico", ims)
    ims2 = imageio.mimread(fnamebase + "I2.ico")
    for im1, im2 in zip(ims, ims2):
        assert_close(im1, im2, 0.1)
Example #21
def test_tifffile_reading_writing():
    """ Test reading and saveing tiff """
    im2 = np.ones((10, 10, 3), np.uint8) * 2

    filename1 = os.path.join(test_dir, "test_tiff.tiff")

    # One image
    imageio.imsave(filename1, im2)
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 1

    # Multiple images
    imageio.mimsave(filename1, [im2, im2, im2])
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 3, ims[0].shape

    # remote multipage rgb file
    filename2 = get_remote_file("images/multipage_rgb.tif")
    img = imageio.mimread(filename2)
    assert len(img) == 2
    assert img[0].shape == (3, 10, 10)

    # Mixed
    W = imageio.save(filename1)
    W.set_meta_data({"planarconfig": "planar"})
    assert W.format.name == "TIFF"
    W.append_data(im2)
    W.append_data(im2)
    W.close()
    #
    R = imageio.read(filename1)
    assert R.format.name == "TIFF"
    ims = list(R)  # == [im for im in R]
    assert (ims[0] == im2).all()
    meta = R.get_meta_data()
    assert meta["is_rgb"]
    # Fail
    raises(IndexError, R.get_data, -1)
    raises(IndexError, R.get_data, 3)
Example #22
def load_image(url):
    """ To read images/diagrams (some from the web/GDrive) and cahche them.
    """
    if not '/' in url:
        url = os.path.join(THIS_DIR, 'images', url)
    if url.startswith('http'):
        fname = hashlib.md5(url.encode('utf-8')).hexdigest() + '.png'
    else:
        fname = url.split('/')[-1]
    filename = os.path.join(THIS_DIR, 'images', fname)
    if filename == url:
        return imageio.imread(filename)
    elif not LOAD_IMAGES_FROM_CACHE:
        data = imageio.imread(url, os.path.splitext(fname)[1])
        imageio.imsave(filename, data)
        return data
    elif os.path.isfile(filename):
        return imageio.imread(filename)
    else:
        return np.zeros((10, 10), np.uint8)
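A hypothetical usage sketch for load_image above (the URL is illustrative): remote images are cached under THIS_DIR/images keyed by an MD5 hash of the URL (when LOAD_IMAGES_FROM_CACHE is False the download refreshes the cache), while bare names resolve to local files:

im1 = load_image('https://example.com/diagram.png')  # fetched, then cached as <md5>.png
im2 = load_image('my_figure.png')                    # resolved inside THIS_DIR/images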
Example #23
def merge_imagens():
	listaImagensMergeadas = []

	for passo in range(0, numeroImagensIntermediarias + 2):
		print("Merging step: " + str(passo))
		imagemDeformada1 = imread("imagens_semelhante1/deformacao_passo" + str(passo) + ".jpg")
		imagemDeformada2 = imread("imagens_semelhante2/deformacao_passo" + str(passo) + ".jpg")

		imagemMergeada = imagemDeformada2.copy()
		alturaImagem, larguraImagem, _ = shape(imagemMergeada)

		for linhaPixel in range(0, alturaImagem):
			for colunaPixel in range(0, larguraImagem):
				t = passo * 1.0 / (numeroImagensIntermediarias + 1)
				imagemMergeada[linhaPixel, colunaPixel] = (1 - t) * imagemDeformada1[linhaPixel, colunaPixel] + t * imagemDeformada2[linhaPixel, colunaPixel]

		imsave("imagensMedias/imagemMediaPasso" + str(passo) + ".jpg", imagemMergeada)
		listaImagensMergeadas.append(imagemMergeada)

	return listaImagensMergeadas
Example #24
    def save_frame(self, filename, t=0, withmask=True):
        """ Save a clip's frame to an image file.

        Saves the frame of clip corresponding to time ``t`` in
        'filename'. ``t`` can be expressed in seconds (15.35), in
        (min, sec), in (hour, min, sec), or as a string: '01:03:05.35'.

        If ``withmask`` is ``True`` the mask is saved in
        the alpha layer of the picture (only works with PNGs).

        """

        im = self.get_frame(t)

        if withmask and self.mask is not None:
            mask = 255 * self.mask.get_frame(t)
            im = np.dstack([im, mask]).astype('uint8')
        else:
            im = im.astype("uint8")

        imsave(filename, im)
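A hypothetical usage sketch for save_frame above (clip is assumed to be a loaded clip object; filenames are illustrative), showing the accepted forms of ``t`` from the docstring:

clip.save_frame('frame.png', t=15.35)               # seconds
clip.save_frame('frame.png', t=(1, 30))             # (min, sec)
clip.save_frame('frame.png', t='01:03:05.35')       # time string
clip.save_frame('frame.png', t=2, withmask=False)   # skip the alpha layer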
Example #25
def test_gif():
    # The not-animated gif
    
    for float in (False, True):
        for crop in (0, 1, 2):
            for colors in (0, 3, 4):
                fname = fnamebase + '%i.%i.%i.gif' % (float, crop, colors)
                rim = get_ref_im(colors, crop, float)
                imageio.imsave(fname, rim)
                im = imageio.imread(fname)
                mul = 255 if float else 1
                if colors in (0, 1):
                    im = im[:, :, 0]
                else:
                    im = im[:, :, :3]
                    rim = rim[:, :, :3]
                assert_close(rim * mul, im, 1.1)  # lossless
    
    # Parameter fail
    raises(TypeError, imageio.imread, fname, notavalidkwarg=True)
    raises(TypeError, imageio.imsave, fnamebase + '1.gif', im, notavalidk=True)
Example #26
def test_gif():
    # The not-animated gif

    for isfloat in (False, True):
        for crop in (0, 1, 2):
            for colors in (0, 3, 4):
                if colors > 1 and sys.platform.startswith("darwin"):
                    continue  # quantize fails, see also png
                fname = fnamebase + "%i.%i.%i.gif" % (isfloat, crop, colors)
                rim = get_ref_im(colors, crop, isfloat)
                imageio.imsave(fname, rim)
                im = imageio.imread(fname)
                mul = 255 if isfloat else 1
                if colors not in (0, 1):
                    im = im[:, :, :3]
                    rim = rim[:, :, :3]
                assert_close(rim * mul, im, 1.1)  # lossless

    # Parameter fail
    raises(TypeError, imageio.imread, fname, notavalidkwarg=True)
    raises(TypeError, imageio.imsave, fnamebase + "1.gif", im, notavalidk=True)
Example #27
def test_ico():
    
    for float in (False, True):
        for crop in (0, 1, 2):
            for colors in (1, 3, 4):
                fname = fnamebase + '%i.%i.%i.ico' % (float, crop, colors)
                rim = get_ref_im(colors, crop, float)
                imageio.imsave(fname, rim)
                im = imageio.imread(fname)
                mul = 255 if float else 1
                assert_close(rim * mul, im, 0.1)  # lossless
    
    # Meta data
    R = imageio.read(fnamebase + '0.0.1.ico')
    assert isinstance(R.get_meta_data(0), dict)
    assert isinstance(R.get_meta_data(None), dict)  # But this prints a warning
    R.close()
    writer = imageio.save(fnamebase + 'I.ico')
    writer.set_meta_data({})
    writer.close()
    
    # Parameters. Note that with makealpha, RGBA images are read in incorrectly
    im = imageio.imread(fnamebase + '0.0.1.ico', makealpha=True)
    assert im.ndim == 3 and im.shape[-1] == 4
    
    # Parameter fail
    raises(TypeError, imageio.imread, fname, notavalidkwarg=True)
    raises(TypeError, imageio.imsave, fnamebase + '1.gif', im, notavalidk=True)

    if sys.platform.startswith('win'):  # issue #21
        skip('Windows has a known issue with multi-icon files')
    
    # Multiple images
    im = get_ref_im(4, 0, 0)
    ims = [np.repeat(np.repeat(im, i, 1), i, 0) for i in (1, 2)]  # SegF on win
    ims = im, np.column_stack((im, im)), np.row_stack((im, im))  # error on win
    imageio.mimsave(fnamebase + 'I2.ico', ims)
    ims2 = imageio.mimread(fnamebase + 'I2.ico')
    for im1, im2 in zip(ims, ims2):
        assert_close(im1, im2, 0.1)
Example #28
def createPNG(s111,bname,sfill):			
	speedInd = 0
	dirInd   = 1

	#numpy.zeros(shape, dtype=float, order='C') -> Return a new array of given shape and type, filled with zeros.
	#i assume this is X,Y,4 (or Y,X,4) and dtype uint8 = Byte (-128 to 127)
	row = s111.shape[0]
	col = s111.shape[1]

	dim = 4 #s111 - 4 here for RGBA
	out = np.zeros((row,col,dim),np.uint8)
	
	#what is this doing exactly? converting speed/direction values to u and v values
	#This will loop through each cell in the dataset and take the value that is found
	for i in range(row-1,-1,-1):
		for j in range(0,col):			
		#this checks if the 'mask' bit is on...i.e. if there is a speed/direction value in this location
			if s111[i,j][speedInd] != sfill:#-9999.0 is not the s111 standard for land value; cannot hard code this, must use group_f value in file 
				speed_ms = s111[i,j][speedInd]*0.514
				dir_rad = math.radians(s111[i,j][dirInd])
				fx = speed_ms*math.sin(dir_rad)
				fy = speed_ms*math.cos(dir_rad)
				fxi = int((fx+20.48)/0.01)
				fyi = int((fy+20.48)/0.01)

				#the 0,1,2,3 the RGBA values for the PNG
				out[i,j,0] = (fxi & 0x0ff0)>>4
				out[i,j,1] = ((fxi & 0x000f)<<4)|((fyi & 0x0f00)>>8)
				out[i,j,2] = fyi & 0x00ff				
				#out[i,j,3] = mask[i,j]*255 #this is looking for just a 1 or a 0 but this will only be executed if the "mask" is basically 'false' i.e. there IS a value here so it will always be 255
				out[i,j,3] = 255
			else: 
				s111[i,j][speedInd] = -9999.0
				#print(s111[i,j][speedInd])
	outfn = getOutfn(bname)
	#scipy.misc.imsave(outfn,out)
	out = np.flipud(out)  # image was upside-down on save
	# imsave is deprecated; use imwrite instead
	io.imwrite(outfn, out)
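A minimal decode sketch (not part of the original source) showing how the 12-bit fixed-point u/v components packed into the RGB channels above can be recovered from one RGBA pixel:

def decode_pixel(px):
	r, g, b, a = int(px[0]), int(px[1]), int(px[2]), int(px[3])
	fxi = (r << 4) | (g >> 4)        # upper 8 bits from R, lower 4 from G's high nibble
	fyi = ((g & 0x0f) << 8) | b      # upper 4 bits from G's low nibble, lower 8 from B
	fx = fxi * 0.01 - 20.48          # undo the 0.01 fixed-point scale and offset
	fy = fyi * 0.01 - 20.48
	return fx, fy                    # east/north current components in m/s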
Example #29
def imsave(filename, im, format=None):
    """ Function to save image data. Requires imageio or PIL.
    """
    # Import imageio or PIL
    imageio = PIL = None
    try:
        import imageio
    except ImportError:
        try:
            import PIL.Image
        except ImportError:
            pass
     
    if imageio is not None:
        return imageio.imsave(filename, im, format)
    elif PIL is not None:
        pim = PIL.Image.fromarray(im)
        pim.save(filename, format)
    else:
        raise RuntimeError("imsave requires the imageio or PIL package.")
Example #30
def test_bmp():

    for float in (False, True):
        for crop in (0, 1, 2):
            for colors in (0, 1, 3, 4):
                fname = fnamebase + "%i.%i.%i.bmp" % (float, crop, colors)
                rim = get_ref_im(colors, crop, float)
                imageio.imsave(fname, rim)
                im = imageio.imread(fname)
                mul = 255 if float else 1
                assert_close(rim * mul, im, 0.1)  # lossless

    # Compression
    imageio.imsave(fnamebase + "1.bmp", im3, compression=False)
    imageio.imsave(fnamebase + "2.bmp", im3, compression=True)
    s1 = os.stat(fnamebase + "1.bmp").st_size
    s2 = os.stat(fnamebase + "2.bmp").st_size
    assert s1 + s2  # todo: bug in FreeImage? assert s1 < s2

    # Parameter fail
    raises(TypeError, imageio.imread, fnamebase + "1.bmp", notavalidkwarg=True)
    raises(TypeError, imageio.imsave, fnamebase + "1.bmp", im, notavalidk=True)
Example #31
for i in range(0, H, 2):
    for j in range(0, W, 2):
        temp = raw_image[i + 1][j] / 2 + raw_image[i][j + 1] / 2
        g_image[i][j] = temp
        g_image[i + 1][j + 1] = temp
        g_image[i + 1][j] = temp
        g_image[i][j + 1] = temp


for i in range(0, H, 2):
    for j in range(0, W, 2):
        b_image[i + 1][j] = raw_image[i + 1][j + 1]
        b_image[i][j] = raw_image[i + 1][j + 1]
        b_image[i][j + 1] = raw_image[i + 1][j + 1]

rgb_image = cv2.merge([b_image, g_image, r_image])

rgb_image = im = np.maximum(rgb_image - 512, 0) / (16383 - 512)  # subtract the black level

rgb = raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
imageio.imsave('source.tiff', rgb)  # save in the best format

rgb = raw.postprocess()
imageio.imsave('default.tiff', rgb)  # default rawpy processing


rgb_image = np.minimum(np.maximum(rgb_image, 0), 1)
scipy.misc.toimage(rgb_image * 255, high=255, low=0, cmin=0, cmax=255).save('rgb_image.jpg')  # result of the custom processing above


Example #32
    image1 = torch.tensor(image1).permute(2, 0, 1).unsqueeze(0).float().cuda()
    image = torch.cat((image0, image1), dim=0)
    image = normalize(image)
    image.requires_grad_()

    output = model(image)

    # LRP
    Tt = clrp_target(output)

    lrp_rel = model.relprop(R=Tt, alpha=1)

    clrp_maps = (render.hm_to_rgb(
        lrp_rel[0, 0].data.cpu().numpy(), scaling=3, sigma=1, cmap='seismic') *
                 255).astype(np.uint8)
    imageio.imsave('../lrp_hm0.jpg', clrp_maps)
    clrp_maps = (render.hm_to_rgb(
        lrp_rel[1, 0].data.cpu().numpy(), scaling=3, sigma=1, cmap='seismic') *
                 255).astype(np.uint8)
    imageio.imsave('../lrp_hm1.jpg', clrp_maps)

    # CLRP
    Tt = clrp_target(output)
    To = clrp_others(output)

    clrp_rel_target = model.relprop(R=Tt, alpha=1)
    clrp_rel_others = model.relprop(R=To, alpha=1)

    clrp_rscale = clrp_rel_target.sum(
        dim=[1, 2, 3], keepdim=True) / clrp_rel_others.sum(dim=[1, 2, 3],
                                                           keepdim=True)
Example #33
def beauty(image):
    org_h, org_w, _ = imread(image).shape

    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    # Read the input image
    img = cv2.imread(image)
    # Convert into grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect faces
    faces = face_cascade.detectMultiScale(gray, 1.1, 5)
    # Draw rectangle around the faces
    face_num = 1
    for (x, y, w, h) in faces:
        # img = cv2.imread(args.no_makeup)

        # cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 0)
        print(x, y, w, h)
        im1 = img[y:y + h, x:x + w, ::-1]

        # Display the output
        imsave(f'im{face_num}.jpg', im1)

        batch_size = 1
        img_size = 256
        no_makeup = cv2.resize(imread(f'im{face_num}.jpg'),
                               (img_size, img_size))
        X_img = np.expand_dims(preprocess(no_makeup), 0)
        makeups = glob.glob(os.path.join('imgs', 'makeup', '*.*'))
        result = np.ones((img_size, (len(makeups) + 1) * img_size, 3))
        result[:img_size, :img_size] = no_makeup / 255.
        final = np.ones((org_h, (len(makeups) + 1) * org_w, 3))

        tf.reset_default_graph()
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())

        saver = tf.train.import_meta_graph(os.path.join('model', 'model.meta'))
        saver.restore(sess, tf.train.latest_checkpoint('model'))

        graph = tf.get_default_graph()
        X = graph.get_tensor_by_name('X:0')
        Y = graph.get_tensor_by_name('Y:0')
        Xs = graph.get_tensor_by_name('generator/xs:0')

        for i in range(len(makeups)):
            makeup = cv2.resize(imread(makeups[i]), (img_size, img_size))
            Y_img = np.expand_dims(preprocess(makeup), 0)
            Xs_ = sess.run(Xs, feed_dict={X: X_img, Y: Y_img})
            Xs_ = deprocess(Xs_)
            final[:org_h, (i + 1) * org_w:(i + 2) * org_w] = img

            # result[:img_size, (i + 1) * img_size: (i + 2) * img_size] = makeup / 255.
            result[:img_size, (i + 1) * img_size:(i + 2) * img_size] = Xs_[0]

        imsave('result_.jpg', result)
        result_ = cv2.resize(imread('result_.jpg'),
                             (w * (len(makeups) + 1), h))

        imsave('result_.jpg', result_)
        # result_=Image.open('result_.jpg')
        im_cut = []
        final = final[:, :, ::-1]
        if face_num == 1:
            for i in range(len(makeups)):
                # print(i*w,0, (i+1)*w, h)
                final[y:y + h, x + (i + 1) * org_w:x + w +
                      (i + 1) * org_w] = result_[:h, (i + 1) * w:(i + 2) * w]
            imsave('result.jpg', final)
        else:
            final = cv2.imread('result.jpg')[:, :, ::-1]
            for i in range(len(makeups)):
                # print(i*w,0, (i+1)*w, h)
                final[y:y + h, x + (i + 1) * org_w:x + w +
                      (i + 1) * org_w] = result_[:h, (i + 1) * w:(i + 2) * w]
            imsave('result.jpg', final)

        os.remove(f'im{face_num}.jpg')

        face_num += 1

    os.remove('result_.jpg')
Example #34
             total_Oaccuracy += sess.run(Oaccuracy,
                                         feed_dict={
                                             x_n: batch_x,
                                             y: batch_y,
                                             keep_prob: 1.,
                                             is_train: False
                                         })
         print('Iteration %i, Accuracy: %.2f' % (i_iter,
                                                  total_Oaccuracy / mb_idx))
     # Store images
     if i_iter % store_img_iter == 0 or i_iter == max_iter - 1:
         # Store Generated
         genmix_imgs = (np.transpose(gen_img, [0, 2, 3, 1]) + 1.) * 127.5
         genmix_imgs = np.uint8(genmix_imgs[:, :, :, ::-1])
         genmix_imgs = drawblock(genmix_imgs, 10)
         imsave(os.path.join(gen_dir, '%i.jpg' % i_iter), genmix_imgs)
         # Store Generated 96
         genmix_imgs = (np.transpose(gen_img128, [0, 2, 3, 1]) + 1.) * 127.5
         genmix_imgs = np.uint8(genmix_imgs[:, :, :, ::-1])
         genmix_imgs = drawblock(genmix_imgs, 10)
         imsave(os.path.join(gen_dir128, '%i.jpg' % i_iter), genmix_imgs)
         # Store Real
         real_imgs = (np.transpose(batch_x, [0, 2, 3, 1]) + 1.) * 127.5
         real_imgs = np.uint8(real_imgs[:, :, :, ::-1])
         real_imgs = drawblock(real_imgs, 10)
         imsave(os.path.join(real_dir, '%i.jpg' % i_iter), real_imgs)
     # Store model
     if i_iter % save_iter == 0 or i_iter == max_iter - 1 or i_iter == max_iter:
         save_path = saver.save(sess, dir_name + '/cdgan%i.ckpt' % i_iter)
 coord.request_stop()
 coord.join(threads)
Example #35
	        if f1[i,j] == 0: canvas[i,j] = im1[i,j]
	        else: canvas[i,j] = im2[i,j]
	return canvas

plane = read_image('plane2.png')
pilot = read_image("pilot.jpg")

r,c = 100,425
plane = plane[r:r + 470, c: c + 299]

assert plane.shape == pilot.shape

pilot_fft = fftpack.fftshift(fftpack.fft2(pilot))
plane_fft = fftpack.fftshift(fftpack.fft2(plane))

imageio.imsave('pilot_fft.png', (np.log(abs(pilot_fft))* 255 /np.amax(np.log(abs(pilot_fft)))).astype(np.uint8))
imageio.imsave('plane_fft.png', (np.log(abs(plane_fft))* 255 /np.amax(np.log(abs(plane_fft)))).astype(np.uint8))

pilot_im = read_image('pilot_fft.png')
plane_im = read_image('plane_fft.png')

pilot_low = filter_im(pilot_im)
pilot_high = filter_im(pilot_im, True)

plane_low = filter_im(plane_im)
plane_high = filter_im(plane_im, True)

pp1 = join_im(pilot_low, plane_high, pilot_fft, plane_fft)
pp2 = join_im(pilot_high, plane_low, pilot_fft, plane_fft)

Example #36
def saveImage(path, image):
    imageio.imsave(path, image)
Example #37
    import torchvision.transforms as transforms
    from tqdm import tqdm
    from imageio import imsave
    import scipy.io as sio

    # meta = sio.loadmat('/home/shirgur/ext/Data/Datasets/temp/ILSVRC2012_devkit_t12/data/meta.mat', squeeze_me=True)['synsets']

    # Data
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    test_img_trans = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        normalize,
    ])
    test_lbl_trans = transforms.Compose([
        transforms.Resize((224, 224), Image.NEAREST),
    ])

    ds = Imagenet_Segmentation(
        '/home/shirgur/ext/Data/Datasets/imagenet-seg/other/gtsegs_ijcv.mat',
        transform=test_img_trans,
        target_transform=test_lbl_trans)

    for i, (img, tgt) in enumerate(tqdm(ds)):
        tgt = (tgt.numpy() * 255).astype(np.uint8)
        imsave('/home/shirgur/ext/Code/C2S/run/imagenet/gt/{}.png'.format(i),
               tgt)

    print('here')
Example #38
# magnitude = np.load(path.join(npy_path, 'magnitude.npy'))
# gx = np.load(path.join(npy_path, 'gx.npy'))
# gy = np.load(path.join(npy_path, 'gy.npy'))
# magnitude_after_sup = canny.non_maxima_suppression(gx, gy, magnitude)
# np.save(path.join(npy_path, 'magnitude_after_sup'), magnitude_after_sup)
# imageio.imsave(path.join(output_path, 'magnitude_after_sup.bmp'), magnitude_after_sup)

gx = np.load(path.join(npy_path, 'gx.npy'))
gy = np.load(path.join(npy_path, 'gy.npy'))
magnitude = np.load(path.join(npy_path, 'magnitude.npy'))
magnitude_after_sup = np.load(path.join(npy_path, 'magnitude_after_sup.npy'))
output_1 = canny_detector.thresholding(magnitude_after_sup, 0.1)
output_2 = canny_detector.thresholding(magnitude_after_sup, 0.3)
output_3 = canny_detector.thresholding(magnitude_after_sup, 0.5)
imageio.imsave(path.join(output_path, 'output_1.bmp'), output_1[0])
imageio.imsave(path.join(output_path, 'output_2.bmp'), output_2[0])
imageio.imsave(path.join(output_path, 'output_3.bmp'), output_3[0])

#
# plt.figure(3, figsize = (8,8))
# plt.subplot(311)
# plt.imshow(gx, cmap='gray')
# plt.subplot(312)
# plt.imshow(gy, cmap='gray')
# plt.subplot(313)
# plt.imshow(magnitude, cmap='gray')
# plt.show()
#
# magnitude_after_sup = canny.non_maxima_suppression(gx, gy, magnitude)
# plt.figure(4)
Example #39
def main():
    a = get_args()

    # Load CLIP models
    model_clip, _ = clip.load(a.model)
    if a.verbose is True: print(' using model', a.model)
    xmem = {'RN50': 0.5, 'RN50x4': 0.16, 'RN101': 0.33}
    if 'RN' in a.model:
        a.samples = int(a.samples * xmem[a.model])
    workdir = os.path.join(a.out_dir, basename(a.in_txt))
    workdir += '-%s' % a.model if 'RN' in a.model.upper() else ''
    os.makedirs(workdir, exist_ok=True)

    if a.diverse != 0:
        a.samples = int(a.samples * 0.5)

    norm_in = torchvision.transforms.Normalize(
        (0.48145466, 0.4578275, 0.40821073),
        (0.26862954, 0.26130258, 0.27577711))

    if a.in_txt0 is not None:
        if a.verbose is True: print(' subtract text:', basename(a.in_txt0))
        if a.translate:
            translator = Translator()
            a.in_txt0 = translator.translate(a.in_txt0, dest='en').text
            if a.verbose is True: print(' translated to:', a.in_txt0)
        if a.multilang is True:
            model_lang = SentenceTransformer(
                'clip-ViT-B-32-multilingual-v1').cuda()
            txt_enc0 = model_lang.encode(
                [a.in_txt0], convert_to_tensor=True,
                show_progress_bar=False).detach().clone()
            del model_lang
        else:
            txt_enc0 = model_clip.encode_text(clip.tokenize(
                a.in_txt0).cuda()).detach().clone()

    # make init
    global params_start, params_ema
    params_shape = [1, 3, a.size[0], a.size[1] // 2 + 1, 2]
    params_start = torch.randn(*params_shape).cuda()  # random init
    params_ema = 0.
    if a.resume is not None and os.path.isfile(a.resume):
        if a.verbose is True: print(' resuming from', a.resume)
        params_start = load_params(a.resume).cuda()
        if a.keep > 0:
            params_ema = params_start[0].detach().clone()
    else:
        a.resume = 'init.pt'

    torch.save(params_start, 'init.pt')  # final init
    shutil.copy(a.resume,
                os.path.join(workdir, '000-%s.pt' % basename(a.resume)))

    prev_enc = 0

    def process(txt, num):

        sd = 0.01
        if a.keep > 0: sd = a.keep + (1 - a.keep) * sd
        params, image_f = fft_image([1, 3, *a.size],
                                    resume='init.pt',
                                    sd=sd,
                                    decay_power=a.decay)
        image_f = to_valid_rgb(image_f, colors=a.colors)

        if a.prog is True:
            lr1 = a.lrate * 2
            lr0 = a.lrate * 0.1
        else:
            lr0 = a.lrate
        optimizer = torch.optim.Adam(params, lr0)

        if a.verbose is True: print(' ref text: ', txt)
        if a.translate:
            translator = Translator()
            txt = translator.translate(txt, dest='en').text
            if a.verbose is True: print(' translated to:', txt)
        if a.multilang is True:
            model_lang = SentenceTransformer(
                'clip-ViT-B-32-multilingual-v1').cuda()
            txt_enc = model_lang.encode(
                [txt], convert_to_tensor=True,
                show_progress_bar=False).detach().clone()
            del model_lang
        else:
            txt_enc = model_clip.encode_text(
                clip.tokenize(txt).cuda()).detach().clone()
        if a.notext > 0:
            txt_plot = torch.from_numpy(plot_text(txt, a.modsize) /
                                        255.).unsqueeze(0).permute(0, 3, 1,
                                                                   2).cuda()
            txt_plot_enc = model_clip.encode_image(txt_plot).detach().clone()
        else:
            txt_plot_enc = None

        out_name = '%03d-%s' % (num + 1, txt_clean(txt))
        out_name += '-%s' % a.model if 'RN' in a.model.upper() else ''
        tempdir = os.path.join(workdir, out_name)
        os.makedirs(tempdir, exist_ok=True)

        pbar = ProgressBar(a.steps // a.fstep)
        for i in range(a.steps):
            loss = 0

            noise = a.noise * torch.randn(1, 1, *params[0].shape[2:4],
                                          1).cuda() if a.noise > 0 else None
            img_out = image_f(noise)

            imgs_sliced = slice_imgs([img_out],
                                     a.samples,
                                     a.modsize,
                                     norm_in,
                                     a.overscan,
                                     micro=None)
            out_enc = model_clip.encode_image(imgs_sliced[-1])
            loss -= torch.cosine_similarity(txt_enc, out_enc, dim=-1).mean()
            if a.notext > 0:
                loss += a.notext * torch.cosine_similarity(
                    txt_plot_enc, out_enc, dim=-1).mean()
            if a.diverse != 0:
                imgs_sliced = slice_imgs([image_f(noise)],
                                         a.samples,
                                         a.modsize,
                                         norm_in,
                                         a.overscan,
                                         micro=None)
                out_enc2 = model_clip.encode_image(imgs_sliced[-1])
                loss += a.diverse * torch.cosine_similarity(
                    out_enc, out_enc2, dim=-1).mean()
                del out_enc2
                torch.cuda.empty_cache()
            if a.expand > 0:
                global prev_enc
                if i > 0:
                    loss += a.expand * torch.cosine_similarity(
                        out_enc, prev_enc, dim=-1).mean()
                prev_enc = out_enc.detach().clone()
            if a.in_txt0 is not None:  # subtract text
                loss += torch.cosine_similarity(txt_enc0, out_enc,
                                                dim=-1).mean()
            del img_out, imgs_sliced, out_enc
            torch.cuda.empty_cache()

            if a.prog is True:
                lr_cur = lr0 + (i / a.steps) * (lr1 - lr0)
                for g in optimizer.param_groups:
                    g['lr'] = lr_cur

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i % a.fstep == 0:
                with torch.no_grad():
                    img = image_f(contrast=a.contrast).cpu().numpy()[0]
                checkout(img,
                         os.path.join(tempdir, '%04d.jpg' % (i // a.fstep)),
                         verbose=a.verbose)
                pbar.upd()
                del img

        if a.keep > 0:
            global params_start, params_ema
            params_ema = ema(params_ema, params[0].detach().clone(), num + 1)
            torch.save((1 - a.keep) * params_start + a.keep * params_ema,
                       'init.pt')

        torch.save(params[0], '%s.pt' % os.path.join(workdir, out_name))
        shutil.copy(
            img_list(tempdir)[-1],
            os.path.join(workdir, '%s-%d.jpg' % (out_name, a.steps)))
        os.system('ffmpeg -v warning -y -i %s\%%04d.jpg "%s.mp4"' %
                  (tempdir, os.path.join(workdir, out_name)))

    with open(a.in_txt, 'r', encoding="utf-8") as f:
        texts = f.readlines()
        texts = [
            tt.strip() for tt in texts if len(tt.strip()) > 0 and tt[0] != '#'
        ]
    if a.verbose is True:
        print(' total lines:', len(texts))
        print(' samples:', a.samples)

    for i, txt in enumerate(texts):
        process(txt, i)

    vsteps = int(a.length * 25 / len(texts))  # 25 fps
    tempdir = os.path.join(workdir, '_final')
    os.makedirs(tempdir, exist_ok=True)

    def read_pt(file):
        return torch.load(file).cuda()

    if a.verbose is True: print(' rendering complete piece')
    ptfiles = file_list(workdir, 'pt')
    pbar = ProgressBar(vsteps * len(ptfiles))
    for px in range(len(ptfiles)):
        params1 = read_pt(ptfiles[px])
        params2 = read_pt(ptfiles[(px + 1) % len(ptfiles)])

        params, image_f = fft_image([1, 3, *a.size],
                                    resume=params1,
                                    sd=1.,
                                    decay_power=a.decay)
        image_f = to_valid_rgb(image_f, colors=a.colors)

        for i in range(vsteps):
            with torch.no_grad():
                img = image_f(
                    (params2 - params1) *
                    math.sin(1.5708 * i / vsteps)**2)[0].permute(1, 2, 0)
                img = torch.clip(img * 255, 0,
                                 255).cpu().numpy().astype(np.uint8)
            imsave(os.path.join(tempdir, '%05d.jpg' % (px * vsteps + i)), img)
            if a.verbose is True: cvshow(img)
            pbar.upd()

    os.system('ffmpeg -v warning -y -i %s\%%05d.jpg "%s.mp4"' %
              (tempdir, os.path.join(a.out_dir, basename(a.in_txt))))
    if a.keep > 0: os.remove('init.pt')
Example #40
    def predict_with_tiles(self, filename: str, resized_size: int=None, tile_size: int=500,
                           min_overlap: float=0.2, linear_interpolation: bool=True):

        # TODO this part should only happen if self.predict_mode == 'resized_images'

        if resized_size is None or resized_size < 0:
            image_np = imread(filename)
            h, w = image_np.shape[:2]
            batch_size = 1
        else:
            raise NotImplementedError
        assert h > tile_size and w > tile_size
        # Get x and y coordinates of beginning of tiles and compute prediction for each tile
        y_step = np.ceil((h - tile_size) / (tile_size * (1 - min_overlap)))
        x_step = np.ceil((w - tile_size) / (tile_size * (1 - min_overlap)))
        y_pos = np.round(np.arange(y_step + 1) / y_step * (h - tile_size)).astype(np.int32)
        x_pos = np.round(np.arange(x_step + 1) / x_step * (w - tile_size)).astype(np.int32)

        all_outputs = list()
        with tempfile.TemporaryDirectory() as tmpdirname:
            for i, y in enumerate(y_pos):
                inside_list = list()
                for j, x in enumerate(x_pos):
                    filename_tile = os.path.join(tmpdirname, 'tile{}{}.png'.format(i, j))
                    imsave(filename_tile, image_np[y:y + tile_size, x:x + tile_size])
                    inside_list.append(self.predict(filename_tile))#, prediction_key='probs'))
                all_outputs.append(inside_list)

        def _merge_x(full_output, assigned_up_to, new_input, begin_position):
            assert full_output.shape[1] == new_input.shape[1], \
                "Shape full output is {}, but shape new_input is {}".format(full_output.shape[1], new_input.shape[1])
            overlap_size = assigned_up_to - begin_position
            normal_part_size = new_input.shape[2] - overlap_size
            assert normal_part_size > 0
            full_output[:, :, assigned_up_to:assigned_up_to + normal_part_size] = new_input[:, :, overlap_size:]
            if overlap_size > 0:
                weights = np.arange(0, overlap_size) / overlap_size
                full_output[:, :, begin_position:assigned_up_to] = (1 - weights)[:, None] * full_output[:, :,
                                                                                            begin_position:assigned_up_to] + \
                                                                   weights[:, None] * new_input[:, :, :overlap_size]

        def _merge_y(full_output, assigned_up_to, new_input, begin_position):
            assert full_output.shape[2] == new_input.shape[2]
            overlap_size = assigned_up_to - begin_position
            normal_part_size = new_input.shape[1] - overlap_size
            assert normal_part_size > 0
            full_output[:, assigned_up_to:assigned_up_to + normal_part_size] = new_input[:, overlap_size:]
            if overlap_size > 0:
                weights = np.arange(0, overlap_size) / overlap_size
                full_output[:, begin_position:assigned_up_to] = (1 - weights)[:, None, None] * full_output[:,
                                                                                               begin_position:assigned_up_to] + \
                                                                weights[:, None, None] * new_input[:, :overlap_size]

        result = {k: np.empty([batch_size, h, w] + list(v.shape[3:]), v.dtype) for k, v in all_outputs[0][0].items()
                  if k != _original_shape_key}  # do not try to merge 'original_shape' content...
        if linear_interpolation:
            for k in result.keys():
                assigned_up_to_y = 0
                for y, y_outputs in zip(y_pos, all_outputs):
                    s = list(result[k].shape)
                    tmp = np.zeros([batch_size, tile_size] + s[2:], result[k].dtype)
                    assigned_up_to_x = 0
                    for x, output in zip(x_pos, y_outputs):
                        _merge_x(tmp, assigned_up_to_x, output[k], x)
                        assigned_up_to_x = x + tile_size
                    _merge_y(result[k], assigned_up_to_y, tmp, y)
                    assigned_up_to_y = y + tile_size
        else:
            for k in result.keys():
                for y, y_outputs in zip(y_pos, all_outputs):
                    for x, output in zip(x_pos, y_outputs):
                        result[k][:, y:y + tile_size, x:x + tile_size] = output[k]

        result[_original_shape_key] = np.array([h, w], np.uint)
        return result
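A small standalone sketch (values are illustrative) of the tile-placement arithmetic used in predict_with_tiles above: start offsets are spread evenly so that adjacent tiles overlap by at least min_overlap of a tile:

import numpy as np

h, tile_size, min_overlap = 1200, 500, 0.2
y_step = np.ceil((h - tile_size) / (tile_size * (1 - min_overlap)))
y_pos = np.round(np.arange(y_step + 1) / y_step * (h - tile_size)).astype(np.int32)
print(y_pos)  # [0, 350, 700] -> consecutive 500 px tiles overlap by 150 px (30%)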
Example #41
from nlmpy import nlmpy
import matplotlib.pyplot as plt
import numpy as np
import imageio

# make zero array with centre as 1
b = np.zeros([100, 100]).astype(int)
b[50, 50] = 1
c = nlmpy.mpd(nRow=100, nCol=100, h=0.75)
c = np.interp(c, (c.min(), c.max()), (0, 255)).astype(int)
# make distance gradient as int of 0 - 255
a = nlmpy.distanceGradient(source=b)
a = np.interp(a, (a.min(), a.max()), (0, 255)).astype(int)
land = np.stack((b, b, a, c), axis=-1)
# save as png
imageio.imsave("test_small.png", land)
Example #42
def save_image(image, name):
    imsave("%s.png" % name, image, format="png")
Example #43
def train(bs, sample, vasample, ep, ilr, mode):
    # Initialize learning rate decay and learning rate
    lr_dec = 1
    init_lr = ilr
    # model
    model = Cuda(UNet())
    # initialize weight
    init_weights(model)
    # optimizer
    opt = torch.optim.Adam(model.parameters(), lr=init_lr)
    opt.zero_grad()
    # train and validation samples
    rows_trn = len(sample['Label'])
    rows_val = len(vasample['Label'])
    # Batch per epoch
    batches_per_epoch = rows_trn // bs
    losslists = []
    vlosslists = []
    Fscorelist = []
    PPVlist = []

    for epoch in range(ep):
        # Learning rate
        lr = init_lr * lr_dec
        order = np.arange(rows_trn)
        losslist = []
        tr_metric_list = []
        va_metric_list = []
        tr_F_list = []
        va_F_list = []
        for itr in range(batches_per_epoch):
            rows = order[itr * bs:(itr + 1) * bs]
            if itr + 1 == batches_per_epoch:
                rows = order[itr * bs:]
            # read in a batch
            trim = sample['Image'][rows[0]]
            trla = sample['Label'][rows[0]]
            trga = sample['Weight'][rows[0]]
            # read in augmented images
            for iit in range(6):
                trimm = trim[iit:iit + 1, :, :, :]
                trlaa = trla[iit:iit + 1, :, :, :]
                trgaa = trga[iit:iit + 1, :, :, :]
                label_ratio = (trlaa > 0).sum() / (
                    trlaa.shape[1] * trlaa.shape[2] * trlaa.shape[3] -
                    (trlaa > 0).sum())
                # If smaller than 1, add weight to positive prediction
                if label_ratio < 1:
                    add_weight = (trlaa[0, 0, :, :] + trgaa[0, 0, :, :] * 2 /
                                  (1 / label_ratio - 1)) / 255
                    add_weight = np.clip(add_weight / add_weight.max() * 255,
                                         40, None)
                    loss_fn = torch.nn.BCEWithLogitsLoss(weight=Cuda(
                        torch.from_numpy(add_weight).type(torch.FloatTensor)))
                # If smaller than 1, add weight to negative prediction
                elif label_ratio > 1:
                    add_weight = (trlaa[0, 0, :, :] + trgaa[0, 0, :, :] * 2 /
                                  (label_ratio - 1)) / 255
                    add_weight = np.clip(add_weight / add_weight.max() * 255,
                                         40, None)
                    loss_fn = torch.nn.BCEWithLogitsLoss(weight=Cuda(
                        torch.from_numpy(add_weight).type(torch.FloatTensor)))
                # If equal to 1, no weight added
                elif label_ratio == 1:
                    add_weight = (np.ones([
                        1, 1, trlaa.shape[2], trlaa.shape[3]
                    ])) * trgaa[0, 0, :, :]
                    loss_fn = torch.nn.BCEWithLogitsLoss(weight=Cuda(
                        torch.from_numpy(add_weight).type(torch.FloatTensor)))
                # Cuda and tensor inputs and label
                x = Cuda(
                    Variable(torch.from_numpy(trimm).type(torch.FloatTensor)))
                y = Cuda(
                    Variable(
                        torch.from_numpy(trlaa / 255).type(torch.FloatTensor)))
                # Prediction
                pred_mask = model(x)
                # BCE and dice loss
                loss = loss_fn(pred_mask, y).cpu() + dice_loss(
                    F.sigmoid(pred_mask), y)
                losslist.append(loss.data.numpy())
                loss.backward()
                # ppv metric
                tr_metric = metric(F.sigmoid(pred_mask), y)
                tr_metric_list.append(tr_metric)
                tr_F = Fscore(F.sigmoid(pred_mask), y)
                tr_F_list.append(tr_F)
            opt.step()
            opt.zero_grad()

        vlosslist = []
        # For validation set
        for itr in range(rows_val):
            vaim = vasample['Image'][itr]
            vala = vasample['Label'][itr]
            vaga = vasample['Weight'][itr]
            for iit in range(1):
                # Load one batch
                vaimm = vaim[iit:iit + 1, :, :, :]
                valaa = vala[iit:iit + 1, :, :, :]
                vagaa = vaga[iit:iit + 1, :, :, :]
                # Calculate label positive and negative ratio
                label_ratio = (valaa > 0).sum() / (
                    valaa.shape[1] * valaa.shape[2] * valaa.shape[3] -
                    (valaa > 0).sum())
                # If smaller than 1, add weight to positive prediction
                if label_ratio < 1:
                    add_weight = (valaa[0, 0, :, :] + vagaa[0, 0, :, :] * 2 /
                                  (1 / label_ratio - 1)) / 255
                    add_weight = np.clip(add_weight / add_weight.max() * 255,
                                         40, None)
                    loss_fn = torch.nn.BCEWithLogitsLoss(weight=Cuda(
                        torch.from_numpy(add_weight).type(torch.FloatTensor)))
                # If greater than 1, add weight to negative prediction
                elif label_ratio > 1:
                    add_weight = (valaa[0, 0, :, :] / 255 +
                                  vagaa[0, 0, :, :] * 2 /
                                  (label_ratio - 1)) / 255
                    add_weight = np.clip(add_weight / add_weight.max() * 255,
                                         40, None)
                    loss_fn = torch.nn.BCEWithLogitsLoss(weight=Cuda(
                        torch.from_numpy(add_weight).type(torch.FloatTensor)))
                # If equal to 1, no weight added
                elif label_ratio == 1:
                    add_weight = (np.ones([
                        1, 1, valaa.shape[2], valaa.shape[3]
                    ])) * vagaa[0, 0, :, :]
                    loss_fn = torch.nn.BCEWithLogitsLoss(weight=Cuda(
                        torch.from_numpy(add_weight).type(torch.FloatTensor)))
                # cuda and tensor sample
                xv = Cuda(
                    Variable(torch.from_numpy(vaimm).type(torch.FloatTensor)))
                yv = Cuda(
                    Variable(
                        torch.from_numpy(valaa / 255).type(torch.FloatTensor)))
                # prediction
                pred_maskv = model(xv)
                # dice and BCE loss
                vloss = loss_fn(pred_maskv, yv).cpu() + dice_loss(
                    F.sigmoid(pred_maskv), yv)
                vlosslist.append(vloss.data.numpy())
                # ppv metric
                va_metric = metric(F.sigmoid(pred_maskv), yv)
                va_metric_list.append(va_metric)
                va_F = Fscore(F.sigmoid(pred_maskv), yv)
                va_F_list.append(va_F)

        lossa = np.mean(losslist)
        vlossa = np.mean(vlosslist)
        tr_score = np.mean(tr_metric_list)
        va_score = np.mean(va_metric_list)
        tr_F_list = np.nan_to_num(tr_F_list)
        va_F_list = np.nan_to_num(va_F_list)
        tr_Fscore = np.mean(tr_F_list)
        va_Fscore = np.mean(va_F_list)
        # Print epoch summary
        print(
            'Epoch {:>3} |lr {:>1.5f} | Loss {:>1.5f} | VLoss {:>1.5f} | Train F1 {:>1.5f} | Val F1 {:>1.5f} | Train PPV {:>1.5f} | Val PPV {:>1.5f}'
            .format(epoch + 1, lr, lossa, vlossa, tr_Fscore, va_Fscore,
                    tr_score, va_score))
        losslists.append(lossa)
        vlosslists.append(vlossa)
        Fscorelist.append(va_Fscore)
        PPVlist.append(va_score)

        for param_group in opt.param_groups:
            param_group['lr'] = lr
        # Save models
        if vlossa == np.min(vlosslists):
            print('Min loss found:')
            print(vlossa)
            checkpoint = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': opt.state_dict(),
            }
            torch.save(checkpoint, '../' + output + '/' + mode + 'loss_unet')
        if va_Fscore == np.max(Fscorelist):
            print('Max F found:')
            print(va_Fscore)
            checkpoint = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': opt.state_dict(),
            }
            torch.save(checkpoint, '../' + output + '/' + mode + 'F_unet')

        if va_score == np.max(PPVlist):
            print('Max PPV found:')
            print(va_score)
            checkpoint = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': opt.state_dict(),
            }
            torch.save(checkpoint, '../' + output + '/' + mode + 'PPV_unet')

        # if the loss fails to improve over the last 5 epochs, divide the learning rate by 10
        if epoch > 6:
            if losscp(losslists[-5:]) or losscp(vlosslists[-5:]):
                lr_dec = lr_dec / 10
        # if the loss fails to improve over the last 15 epochs (or this is the final epoch), save validation predictions and stop training
        if epoch > 15:
            if losscp(losslists[-15:]) or losscp(
                    vlosslists[-15:]) or epoch + 1 == ep:
                for itr in range(rows_val):
                    vaim = vasample['Image'][itr]
                    for iit in range(1):
                        vaimm = vaim[iit:iit + 1, :, :, :]
                        xv = Cuda(
                            Variable(
                                torch.from_numpy(vaimm).type(
                                    torch.FloatTensor)))
                        pred_maskv = model(xv)
                        pred_np = (F.sigmoid(pred_maskv)).cpu().data.numpy()
                        ppp = pred_np[0, 0, :, :]
                        pred_np = pred_np.round().astype(np.uint8)
                        pred_np = pred_np[0, 0, :, :]
                        pww = pred_np
                        if not os.path.exists('../' + output + '/' + mode +
                                              'validation/'):
                            os.makedirs('../' + output + '/' + mode +
                                        'validation/')
                        pred_np = mph.remove_small_objects(
                            pred_np.astype(bool), min_size=30,
                            connectivity=2).astype(np.uint8)
                        pred_np = mph.remove_small_holes(pred_np.astype(bool),
                                                         min_size=30,
                                                         connectivity=2)
                        if np.max(pred_np) == np.min(pred_np):
                            print('1st_BOOM!')
                            print(vasample['ID'][itr])
                            if np.max(pww) == np.min(pww):
                                print('2nd_BOOM!')
                                if ppp.max() == 0 or ppp.min() == 1:
                                    print('3rd_BOOM!')
                                    imsave(
                                        '../' + output + '/' + mode +
                                        'validation/' + vasample['ID'][itr] +
                                        '.png', ppp.astype(np.uint8))
                                else:
                                    ppp = (ppp / ppp.max()) * 1
                                    ppp = (ppp > 0.95).astype(np.uint8)
                                    imsave(
                                        '../' + output + '/' + mode +
                                        'validation/' + vasample['ID'][itr] +
                                        '.png', ((ppp / ppp.max()) *
                                                 255).astype(np.uint8))
                            else:
                                imsave(
                                    '../' + output + '/' + mode +
                                    'validation/' + vasample['ID'][itr] +
                                    '.png',
                                    ((pww / pww.max()) * 255).astype(np.uint8))
                        else:
                            imsave(
                                '../' + output + '/' + mode + 'validation/' +
                                vasample['ID'][itr] + '.png',
                                ((pred_np / pred_np.max()) * 255).astype(
                                    np.uint8))
                break

    # Loss figures
    plt.plot(losslists)
    plt.plot(vlosslists)
    plt.title('Train & Validation Loss')
    plt.legend(['Train', 'Validation'], loc='upper right')
    plt.savefig('../' + output + '/' + mode + '_loss.png')
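The three label_ratio branches above build a per-pixel weight map for BCEWithLogitsLoss so the rarer class is not drowned out. A condensed standalone sketch of that balancing idea (random arrays stand in for the label and edge-weight maps; the formulas mirror the code above):

import numpy as np
import torch

np.random.seed(0)
label = ((np.random.rand(64, 64) > 0.9) * 255).astype(np.float32)
edge_weight = (np.random.rand(64, 64) * 255).astype(np.float32)

pos = (label > 0).sum()
label_ratio = pos / (label.size - pos)

if label_ratio < 1:      # positives are rare: up-weight them
    add_weight = (label + edge_weight * 2 / (1 / label_ratio - 1)) / 255
elif label_ratio > 1:    # negatives are rare: up-weight them instead
    add_weight = (label + edge_weight * 2 / (label_ratio - 1)) / 255
else:                    # balanced: keep only the edge weights
    add_weight = np.ones_like(label) * edge_weight
add_weight = np.clip(add_weight / add_weight.max() * 255, 40, None)

loss_fn = torch.nn.BCEWithLogitsLoss(
    weight=torch.from_numpy(add_weight).float())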
Example #44
0
def evaluate(data_root, model, result_path, split):
    train_id2label_id = {0: 7,
                         1: 8,
                         2: 11,
                         3: 12,
                         4: 13,
                         5: 17,
                         6: 19,
                         7: 20,
                         8: 21,
                         9: 22,
                         10: 23,
                         11: 24,
                         12: 25,
                         13: 26,
                         14: 27,
                         15: 28,
                         16: 31,
                         17: 32,
                         18: 33}
    mean = [0.2997, 0.3402, 0.3072]
    std = [0.1549, 0.1579, 0.1552]

    trans_norm = transforms.Compose([transforms.ToTensor(),
                                     transforms.Normalize(mean=mean, std=std)])

    # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
    # 1. Inference Model
    # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
    data_root = os.path.join(data_root, split)
    org_data_sub = os.listdir(data_root)
    org_data_sub.sort()

    tt_time = time.time()
    for idx in np.arange(len(org_data_sub)):
        city_name = org_data_sub[idx]
        print("> # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #")
        print("> 2. Processing City # {}...".format(city_name))
        curr_city_path = os.path.join(data_root, city_name)
        images_name = os.listdir(curr_city_path)
        images_name.sort()

        for img_id in np.arange(len(images_name)):
            curr_image = images_name[img_id]
            # print("> # ------------------------------------------------------------------------- #")
            print("> Processing City # {}, Image: {}...".format(city_name, curr_image))

            with torch.no_grad():
                # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
                # 2.1 Pre-processing Image
                # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
                curr_img_path = os.path.join(curr_city_path, curr_image)
                image = Image.open(curr_img_path).convert('RGB')
                image = np.array(image, dtype=np.uint8)
                image = np.array(image[:, :, ::-1], dtype=np.uint8)  # From RGB to BGR
                image = trans_norm(image)
                image = torch.unsqueeze(image, dim=0).cuda()  # [N, C, H, W]

                # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
                # 2.2 Prediction/Inference
                # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
                start_time = time.time()
                prediction = F.softmax(model(image), dim=1).argmax(dim=1)
                print("> Inference Time: {}s".format(time.time() - start_time))
                prediction = np.squeeze(prediction.cpu().numpy())

                mapper = lambda t: train_id2label_id[t]
                vfunc = np.vectorize(mapper)
                prediction = vfunc(prediction)

                # fun_classes = np.unique(prediction)
                # print('> {} Classes found: {}'.format(len(fun_classes), fun_classes))
                print("> Processed City #{}, Image: {}, Time: {}s".format(city_name, curr_image,
                                                                         (time.time() - start_time)))

                # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
                # 2.3 Saving prediction result
                # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
                save_path = os.path.join(result_path, city_name)
                if not os.path.exists(save_path):
                    os.makedirs(save_path, exist_ok=True)

                # cv2.namedWindow("Prediction", cv2.WINDOW_NORMAL)
                # cv2.imshow("Prediction", prediction)
                # cv2.waitKey(0)

                prediction = prediction.astype(np.uint8)
                save_name = os.path.basename(curr_image)[:-15] + 'pred_labelIds.png'
                save_path = os.path.join(save_path, save_name)
                imageio.imsave(save_path, prediction)

    print("> # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #")
    print("> Total Time Cost: {}".format(time.time() - tt_time))
    print("> Done!!!")
    print("> # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #")
Example #45
0
        return g6b_64, g6b


# Call functions
samples, samples128 = generator(z, iny)

# Initialize the variables
init = tf.global_variables_initializer()
# Config for session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Generate
with tf.Session(config=config) as sess:
    sess.run(init)
    saver = tf.train.Saver(max_to_keep=None)
    saver.restore(sess=sess, save_path='models/Style128GANAE/cdgan49999.ckpt')

    # run generator
    gen_img, gen_img128 = sess.run([samples, samples128])

    # Store Generated
    genmix_imgs = (np.transpose(gen_img, [0, 2, 3, 1]) + 1.) * 127.5
    genmix_imgs = np.uint8(genmix_imgs[:, :, :, ::-1])
    genmix_imgs = drawblock(genmix_imgs, n_classes, fixed=3, flip=False)
    imsave(os.path.join(gen_dir, 'sample.jpg'), genmix_imgs)
    # Store Generated 128
    genmix_imgs = (np.transpose(gen_img128, [0, 2, 3, 1]) + 1.) * 127.5
    genmix_imgs = np.uint8(genmix_imgs[:, :, :, ::-1])
    genmix_imgs = drawblock(genmix_imgs, n_classes, fixed=3, flip=False)
    imsave(os.path.join(gen_dir128, 'sample.jpg'), genmix_imgs)
Example #46
0
def predict(patient_num=1,
            img_directory='./',
            target_directory='./',
            weights_directory='./',
            modeltype='convunet'):

    frame_ = ['ED', 'ES']
    flip_axes = [0, 1]

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)

    imgs_pred, msks_pred = dataload.load_data(patient_index=patient_num,
                                              frame=frame_[0],
                                              flip_axes=flip_axes)

    imgs_pred = np.array(imgs_pred).astype('float64')
    mean = np.mean(imgs_pred)  # mean for data centering
    std = np.std(imgs_pred)  # std for data normalization
    imgs_pred -= mean
    imgs_pred /= std

    imgs_pred = np.array(imgs_pred)[:, :, :, None]

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)

    _, height, width, channels = imgs_pred.shape
    #_, _, _, classes = np.array(msks_pred).shape
    classes = 2

    if modeltype == 'convunet':
        model = convunet.unet(height=height,
                              width=width,
                              channels=channels,
                              classes=classes,
                              features=32,
                              depth=3,
                              padding='same',
                              temperature=1,
                              batchnorm=False,
                              dropout=0.5)

    elif modeltype == 'dilatedunet':
        model = dilatedunet.dilated_unet(height=height,
                                         width=width,
                                         channels=channels,
                                         classes=classes,
                                         features=32,
                                         depth=3,
                                         padding='same',
                                         temperature=1,
                                         batchnorm=False,
                                         dropout=0.5)
    else:
        raise ValueError("no model type named %r exists" % modeltype)

    # ========================================================
    # ========================================================

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)

    model.load_weights(weights_directory)

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)

    pred_dir = target_directory

    imgs_mask_pred = model.predict(imgs_pred, verbose=1)
    np.save(pred_dir + '/imgs_test.npy', imgs_mask_pred)

    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)

    for idx, image in enumerate(imgs_mask_pred):
        max = np.max(image[:, :, 0])
        min = np.min(image[:, :, 0])
        #image = (255*(image[:, :, 0]-min)/(max-min)).astype(np.uint8)
        imageio.imsave(
            os.path.join(pred_dir, 'prediction_0_' + str(idx) + '_pred.jpg'),
            image[:, :, 0])

    for idx, image in enumerate(imgs_mask_pred):
        max = np.max(image[:, :, 1])
        min = np.min(image[:, :, 1])
        #image = (255*(image[:, :, 1]-min)/(max-min)).astype(np.uint8)
        imageio.imsave(
            os.path.join(pred_dir, 'prediction_1_' + str(idx) + '_pred.jpg'),
            image[:, :, 1])
Example #47
0
        image_pred = uabUtilreader.un_patchify_shrink(result,
                                                      [tile_size[0] + pad, 4576 + pad],
                                                      [5000, 4576],
                                                      input_size,
                                                      [input_size[0] - pad, input_size[1] - pad],
                                                      overlap=pad)
        pred_overall = util_functions.get_pred_labels(image_pred) * 1
        pred_overall = np.roll(pred_overall, shift=slide_step, axis=1)
        pred_overall = pred_overall[:, 1000:-1000]
        #pred_overall = pred_overall[:, shift_max-slide_step:-slide_step-1]
        truth_label_img = imageio.imread(os.path.join(parent_dir_truth, file_name_truth))
        #truth_label_img = np.roll(truth_label_img, -slide_step, axis=1)
        truth_label_img = truth_label_img[:, :4576]
        truth_label_img = truth_label_img[:, 1000:-1000]
        iou = util_functions.iou_metric(truth_label_img, pred_overall, divide_flag=True)
        duration = time.time() - start_time
        print('{} mean IoU={:.3f}, duration: {:.3f}'.format(tile_name, iou[0] / iou[1], duration))
        iou_record.append(iou)

        pred_save_dir = os.path.join(score_save_dir, 'pred')
        if not os.path.exists(pred_save_dir):
            os.makedirs(pred_save_dir)
        imageio.imsave(os.path.join(pred_save_dir, tile_name + '.png'), pred_overall.astype(np.uint8))
        with open(os.path.join(score_save_dir, 'result.txt'), 'a+') as file:
            file.write('{} {}\n'.format(tile_name, iou))
    iou_record = np.array(iou_record)
    mean_iou = np.sum(iou_record[:, 0]) / np.sum(iou_record[:, 1])
    print('Overall mean IoU={:.3f}'.format(mean_iou))
    with open(os.path.join(score_save_dir, 'result.txt'), 'a+') as file:
        file.write('{}'.format(mean_iou))
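With divide_flag=True, iou_metric evidently returns an (intersection, union) pair per tile so the final score can be pooled before dividing, exactly as the last lines above do. A standalone sketch of that pattern for binary masks (the function body is an assumption about the helper's internals):

import numpy as np

def iou_parts(truth, pred):
    # return (intersection, union) so tiles can be pooled before dividing
    truth = truth.astype(bool)
    pred = pred.astype(bool)
    return np.logical_and(truth, pred).sum(), np.logical_or(truth, pred).sum()

np.random.seed(0)
tiles = [(np.random.rand(64, 64) > 0.5, np.random.rand(64, 64) > 0.5)
         for _ in range(3)]
iou_record = np.array([iou_parts(t, p) for t, p in tiles])
mean_iou = np.sum(iou_record[:, 0]) / np.sum(iou_record[:, 1])  # pooled IoU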
Example #48
0
    img_input = cv2.imread(img_input_filename)
    img_input = cv2.cvtColor(img_input, cv2.COLOR_BGR2RGB)
    no_makeup = cv2.resize(img_input, (model_size, model_size))

    makeups = glob.glob(os.path.join('imgs', 'makeup', '*.*'))

    result = np.ones((2 * model_size, (len(makeups) + 1) * model_size, 3))
    result[model_size: 2 * model_size, :model_size] = no_makeup / 255.

    model = init_model()

    X_img = np.expand_dims(preprocess(no_makeup), 0)
    for i in range(len(makeups)):
        makeup = cv2.resize(imread(makeups[i]), (model_size, model_size))
        Y_img = np.expand_dims(preprocess(makeup), 0)
        Xs_ = model['sess'].run(model['Xs'], feed_dict={
                                model['X']: X_img, model['Y']: Y_img})
        Xs_ = deprocess(Xs_)
        result[:model_size, (i + 1) * model_size: (i + 2)
               * model_size] = makeup / 255.
        result[model_size: 2 * model_size,
               (i + 1) * model_size: (i + 2) * model_size] = Xs_[0]
        
        if True:
            image_return = Image.fromarray(makeup.astype(np.uint8), mode="RGB")
            out_file = os.path.join(script_dir, 'new_face.jpg')
            image_return.save(out_file)
            print('new image saved to {:s} done!'.format(out_file))

    imsave('result.jpg', (result * 255).astype(np.uint8))
Example #49
0
 def eval_masks(self, sess, dir):
     for key in self.pm.patches.keys():
         mask_tf = self.pm.patches[key].mask_tf
         mask = (mask_tf.eval(session=sess) * 255).astype(np.uint8)
         imageio.imsave(dir + key + ".png", mask)
Example #50
0
from scipy.ndimage.filters import gaussian_filter
import numpy as np
from imageio import imread, imsave

def dodge(front, back):
    result = front * 255 / (255 - back)
    result[result > 255] = 255
    result[back == 255] = 255
    return result.astype(np.uint8)


def grayscale(rgb):
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])


s = imread('source.jpeg')
g = grayscale(s)
i = 255 - g

b = gaussian_filter(i, sigma=10)
r = dodge(b, g)

imsave('result.png', r)
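One subtlety in dodge: the division by 255 - back is a divide-by-zero wherever back == 255, and the two masking lines only clean up afterwards, so NumPy still emits runtime warnings. A variant that silences the warnings while producing the same output (a sketch, assuming the same float inputs as above):

import numpy as np

def dodge_quiet(front, back):
    with np.errstate(divide='ignore', invalid='ignore'):
        result = front * 255 / (255 - back)
    result[~np.isfinite(result)] = 255  # pixels where back == 255
    result[result > 255] = 255
    return result.astype(np.uint8)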
Example #51
0
        loss_value = outs[0]
        grad_values = outs[1].flatten().astype('float64')
        self.loss_value = loss_value
        self.grads_values = grad_values
        return self.loss_value

    def grad(self, x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grads_values)
        self.loss_value = None
        self.grads_values = None
        return grad_values


evaluator = Evaluator()

result_prefix = 'style_transfer_result'
iterations = 20
x = prepossess_image(target_image_path)
x = x.flatten()
for i in range(iterations):

    x, min_val, info = fmin_l_bfgs_b(evaluator.loss,
                                     x,
                                     fprime=evaluator.grad,
                                     maxfun=20)
    img = x.copy().reshape((img_height, img_width, 3))
    img = deprocess_image(img)
    fname = f'{result_prefix}_at_iteration{i}.png'
    imageio.imsave(fname, img)
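The Evaluator caching above exists because fmin_l_bfgs_b takes separate loss and gradient callables while the network computes both in one pass; stashing the gradient during the loss call avoids a second forward pass. The same pattern on a toy quadratic, as a self-contained sketch (the quadratic stands in for the style-transfer loss):

import numpy as np
from scipy.optimize import fmin_l_bfgs_b

def loss_and_grad(x):
    # stand-in for one expensive combined forward/backward pass
    return float(np.sum(x ** 2)), 2.0 * x

class CachingEvaluator:
    def __init__(self):
        self.loss_value = None
        self.grads_values = None

    def loss(self, x):
        self.loss_value, self.grads_values = loss_and_grad(x)
        return self.loss_value

    def grad(self, x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grads_values)
        self.loss_value = None
        self.grads_values = None
        return grad_values

evaluator = CachingEvaluator()
x, min_val, info = fmin_l_bfgs_b(evaluator.loss, np.random.rand(10),
                                 fprime=evaluator.grad, maxfun=20)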
Example #52
0
def fit(train_loader, val_loader, model, exp_path, label_preprocess, loss_fcn,
        onehot_fcn, n_classes=10, optimizer='adam', learnrate=1e-4, cuda=True,
        patience=10, max_epochs=200, resume=False):

    if cuda:
        model = model.cuda()

    if not os.path.isdir(exp_path):
        os.makedirs(exp_path)
    statsfile = os.path.join(exp_path,'stats.json')

    optimizer = {'adam':torch.optim.Adam(model.parameters(),lr=learnrate),
                 'sgd':torch.optim.SGD(
                     model.parameters(),lr=learnrate,momentum=0.9),
                 'adamax':torch.optim.Adamax(model.parameters(),lr=learnrate)
                 }[optimizer.lower()]

    # load a single example from the iterator to get the image size
    x = train_loader.sampler.data_source.__getitem__(0)[0]
    img_size = list(x.numpy().shape[1:])

    if not resume:
        stats = {'loss':{'train':[],'val':[]},
                 'mean_output':{'train':[],'val':[]}}
        best_val = np.inf
        stall = 0
        start_epoch = 0
        generated = []
        plots = []
    else:
        with open(statsfile,'r') as js:
            stats = json.load(js)
        best_val = np.min(stats['loss']['val'])
        stall = len(stats['loss']['val'])-np.argmin(stats['loss']['val'])-1
        start_epoch = len(stats['loss']['val'])-1
        generated = list(np.load(os.path.join(exp_path,'generated.npy')))
        plots = list(np.load(os.path.join(exp_path,'generated_plots.npy')))
        print('Resuming from epoch %i'%start_epoch)

    def save_img(x,filename):
        Image.fromarray((255*x).astype('uint8')).save(filename)

    def epoch(dataloader,training):
        bar = ProgressBar()
        losses = []
        mean_outs = []
        for x,y in bar(dataloader):
            label = label_preprocess(x)
            if cuda:
                x,y = x.cuda(),y.cuda()
                label = label.cuda()
            x,y = Variable(x),Variable(y)
            label = Variable(label)
            if training:
                optimizer.zero_grad()
                model.train()
            else:
                model.eval()
            output = model(x,y)
            loss = loss_fcn(output,label)
            # track mean output
            output = output.data.cpu().numpy()
            mean_outs.append(np.mean(np.argmax(output,axis=1))/output.shape[1])
            if training:
                loss.backward()
                optimizer.step()
            losses.append(loss.data.cpu().numpy())
        clearline()
        return float(np.mean(losses)), np.mean(mean_outs)

    for e in range(start_epoch,max_epochs):
        # Training
        t0 = time.time()
        loss,mean_out = epoch(train_loader,training=True)
        time_per_example = (time.time()-t0)/len(train_loader.dataset)
        stats['loss']['train'].append(loss)
        stats['mean_output']['train'].append(mean_out)
        print(('Epoch %3i:    Training loss = %6.4f    mean output = %1.2f    '
               '%4.2f msec/example')%(e,loss,mean_out,time_per_example*1000))

        # Validation
        t0 = time.time()
        loss,mean_out = epoch(val_loader,training=False)
        time_per_example = (time.time()-t0)/len(val_loader.dataset)
        stats['loss']['val'].append(loss)
        stats['mean_output']['val'].append(mean_out)
        print(('            Validation loss = %6.4f    mean output = %1.2f    '
               '%4.2f msec/example')%(loss,mean_out,time_per_example*1000))

        # Generate images and update gif
        new_frame = tile_images(generate_images(model, img_size, n_classes,
                                                onehot_fcn, cuda))
        generated.append(new_frame)

        # Update gif with loss plot
        plot_frame = plot_loss(stats['loss']['train'],stats['loss']['val'])
        if new_frame.ndim==2:
            new_frame = np.repeat(new_frame[:,:,np.newaxis],3,axis=2)
        nw = int(new_frame.shape[1]*plot_frame.shape[0]/new_frame.shape[0])
        new_frame = resize(new_frame,[plot_frame.shape[0],nw],
                           order=0, preserve_range=True, mode='constant')
        plots.append(np.concatenate((plot_frame.astype('uint8'),
                                     new_frame.astype('uint8')),
                                    axis=1))

        # Save gif arrays so it can resume training if interrupted
        np.save(os.path.join(exp_path,'generated.npy'),generated)
        np.save(os.path.join(exp_path,'generated_plots.npy'),plots)

        # Save stats and update training curves
        with open(statsfile,'w') as sf:
            json.dump(stats,sf)
        plot_stats(stats,exp_path)

        # Early stopping
        torch.save(model,os.path.join(exp_path,'last_checkpoint'))
        if loss<best_val:
            best_val = loss
            stall = 0
            torch.save(model,os.path.join(exp_path,'best_checkpoint'))
            imageio.imsave(os.path.join(exp_path, 'best_generated.jpeg'),
                           generated[-1].astype('uint8'))
            imageio.imsave(os.path.join(exp_path, 'best_generated_plots.jpeg'),
                           plots[-1].astype('uint8'))
            imageio.mimsave(os.path.join(exp_path, 'generated.gif'),
                            np.array(generated), format='gif', loop=0, fps=2)
            imageio.mimsave(os.path.join(exp_path, 'generated_plot.gif'),
                            np.array(plots), format='gif', loop=0, fps=2)
        else:
            stall += 1
        if stall>=patience:
            break
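On every improvement the early-stopping block above also rebuilds two GIFs from the accumulated frame arrays; the imageio call reduces to the following sketch with synthetic frames:

import numpy as np
import imageio

frames = [np.full((64, 64, 3), v, dtype=np.uint8) for v in (0, 85, 170, 255)]
imageio.mimsave('generated.gif', np.array(frames), format='gif', loop=0, fps=2)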
Example #53
0
def main(_args=None):
    if _args is None:
        parser = argparse.ArgumentParser()
        parser.add_argument('-c', '--classes', type=str, default='02958343')
        parser.add_argument('-l', '--load', type=bool, default=False)
        parser.add_argument('-t',
                            '--template-mesh',
                            type=str,
                            default=os.path.join(data_dir,
                                                 'obj/sphere/sphere_1352.obj'))
        parser.add_argument('-o',
                            '--output-dir',
                            type=str,
                            default=os.path.join(data_dir,
                                                 'results/output_reconstruct'))
        parser.add_argument('-b', '--batch-size', type=int, default=64)
        parser.add_argument('-n', '--train-num', type=int, default=5000)
        args = parser.parse_args()
    else:
        args = _args

    os.makedirs(args.output_dir, exist_ok=True)

    model = nn.Sequential(Encoder(), Decoder(args.template_mesh)).cuda()

    if args.load:
        model.load_state_dict(
            torch.load(
                os.path.join(args.output_dir, 'v1-%d.pth' % args.train_num)))

    renderer = sr.SoftRenderer(image_size=64,
                               sigma_val=1e-4,
                               aggr_func_rgb='hard',
                               camera_mode='look_at',
                               viewing_angle=15)

    # read training images and camera poses
    dataset_train = ShapeNet(os.path.join(data_dir, 'dataset'),
                             args.classes.split(','), 'train')
    dataset_val = ShapeNet(os.path.join(data_dir, 'dataset'),
                           args.classes.split(','), 'val')
    optimizer = torch.optim.Adam(model.parameters(),
                                 0.0001,
                                 betas=(0.9, 0.999))

    train_num = args.train_num
    if not args.load:
        writer = imageio.get_writer(os.path.join(args.output_dir, 'train.gif'),
                                    mode='I')

        loop = tqdm.tqdm(list(range(0, train_num)))
        Loss_list = np.zeros(train_num)
        for i in loop:
            images, distances, elevations, viewpoints = dataset_train.get_random_batch(
                args.batch_size)
            images_gt = images.cuda()

            mesh, laplacian_loss, flatten_loss = model(images_gt)
            renderer.transform.set_eyes_from_angles(distances, elevations,
                                                    viewpoints)
            images_pred = renderer.render_mesh(mesh)

            # optimize mesh with silhouette reprojection error and
            # geometry constraints
            loss = neg_iou_loss(images_pred[:, 3], images_gt[:, 3]) + \
                   0.05 * laplacian_loss + \
                   0.001 * flatten_loss

            Loss_list[i] = loss.item()
            loop.set_description('Loss: %.4f' % (loss.item()))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i % 100 == 0:
                image = images_pred.detach().cpu().numpy()[0].transpose(
                    (1, 2, 0))
                writer.append_data((255 * image).astype(np.uint8))
                imageio.imsave(
                    os.path.join(args.output_dir, 'deform_%05d.png' % i),
                    (255 * image[..., -1]).astype(np.uint8))

        torch.save(model.state_dict(),
                   os.path.join(args.output_dir, 'v1-%d.pth' % train_num))
        fig = plt.figure()
        x = range(0, train_num)
        y = Loss_list
        plt.plot(x, y)
        plt.xlabel('Iteration')
        plt.ylabel('Training loss')
        plt.show()
        fig.savefig("loss_original.jpg")
        np.set_printoptions(threshold=np.inf)
        print(Loss_list)

    val_iou_loss = 0
    for i in tqdm.tqdm(range(0, dataset_val.images.shape[0], 24)):
        val_imgs = torch.from_numpy(
            dataset_val.images[i:i + 24].astype('float32') / 255.)
        val_imgs = val_imgs.cuda()
        val_distances = torch.ones(val_imgs.size(0)) * dataset_val.distance
        val_elevations = torch.ones(val_imgs.size(0)) * dataset_val.elevation
        val_viewpoint_ids = torch.ones(24)
        for v in range(24):
            val_viewpoint_ids[v] = v

        val_mesh, val_laplacian_loss, val_flatten_loss = model(val_imgs)
        renderer.transform.set_eyes_from_angles(val_distances, val_elevations,
                                                -val_viewpoint_ids * 15)
        val_images_pred = renderer.render_mesh(val_mesh)
        val_iou_loss += neg_iou_loss(val_images_pred[:, 3],
                                     val_imgs[:, 3]).item()
    print('Val IOU loss: ', val_iou_loss / (dataset_val.images.shape[0] / 24))

    # save optimized mesh
    model(dataset_val.get_val(args.classes, 0,
                              4).cuda())[0].save_obj(os.path.join(
                                  args.output_dir, '1.obj'),
                                                     save_texture=False)
    model(dataset_val.get_val(args.classes, 1,
                              16).cuda())[0].save_obj(os.path.join(
                                  args.output_dir, '2.obj'),
                                                      save_texture=False)
    model(dataset_val.get_val(args.classes, 2,
                              8).cuda())[0].save_obj(os.path.join(
                                  args.output_dir, '3.obj'),
                                                     save_texture=False)
    model(dataset_val.get_val(args.classes, 3,
                              20).cuda())[0].save_obj(os.path.join(
                                  args.output_dir, '4.obj'),
                                                      save_texture=False)

    img1 = dataset_val.get_val(args.classes, 0,
                               4).detach().cpu().numpy()[0].transpose(
                                   (1, 2, 0))
    img2 = dataset_val.get_val(args.classes, 1,
                               16).detach().cpu().numpy()[0].transpose(
                                   (1, 2, 0))
    img3 = dataset_val.get_val(args.classes, 2,
                               8).detach().cpu().numpy()[0].transpose(
                                   (1, 2, 0))
    img4 = dataset_val.get_val(args.classes, 3,
                               20).detach().cpu().numpy()[0].transpose(
                                   (1, 2, 0))
    # imageio.imsave(os.path.join(args.output_dir, '233.png'), (255 * img233[..., -1]).astype(np.uint8))
    # imageio.imsave(os.path.join(args.output_dir, '235.png'), (255 * img235[..., -1]).astype(np.uint8))
    imageio.imsave(os.path.join(args.output_dir, '1.png'),
                   (255 * img1).astype(np.uint8))
    imageio.imsave(os.path.join(args.output_dir, '2.png'),
                   (255 * img2).astype(np.uint8))
    imageio.imsave(os.path.join(args.output_dir, '3.png'),
                   (255 * img3).astype(np.uint8))
    imageio.imsave(os.path.join(args.output_dir, '4.png'),
                   (255 * img4).astype(np.uint8))
Example #54
0
def eval_one_epoch(sess, ops, num_votes=1, topk=1):
    error_cnt = 0
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]
    fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')
    for fn in range(len(TEST_FILES)):
        log_string('----'+str(fn)+'----')
        file_names = [line.strip() for line in open(TEST_FILES[fn])]
        current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
        current_data = current_data[:,0:NUM_POINT,:]
        current_label = np.squeeze(current_label)
        print(current_data.shape)
        
        file_size = current_data.shape[0]
        num_batches = file_size // BATCH_SIZE
        print(file_size)
        
        for batch_idx in range(num_batches):
            start_idx = batch_idx * BATCH_SIZE
            end_idx = (batch_idx+1) * BATCH_SIZE
            cur_batch_size = end_idx - start_idx
            
            # Aggregating BEG
            batch_loss_sum = 0 # sum of losses for the batch
            batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes
            batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes
            for vote_idx in range(num_votes):
                rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],
                                                  vote_idx/float(num_votes) * np.pi * 2)
                feed_dict = {ops['pointclouds_pl']: rotated_data,
                             ops['labels_pl']: current_label[start_idx:end_idx],
                             ops['is_training_pl']: is_training}
                loss_val, pred_val = sess.run([ops['loss'], ops['pred']],
                                          feed_dict=feed_dict)
                batch_pred_sum += pred_val
                batch_pred_val = np.argmax(pred_val, 1)
                for el_idx in range(cur_batch_size):
                    batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1
                batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))
            # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]
            # pred_val = np.argmax(batch_pred_classes, 1)
            pred_val = np.argmax(batch_pred_sum, 1)
            # Aggregating END
            
            correct = np.sum(pred_val == current_label[start_idx:end_idx])
            # correct = np.sum(pred_val_topk[:,0:topk] == label_val)
            total_correct += correct
            total_seen += cur_batch_size
            loss_sum += batch_loss_sum

            for i in range(start_idx, end_idx):
                l = current_label[i]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx] == l)
                fout.write('%s, %d, %d\n' % (file_names[i], pred_val[i-start_idx], l))
                
                if pred_val[i-start_idx] != l and FLAGS.visu: # ERROR CASE, DUMP!
                    img_filename = '%d_label_%s_pred_%s.jpg' % (error_cnt, SHAPE_NAMES[l],
                                                           SHAPE_NAMES[pred_val[i-start_idx]])
                    img_filename = os.path.join(DUMP_DIR, img_filename)
                    output_img = pc_util.point_cloud_three_views(np.squeeze(current_data[i, :, :]))
                    imageio.imsave(img_filename, output_img)
                    error_cnt += 1
                
    log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('eval accuracy: %f' % (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class, dtype=np.float64))))
    
    class_accuracies = np.array(total_correct_class)/np.array(total_seen_class, dtype=np.float64)
    for i, name in enumerate(SHAPE_NAMES):
        log_string('%10s:\t%0.3f' % (name, class_accuracies[i]))
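The voting loop above sums class scores over num_votes rotations of each point cloud before the argmax. Stripped of the TensorFlow session, the idea reduces to this sketch (the predictor stub and class count are placeholders):

import numpy as np

NUM_CLASSES = 40  # placeholder

def rotate_z(points, angle):
    # rotate an (N, 3) point cloud about the vertical axis
    c, s = np.cos(angle), np.sin(angle)
    rot = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
    return points @ rot.T

def predict_stub(points):
    # stand-in for the network's per-class scores
    return np.random.rand(NUM_CLASSES)

points = np.random.rand(1024, 3)
num_votes = 12
scores = sum(predict_stub(rotate_z(points, v / float(num_votes) * np.pi * 2))
             for v in range(num_votes))
pred_class = int(np.argmax(scores))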
Example #55
0
def main():

    # This program defines a single-stage imaging pipeline that
    # brightens an image.

    # First we'll load the input image we wish to brighten.
    image_path = os.path.join(os.path.dirname(__file__),
                              "../../tutorial/images/rgb.png")

    # We create a hl.Buffer object to wrap the numpy array
    input = hl.Buffer(imageio.imread(image_path))
    assert input.type() == hl.UInt(8)

    # Next we define our hl.Func object that represents our one pipeline
    # stage.
    brighter = hl.Func("brighter")

    # Our hl.Func will have three arguments, representing the position
    # in the image and the color channel. Halide treats color
    # channels as an extra dimension of the image.
    x, y, c = hl.Var("x"), hl.Var("y"), hl.Var("c")

    # Normally we'd probably write the whole function definition on
    # one line. Here we'll break it apart so we can explain what
    # we're doing at every step.

    # For each pixel of the input image.
    value = input[x, y, c]
    assert type(value) == hl.Expr

    # Cast it to a floating point value.
    value = hl.cast(hl.Float(32), value)

    # Multiply it by 1.5 to brighten it. Halide represents real
    # numbers as floats, not doubles, so we stick an 'f' on the end
    # of our constant.
    value = value * 1.5

    # Clamp it to be less than 255, so we don't get overflow when we
    # hl.cast it back to an 8-bit unsigned int.
    value = hl.min(value, 255.0)

    # Cast it back to an 8-bit unsigned integer.
    value = hl.cast(hl.UInt(8), value)

    # Define the function.
    brighter[x, y, c] = value

    # The equivalent one-liner to all of the above is:
    #
    # brighter(x, y, c) = hl.cast<uint8_t>(hl.min(input(x, y, c) * 1.5f, 255))
    # brighter[x, y, c] = hl.cast(hl.UInt(8), hl.min(input[x, y, c] * 1.5, 255))
    #
    # In the shorter version:
    # - I skipped the hl.cast to float, because multiplying by 1.5f does
    #   that automatically.
    # - I also used integer constants in hl.clamp, because they get hl.cast
    #   to match the type of the first argument.
    # - (In the C++ original the Halide:: prefix can also be left off
    #   clamp thanks to Koenig lookup; that detail has no Python analogue.)

    # Remember. All we've done so far is build a representation of a
    # Halide program in memory. We haven't actually processed any
    # pixels yet. We haven't even compiled that Halide program yet.

    # So now we'll realize the hl.Func. The size of the output image
    # should match the size of the input image. If we just wanted to
    # brighten a portion of the input image we could request a
    # smaller size. If we request a larger size Halide will throw an
    # error at runtime telling us we're trying to read out of bounds
    # on the input image.
    output_image = brighter.realize(input.width(), input.height(),
                                    input.channels())
    assert output_image.type() == hl.UInt(8)

    # Save the output for inspection. It should look like a bright parrot.
    # python3-imageio versions <2.5 expect a numpy array
    imageio.imsave("brighter.png", np.asanyarray(output_image))
    print("Created brighter.png result file.")

    print("Success!")
    return 0
Example #56
0
def main():

    parser = optparse.OptionParser()
    parser.add_option(
        '-a',
        '--angle',
        type='float',
        dest='angle',
        default=None,
        help='rotation angle in degrees, counter-clockwise')
    parser.add_option('-e',
                      '--scale',
                      type='float',
                      dest='scale',
                      default=None,
                      help='scale factor')
    parser.add_option('-d',
                      '--dimension',
                      type='int',
                      dest='dimension',
                      default=None,
                      help='output image dimensions in pixels',
                      nargs=2)
    parser.add_option('-i',
                      '--input',
                      type='string',
                      dest='input',
                      default=None,
                      help='input image path')
    parser.add_option('-o',
                      '--output',
                      type='string',
                      dest='output',
                      default="out.png",
                      help='output image path')
    parser.add_option('-m',
                      '--mode',
                      type='choice',
                      choices=['0', '1', '2', '3'],
                      dest='mode',
                      default='0',
                      help='interpolation method to use')
    (options, args) = parser.parse_args()

    img = imageio.imread(options.input)

    if options.angle is not None:
        saida = interpolacoes[int(options.mode)].rotacao(img, options.angle)
    elif options.scale is not None:
        saida = interpolacoes[int(options.mode)].interpolacao(
            img, options.scale)
    elif options.dimension is not None:
        saida = interpolacoes[int(options.mode)].escala(
            img, options.dimension[0], options.dimension[1])

    imageio.imsave(options.output, saida)

    plt.imshow(img, cmap="gray")
    plt.title("Imagem de Entrada")
    plt.show()
    plt.imshow(saida, cmap="gray")
    plt.title("Imagem de Saida")
    plt.show()
Example #57
0
            valid_outputs_list, valid_loss_list = [], []
            for v_i, v_data in enumerate(valid_loader):
                model.set_input(v_data)
                model.test()
                valid_outputs_list.append(model.get_current_np_outputs())
                valid_loss_list.append(model.get_current_losses())
            model.train()
            valid_mean_loss = calculate_mean_loss(valid_loss_list)
            valid_summary.add_summary(valid_mean_loss, global_step=epoch)
            print('mean loss', 'train:', train_mean_loss, 'valid',
                  valid_mean_loss)

            # save sample to file
            valid_outputs_list = np.concatenate(valid_outputs_list, axis=0)
            for i in range(len(valid_outputs_list)):
                saving_img = np.hstack(
                    [zscore2(_) for _ in valid_outputs_list[i]])
                saving_img = np.array(imnorm(saving_img) * 255.0,
                                      dtype=np.uint8)
                fitswrite(
                    '%s/epoch_%d__%d.fits' % (args.samples_dir, epoch, i),
                    valid_outputs_list[i])
                imsave('%s/epoch_%d__%d.jpg' % (args.samples_dir, epoch, i),
                       saving_img)

        # **************************    save model    ************************** #
        if epoch % args.save_model_freq == 0:
            model.save_networks(epoch)

        model.update_learning_rate(epoch)
Example #58
0
def main():
    teeth_data = TeethBoxesDataset()

    # Define paths
    train_index_file = os.path.join(data_path, 'train/index.txt')
    if not os.path.exists(train_index_file):
        open(train_index_file, 'a').close()

    val_index_file = os.path.join(data_path, 'val/index.txt')
    if not os.path.exists(val_index_file):
        open(val_index_file, 'a').close()

    test_index_file = os.path.join(data_path, 'test/index.txt')
    if not os.path.exists(test_index_file):
        open(test_index_file, 'a').close()

    # Split to train/val/test sets
    ratio_list = np.asarray([0.8, 0.15, 0.05])
    split_nums = (teeth_data.bb_size * ratio_list).astype(int)  # np.int alias removed in NumPy 1.24
    left_over = teeth_data.bb_size - np.sum(split_nums)
    split_nums[0] = split_nums[0] + left_over
    assert (np.sum(split_nums) == teeth_data.bb_size)

    # Make randomized splits to make the data
    # Calculate the number of bounding boxes
    nums = [x for x in range(teeth_data.bb_size)]
    random.shuffle(nums)

    # Total bounding boxes in the dataset
    train_nums = nums[:split_nums[0]]
    val_nums = nums[split_nums[0]:np.sum(split_nums[:2])]
    test_nums = nums[np.sum(split_nums[:2]):np.sum(split_nums)]
    print('Splits divided into', len(train_nums), len(val_nums),
          len(test_nums))

    out_index = 0
    train_index = 0
    val_index = 0
    test_index = 0

    max_width = 0
    max_height = 0

    for i in range(teeth_data.size):
        # for i in range(5):
        if VISUALIZE:
            fig = plt.figure()

        index = i
        print("Processing Image #" + str(index))

        # work with the first instances of images
        im = teeth_data.get_image(index)
        print("Image size: ", im.shape)
        boxes = teeth_data.get_boxes(index)

        print("Number of bounding boxes: " + str(boxes.shape[0]))
        inf_boxes = copy.deepcopy(boxes)

        for b in range(boxes.shape[0]):
            # Inflate the bounding box region to get back/fore ground
            box_range = boxes[b, :]
            inf_range = inf_box_range(inf_boxes[b, :], width_inf, height_inf)

            # Check for whether the inflated range is within the network size
            assert (inf_range[0] > 0 and inf_range[1] > 0)
            assert (inf_range[2] < 512 and inf_range[3] < 512)

            # image i axis is axis 2 for numpy
            assert (inf_range[0] + inf_range[2] < im.shape[1])
            # image j axis is axis 1 for numpy
            assert (inf_range[1] + inf_range[3] < im.shape[0])

            if VISUALIZE:
                ax = fig.add_subplot(2, 2, 1)
                plt.imshow(im)
                rect = patches.Rectangle((box_range[0], box_range[1]),
                                         box_range[2],
                                         box_range[3],
                                         linewidth=1,
                                         edgecolor='r',
                                         facecolor='none')
                ax.add_patch(rect)

                rect = patches.Rectangle((inf_range[0], inf_range[1]),
                                         inf_range[2],
                                         inf_range[3],
                                         linewidth=1,
                                         edgecolor='g',
                                         facecolor='none')
                ax.add_patch(rect)
                plt.draw()

            # Save the image as the part of the dataset
            cropped_im = crop_im(im, inf_range)
            # print(cropped_im.shape)
            cropped_im = cv2.cvtColor(cropped_im, cv2.COLOR_BGR2GRAY)
            cropped_im = cv2.equalizeHist(cropped_im)
            cropped_im = cv2.cvtColor(cropped_im, cv2.COLOR_GRAY2BGR)
            # print(cropped_im.shape)

            width = inf_range[2]
            height = inf_range[3]
            if max_width < width:
                max_width = width
            if max_height < height:
                max_height = height

            # print(width, length)

            # warp the image to a fixed size
            im_out = cv2.resize(cropped_im, dsize=OUT_IM_SIZE)
            # print(im_out.shape)

            # Save the bounding box as the label image
            la_out = np.zeros(
                (cropped_im.shape[0], cropped_im.shape[1])).astype(np.uint8)
            width = box_range[2]
            length = box_range[3]
            new_x, new_y = math.floor((cropped_im.shape[1] - width) / 2), \
                           math.floor((cropped_im.shape[0] - length) / 2)
            end_x, end_y = new_x + width, new_y + length
            la_out[new_y:end_y, new_x:end_x] = 255
            la_out = cv2.resize(la_out,
                                dsize=OUT_IM_SIZE,
                                interpolation=cv2.INTER_AREA)
            # print(la_out.shape)

            ### -------- Saving boxes -------- ###
            if out_index in train_nums:
                imageio.imsave(train_im_path + str(train_index) + '.png',
                               im_out)
                imageio.imsave(train_la_path + str(train_index) + '.png',
                               la_out)

                with open(train_index_file, 'ab') as f:
                    np.savetxt(f, [(1, i, b)], delimiter=',', fmt='%d')

                train_index += 1
            elif out_index in val_nums:
                imageio.imsave(val_im_path + str(val_index) + '.png', im_out)
                imageio.imsave(val_la_path + str(val_index) + '.png', la_out)

                with open(val_index_file, 'ab') as f:
                    np.savetxt(f, [(1, i, b)], delimiter=',', fmt='%d')

                val_index += 1
            else:
                imageio.imsave(test_im_path + str(test_index) + '.png', im_out)
                imageio.imsave(test_la_path + str(test_index) + '.png', la_out)

                with open(test_index_file, 'ab') as f:
                    np.savetxt(f, [(1, i, b)], delimiter=',', fmt='%d')

                test_index += 1

            out_index += 1
            # print(out_index)

            if VISUALIZE:
                # Display cropped image
                ax = fig.add_subplot(2, 2, 2)
                plt.imshow(im_out)

                # Display label
                la_out = np.expand_dims(la_out, axis=2)
                la_out = np.repeat(la_out, 3, axis=2)
                # print(la_out.shape)
                ax = fig.add_subplot(2, 2, 3)
                plt.imshow(la_out)

                # Display image with label
                ax = fig.add_subplot(2, 2, 4)
                la_out = (la_out > 128).astype(bool)
                masked_im = im_out[:, :, :] * la_out
                plt.imshow(masked_im)
                plt.pause(1)

        # print(max_width, max_height)
        if VISUALIZE:
            plt.close(fig)
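inf_box_range and crop_im are defined outside this snippet; from their use, the first inflates an (x, y, w, h) box by fixed margins and the second cuts that region out of the image. Hypothetical sketches consistent with that usage (not the original implementations):

def inf_box_range(box, width_inf, height_inf):
    # hypothetical: inflate (x, y, w, h) by the given margin on every side
    x, y, w, h = box
    return [x - width_inf, y - height_inf,
            w + 2 * width_inf, h + 2 * height_inf]

def crop_im(im, box):
    # hypothetical: cut the (x, y, w, h) region out of the image
    x, y, w, h = box
    return im[y:y + h, x:x + w]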
Example #59
0

#load model and weights
model = get_model(MODEL, args.dataset, N_CLASSES, N_BANDS, PATCH_SIZE)
print('Loading weights from %s' % WEIGHTS + '/model_best.pth')
model = model.to(device)
model.load_state_dict(torch.load(WEIGHTS + '/model_best.pth'))
model.eval()

#testing model
probabilities = test(model, WEIGHTS, img, PATCH_SIZE, N_CLASSES, device=device)
prediction = np.argmax(probabilities, axis=-1)

run_results = metrics(prediction, gt, n_classes=N_CLASSES)

prediction[gt < 0] = -1

#color results
colored_gt = color_results(gt + 1, palette)
colored_pred = color_results(prediction + 1, palette)

outfile = os.path.join(OUTPUT, DATASET, MODEL)
os.makedirs(outfile, exist_ok=True)

imageio.imsave(os.path.join(outfile, DATASET + '_gt.png'), colored_gt)
imageio.imsave(os.path.join(outfile, DATASET + '_' + MODEL + '_out.png'),
               colored_pred)

show_results(run_results, label_values=LABEL_VALUES)
del model
Example #60
0
def save_linear_image(path):
	with rawpy.imread(path) as raw:
		rgb = raw.postprocess(gamma=(1, 1), no_auto_bright=True, output_bps=16)
	imageio.imsave('samples/linear.tiff', rgb)
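With output_bps=16, rawpy returns a uint16 RGB array and imageio writes it to TIFF losslessly; a quick hedged check of the round-trip (assumes the file written above exists):

import numpy as np
import imageio

rgb = imageio.imread('samples/linear.tiff')
assert rgb.dtype == np.uint16  # the 16-bit linear data survived the save
print(rgb.shape, rgb.min(), rgb.max())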