Code Example #1
    def interpolate_full(self, img0, img1, interp=serp, x0=-0.5, x1=1.5, delta=1/32.0):
        """Return a visualization of an interpolation between img0 and img1,
        using interpolation method interp.  The interpolation starts
        with parameter x0 and goes to x1, in increments of delta.
        Note that img0 corresponds to an interpolation parameter of 0
        and img1 to a parameter of 1.  The defaults start outside that
        range, so some extrapolation is done.

        """
        z0 = self.get_lv(img0).reshape((100,))
        z1 = self.get_lv(img1).reshape((100,))
        ps = np.arange(x0, x1-0.000001, delta)
        n = ps.size
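        # each p blends z0 and z1; values of p outside [0, 1] extrapolate beyond the endpoint images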
        arrays = [interp(z0, z1, p) for p in ps]
        z = np.stack(arrays)
        print(z.shape)
        zmb = floatX(z[0:n, :])
        print(zmb.shape)
        xmb = self.model._gen(zmb)
        print(xmb.shape)
        samples = [xmb]
        samples = np.concatenate(samples, axis=0)
        samples = self.model.inverse_transform(
            samples, npx=self.model.npx, nc=self.model.nc)
        samples = (samples * 255).astype(np.uint8)
        m = math.ceil(math.sqrt(n))
        img_vis = utils.grid_vis(samples, m, m)
        return img_vis
Code Example #2
File: gui_vis.py  Project: Larry-u/iGAN
    def update_vis(self):
        ims = self.opt_engine.get_images(self.frame_id)

        if ims is not None:
            self.ims = ims

        if self.ims is None:
            return

        ims_show = []
        n_imgs = self.ims.shape[0]
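        # resize each image for display and outline the currently selected one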
        for n in range(n_imgs):
            # im = ims[n]
            im_s = cv2.resize(self.ims[n], (self.width, self.width),
                              interpolation=cv2.INTER_CUBIC)
            if n == self.select_id and self.topK > 1:
                t = 3  # thickness
                cv2.rectangle(im_s, (t, t), (self.width - t, self.width - t),
                              (0, 255, 0), t)
            im_s = im_s[np.newaxis, ...]
            ims_show.append(im_s)
        if ims_show:
            ims_show = np.concatenate(ims_show, axis=0)
            g_tmp = utils.grid_vis(ims_show, self.grid_size[1],
                                   self.grid_size[0])  # (nh, nw)
            self.vis_results = g_tmp.copy()
            self.update()
Code Example #3
def interpolate(url0, url1, output_image):
    model_class = locate('model_def.dcgan_theano')
    model_file = './models/handbag_64.dcgan_theano'
    model = model_class.Model(
        model_name="handbag_64", model_file=model_file)
    # save images
    for j, url in enumerate([url0, url1]):
        r = requests.get(url)
        i = Image.open(StringIO(r.content))
        i.save("pics/url"+str(j)+".jpg")
    z0 = iGAN_predict.find_latent(url=url0).reshape((100,))
    z1 = iGAN_predict.find_latent(url=url1).reshape((100,))
    delta = 1.0/32.0
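    # blend the two latent codes; p outside [0, 1] extrapolates past the endpoints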
    arrays = [p*z0+(1-p)*z1 for p in np.arange(-16*delta, 1+16*delta-0.0001, delta)]
    z = np.stack(arrays)
    print(z.shape)
    zmb = floatX(z[0 : 64, :])
    xmb = model._gen(zmb)
    samples = [xmb]
    samples = np.concatenate(samples, axis=0)
    print(samples.shape)
    samples = model.inverse_transform(samples, npx=model.npx, nc=model.nc)
    samples = (samples * 255).astype(np.uint8)
    # generate grid visualization
    im_vis = utils.grid_vis(samples, 8, 8)
    # write to the disk
    im_vis = cv2.cvtColor(im_vis, cv2.COLOR_BGR2RGB)
    cv2.imwrite(output_image, im_vis)
Code Example #4
 def imagify(self, z):
     """Return an image corresponding to the latent vector z."""
     z = np.stack([z.reshape((100,))])
     zmb = floatX(z[0:1, :])
     xmb = self.model._gen(zmb)
     samples = np.concatenate([xmb], axis=0)
     samples = self.model.inverse_transform(
         samples, npx=self.model.npx, nc=self.model.nc)
     samples = (samples * 255).astype(np.uint8)
     img_vis = utils.grid_vis(samples, 1, 1)
     return img_vis
Code Example #5
def generate(z, output_image):
    model_class = locate('model_def.dcgan_theano')
    model_file = './models/handbag_64.dcgan_theano'
    model = model_class.Model(
        model_name="handbag_64", model_file=model_file)
    samples = []
    n = 1
    batch_size = 1
    z = z.reshape((1, 100))
    zmb = floatX(z[0 : n, :])
    xmb = model._gen(zmb)
    samples.append(xmb)
    samples = np.concatenate(samples, axis=0)
    samples = model.inverse_transform(samples, npx=model.npx, nc=model.nc)
    samples = (samples * 255).astype(np.uint8)
    #samples = model.gen_samples(z0=None, n=196, batch_size=49, use_transform=True)
    # generate grid visualization
    im_vis = utils.grid_vis(samples, 1, 1)
    # write to the disk
    im_vis = cv2.cvtColor(im_vis, cv2.COLOR_BGR2RGB)
    cv2.imwrite(output_image, im_vis)
Code Example #6
File: gui_vis.py  Project: YuhangSong/iGAN
    def update_vis(self):
        ims = self.opt_engine.get_images(self.frame_id)

        if ims is not None:
            self.ims = ims

        if self.ims is None:
            return

        ims_show = []
        n_imgs = self.ims.shape[0]
        for n in range(n_imgs):
            # im = ims[n]
            im_s = cv2.resize(self.ims[n], (self.width, self.width), interpolation=cv2.INTER_CUBIC)
            if n == self.select_id and self.topK > 1:
                t = 3  # thickness
                cv2.rectangle(im_s, (t, t), (self.width - t, self.width - t), (0, 255, 0), t)
            im_s = im_s[np.newaxis, ...]
            ims_show.append(im_s)
        if ims_show:
            ims_show = np.concatenate(ims_show, axis=0)
            g_tmp = utils.grid_vis(ims_show, self.grid_size[1], self.grid_size[0]) # (nh, nw)
            self.vis_results = g_tmp.copy()
            self.update()
Code Example #7
# load data from hdf5 file
tr_data, te_data, tr_stream, te_stream, ntrain, ntest = load.load_imgs(
    ntrain=None,
    ntest=None,
    batch_size=args.batch_size,
    data_file=args.data_file)
te_handle = te_data.open()
test_x, = te_data.get_data(te_handle, slice(0, ntest))

# generate real samples and test transform/inverse_transform
test_x = train_dcgan_utils.transform(test_x, nc=nc)
vis_idxs = py_rng.sample(np.arange(len(test_x)), n_vis)
vaX_vis = train_dcgan_utils.inverse_transform(test_x[vis_idxs], npx=npx, nc=nc)
# st()
n_grid = int(np.sqrt(n_vis))
grid_real = utils.grid_vis((vaX_vis * 255.0).astype(np.uint8), n_grid, n_grid)
train_dcgan_utils.save_image(grid_real,
                             os.path.join(sample_dir, 'real_samples.png'))

# define DCGAN model
disc_params = train_dcgan_utils.init_disc_params(n_f=n_f,
                                                 n_layers=n_layers,
                                                 nc=nc)
gen_params = train_dcgan_utils.init_gen_params(nz=nz,
                                               n_f=n_f,
                                               n_layers=n_layers,
                                               nc=nc)
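# Theano symbolic variables: x holds image minibatches, z holds latent vectors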
x = T.tensor4()
z = T.matrix()

gx = train_dcgan_utils.gen(z, gen_params, n_layers=n_layers, n_f=n_f, nc=nc)
Code Example #8
File: generate_samples.py  Project: YuhangSong/iGAN
    parser.add_argument('--model_type', dest='model_type', help='the generative model and its deep learning framework', default='dcgan_theano', type=str)
    parser.add_argument('--framework', dest='framework', help='deep learning framework', default='theano')
    parser.add_argument('--model_file', dest='model_file', help='the file that stores the generative model', type=str, default=None)
    parser.add_argument('--output_image', dest='output_image', help='the name of output image', type=str, default=None)

    args = parser.parse_args()
    return args

if __name__ == '__main__':
    args = parse_args()
    if not args.model_file:  # use the default model file if none is specified
        args.model_file = './models/%s.%s' % (args.model_name, args.model_type)

    if not args.output_image:
        args.output_image = '%s_%s_samples.png' % (args.model_name, args.model_type)

    for arg in vars(args):
        print('[%s] =' % arg, getattr(args, arg))

    # initialize model and constrained optimization problem
    model_class = locate('model_def.%s' % args.model_type)
    model = model_class.Model(model_name=args.model_name, model_file=args.model_file)
    # generate samples
    samples = model.gen_samples(z0=None, n=196, batch_size=49, use_transform=True)
    # generate grid visualization
    im_vis = utils.grid_vis(samples, 14, 14)
    # write to the disk
    im_vis = cv2.cvtColor(im_vis, cv2.COLOR_BGR2RGB)
    cv2.imwrite(args.output_image, im_vis)
    print('samples_shape', samples.shape)
    print('save image to %s' % args.output_image)
Code Example #9
File: train_dcgan.py  Project: YuhangSong/iGAN
web_dir = os.path.join(args.cache_dir, 'web_dcgan')
html = image_save.ImageSave(web_dir, expr_name, append=True)
utils.mkdirs([sample_dir, model_dir, log_dir, web_dir])

# load data from hdf5 file
tr_data, te_data, tr_stream, te_stream, ntrain, ntest = load.load_imgs(ntrain=None, ntest=None, batch_size=args.batch_size, data_file=args.data_file)
te_handle = te_data.open()
test_x, = te_data.get_data(te_handle, slice(0, ntest))

# generate real samples and test transform/inverse_transform
test_x = train_dcgan_utils.transform(test_x, nc=nc)
vis_idxs = py_rng.sample(np.arange(len(test_x)), n_vis)
vaX_vis = train_dcgan_utils.inverse_transform(test_x[vis_idxs], npx=npx, nc=nc)
# st()
n_grid = int(np.sqrt(n_vis))
grid_real = utils.grid_vis((vaX_vis*255.0).astype(np.uint8), n_grid, n_grid)
train_dcgan_utils.save_image(grid_real, os.path.join(sample_dir, 'real_samples.png'))


# define DCGAN model
disc_params = train_dcgan_utils.init_disc_params(n_f=n_f, n_layers=n_layers, nc=nc)
gen_params = train_dcgan_utils.init_gen_params(nz=nz, n_f=n_f, n_layers=n_layers, nc=nc)
x = T.tensor4()
z = T.matrix()

gx = train_dcgan_utils.gen(z, gen_params, n_layers=n_layers, n_f=n_f, nc=nc)
p_real = train_dcgan_utils.discrim(x, disc_params, n_layers=n_layers)
p_gen = train_dcgan_utils.discrim(gx, disc_params, n_layers=n_layers)

# binary cross-entropy losses: the discriminator should label real images 1 and generated images 0
d_cost_real = costs.bce(p_real, T.ones(p_real.shape))
d_cost_gen = costs.bce(p_gen, T.zeros(p_gen.shape))
Code Example #10
    return args


if __name__ == '__main__':
    args = parse_args()
    if not args.model_file:  # use the default model file if none is specified
        args.model_file = './models/%s.%s' % (args.model_name, args.model_type)

    if not args.output_image:
        args.output_image = '%s_%s_samples.png' % (args.model_name,
                                                   args.model_type)

    for arg in vars(args):
        print('[%s] =' % arg, getattr(args, arg))

    # initialize model and constrained optimization problem
    model_class = locate('model_def.%s' % args.model_type)
    model = model_class.Model(model_name=args.model_name,
                              model_file=args.model_file)
    # generate samples
    samples = model.gen_samples(z0=None,
                                n=196,
                                batch_size=49,
                                use_transform=True)
    # generate grid visualization
    im_vis = utils.grid_vis(samples, 14, 14)
    # write to the disk
    im_vis = cv2.cvtColor(im_vis, cv2.COLOR_BGR2RGB)
    cv2.imwrite(args.output_image, im_vis)
    print('samples_shape', samples.shape)
    print('save image to %s' % args.output_image)