def main_demo(env, func):
    df = iter(make_dataflow_demo(env))
    nr_samples = get_env('demo.nr_samples', 40 * 8)
    grid_desc = get_env('demo.grid_desc', ('20v', '16h'))

    while True:
        all_imgs_ab = []
        all_imgs_ba = []
        for i in range(nr_samples):
            feed_dict = next(df)
            results = func(**feed_dict)
            img_a, img_b = feed_dict['img_a'][0], feed_dict['img_b'][0]
            img_ab, img_ba = results['img_ab'][0] * 255, results['img_ba'][0] * 255
            img_aba, img_bab = results['img_aba'][0] * 255, results['img_bab'][0] * 255

            all_imgs_ab.append(np.hstack([img_a, img_ab]).astype('uint8'))
            all_imgs_ba.append(np.hstack([img_b, img_ba]).astype('uint8'))

        all_imgs_ab = image.image_grid(all_imgs_ab, grid_desc)
        all_imgs_ba = image.image_grid(all_imgs_ba, grid_desc)
        sep = np.ones((all_imgs_ab.shape[0], 64, 3), dtype='uint8') * 255
        all_imgs = np.hstack([all_imgs_ab, sep, all_imgs_ba])
        image.imwrite('discogan.png', all_imgs)
        image.imshow('AtoB; BtoA', all_imgs)
Example #2
def batch_show(batch,
               nr_show=16,
               grid_desc=('4v', '4h'),
               resize=(600, 800),
               title='batch_show'):
    """
    Show a batch of images.

    :param batch: The batched data: can be either a ndarray of shape (batch_size, h, w, c) or a list
    of images.
    :param nr_show: Number of images to be displayed. Default set to be 16.
    :param grid_desc: Grid description. See `tartist.image.image_grid` for details.
    :param resize: Resize factor, a tuple (min_dim, max_dim).
    :param title: The title of the shown window.
    """

    batch = batch[:nr_show]
    batch = np.array(batch)

    if len(batch) < nr_show:
        # Pad with blank (zero) images so the grid is always filled.
        pad = np.zeros((nr_show - len(batch), ) + batch.shape[1:], dtype=batch.dtype)
        batch = np.concatenate([batch, pad], axis=0)

    img = image.image_grid(batch, grid_desc)
    img = image.resize_minmax(img, *resize, interpolation='NEAREST')
    image.imshow(title, img)
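
A minimal usage sketch, not from the original listing: it feeds batch_show a random uint8 batch. The dummy array, its shape, and the window title are illustrative assumptions, and an image backend that can open a display window is assumed.

import numpy as np

# Hypothetical input: 16 random 32x32 RGB images as a (batch, h, w, c) ndarray.
dummy_batch = np.random.randint(0, 256, size=(16, 32, 32, 3), dtype='uint8')

# Lay them out on a 4x4 grid, resize the grid into the (600, 800) bounds,
# and show it in a window titled 'random batch'.
batch_show(dummy_batch, nr_show=16, grid_desc=('4v', '4h'), title='random batch')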
Example #3
def main():
    m = rl.custom.MazeEnv(enable_noaction=False, visible_size=7)

    m.restart()
    demo = [r(m)]
    for i in range(19):
        a = random.choice(4)
        m.action(a)
        demo.append(r(m))

    i = image.image_grid(demo, ['5v', '4h'])
    image.imshow('Maze', i)
Example #4
def main_demo_infogan(env, func):
    net = env.network
    samples = net.zc_distrib.numerical_sample(net.zc_distrib_num_prior)
    df = {'zc': samples.reshape(samples.shape[0], 1, -1)}
    df = flow.DictOfArrayDataFlow(df)

    all_outputs = []
    for data in tqdm.tqdm(df, total=len(df), **get_tqdm_defaults()):
        res = func(**data)
        all_outputs.append(res['output'][0, :, :, 0])

    grid_desc = get_env('demo.infogan.grid_desc')
    final = image.image_grid(all_outputs, grid_desc)
    final = (final * 255).astype('uint8')
    image.imwrite('infogan.png', final)
Example #5
def main():
    m = rl.custom.MazeEnv(map_size=15, enable_noaction=True, visible_size=None)
    obstacles = itertools.chain(
        [(i, 7) for i in range(15) if i not in (3, 11)],
        [(7, i) for i in range(15) if i not in (3, 11)]
    )
    m.restart(obstacles=obstacles, start_point=(3, 3), finish_point=(11, 11))

    demo = [r(m)]
    for i in range(19):
        a = random.choice(4)
        m.action(a)
        demo.append(r(m))

    i = image.image_grid(demo, ['5v', '4h'])
    image.imshow('Maze', i)
Example #6
def demo_draw(feed_dict, result, extra_info):
    reconstruct = get_env('demo.is_reconstruct', False)
    grid_desc = get_env('demo.draw.grid_desc')

    all_outputs = []
    for i in range(1000):
        name = 'canvas_step{}'.format(i)
        if name in result:
            all_outputs.append(result[name][0, :, :, 0])

    final = image.image_grid(all_outputs, grid_desc)
    final = (final * 255).astype('uint8')

    if reconstruct:
        img = feed_dict['img'][0, :, :, 0]
        h = final.shape[0]
        w = int(img.shape[1] * h / img.shape[0])
        img = (img * 255).astype('uint8')
        img = image.resize(img, (h, w))
        final = np.hstack((img, final))

    final = image.resize_minmax(final, 480, 720)

    image.imshow('demo', final)