Example #1
def main():
    if (FLAGS.vector_opt == 0):
        z = np.zeros(shape=(FLAGS.batch_size, DIM))
    elif (FLAGS.vector_opt == 1):
        z = np.random.uniform(-0.5, 0.5, size=(FLAGS.batch_size, DIM))
    elif (FLAGS.vector_opt == 2):
        z = load_vector()
    elif (FLAGS.vector_opt == 3):
        z = gaussian_cf_sampler(np.load(FLAGS.vector))
    elif (FLAGS.vector_opt == 4):
        z = esm_cf_sampler(np.load(FLAGS.vector), 0.5)
    else:
        print(
            "Invalid value for vector_opt argument. 0-4 are the only acceptable values."
        )
        print("Use -h or --help flags for more information.")
        return

    np.save(
        "./prev_img_vector.npy",
        z)  # save our vectors to a file, so if we like one we can replicate
    network, tf_session = restore_model()

    if (FLAGS.gif):
        base_images = [
            np.load('./counterfactualGANs/base_vectors/%s' % i)
            for i in BASE_VECTORS
        ]
        utils.make_gif(base_images, 'bases.gif')
    else:
        generate_image(z, network, tf_session, VIZ_OPTION)
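Note: every example on this page depends on a project-specific make_gif helper whose signature varies from repository to repository. A minimal sketch compatible with the call above (a list of NumPy image arrays plus an output path), assuming imageio is available, might look like the following; it is an illustration, not any of the original implementations.
import numpy as np
import imageio


def make_gif(frames, path, duration=0.1):
    """Write a sequence of HxWxC image arrays to an animated GIF (sketch)."""
    frames = [np.asarray(f, dtype=np.uint8) for f in frames]
    imageio.mimsave(path, frames, duration=duration)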
Example #2
def main():
    counter = 20
    rn = "golf"
    nz = 100
    parallel = False  # set to True below only if the generator is wrapped in nn.DataParallel

    W = torch.load('runs%d/nets_%s/netZ_glo.pth' % (counter, rn))
    W = W['emb.weight'].data.cpu().numpy()

    netG = model_video_orig.netG_new(nz).cuda()
    if torch.cuda.device_count() > 1:
        parallel = True
        print("Using", torch.cuda.device_count(), "GPUs!")
        netG = nn.DataParallel(netG)

    Zs = utils.sample_gaussian(torch.from_numpy(W), 10000)
    Zs = Zs.data.cpu().numpy()

    state_dict = torch.load('runs%d/nets_%s/netG_glo.pth' % (counter, rn))
    netG.load_state_dict(state_dict)  # load the weights for generator (GLO)
    if parallel:
        netG = netG.module

    gmm = GaussianMixture(n_components=100,
                          covariance_type='full',
                          max_iter=100,
                          n_init=10)
    gmm.fit(W)

    z = torch.from_numpy(gmm.sample(100)[0]).float().cuda()
    video = netG(z)
    utils.make_gif(video, 'runs%d/ims_%s/sample' % (counter, rn), 16)
    return video
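utils.sample_gaussian is not shown in this snippet. One plausible reading, assuming it fits a diagonal Gaussian to the rows of the latent matrix W and draws n samples from it, is the following sketch (an assumption, not the original code).
import torch


def sample_gaussian(W, n):
    """Sample n latent vectors from a diagonal Gaussian fitted to the rows of W (sketch)."""
    mean = W.mean(dim=0)  # per-dimension mean of the latent codes
    std = W.std(dim=0)    # per-dimension standard deviation
    return mean + std * torch.randn(n, W.size(1))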
Example #3
def main():
    counter = 21
    rn = "golf"
    nz = 100
    parallel = False  # set to True below only if the generator is wrapped in nn.DataParallel
    W = torch.load('runs%d/nets_%s/netZ_glo.pth' % (counter, rn))
    W = W['emb.weight'].data.cpu().numpy()

    netG = model_video_orig.netG_new(nz).cuda()


    if torch.cuda.device_count() > 1:
        parallel = True
        print("Using", torch.cuda.device_count(), "GPUs!")
        netG = nn.DataParallel(netG)

    state_dict = torch.load('runs%d/nets_%s/netG_glo.pth' % (counter, rn))
    netG.load_state_dict(state_dict) # load the weights for generator (GLO)

    if parallel:
        netG = netG.module


    d = 16
    nepoch = 200
    vaet = vae.VAETrainer(W, d)
    vaet.train_vae(nepoch)
    torch.save(vaet.vae.netVAE.state_dict(), 'runs%d/nets_%s/netVAE.pth' % (counter, rn))

    z = vaet.vae.netVAE.decode(torch.randn(100, d).cuda())
    video = netG(z)
    print("video shape is", video.shape)

    utils.make_gif(video, 'runs%d/ims_%s/sample' % (counter, rn), 16)
    return video
Example #4
    def play(self, coord):

        total_steps = 0
        test_episode_count = 0

        print("Starting agent " + str(self.thread_id))
        while not coord.should_stop():

            episode_regret = 0
            episode_suboptimal_arm = 0
            episode_reward = [0 for _ in range(FLAGS.nb_actions)]
            episode_step_count = 0
            episode_frames = []
            d = False
            r = 0
            a = 0
            t = 0
            self.env.set(self.settings["envs"][test_episode_count])

            while not d:
                a = random.randint(0, FLAGS.nb_actions - 1)
                r, d, t = self.env.pull_arm(a)

                episode_frames.append(
                    set_image_bandit(episode_reward, self.env.get_bandit(), a,
                                     t))

                episode_regret += self.env.get_timestep_regret(a)
                optimal_action = self.env.get_optimal_arm()
                if optimal_action != a:
                    episode_suboptimal_arm += 1

                episode_reward[a] += r
                total_steps += 1
                episode_step_count += 1

            self.episode_rewards.append(np.sum(episode_reward))

            self.episodes_suboptimal_arms.append(episode_suboptimal_arm)
            self.episode_regrets.append(episode_regret)

            self.images = np.array(episode_frames)
            make_gif(self.images,
                     FLAGS.frames_test_dir + '/image' +
                     str(test_episode_count) + '.gif',
                     duration=len(self.images) * 0.1,
                     true_image=True)

            if test_episode_count == FLAGS.nb_test_episodes - 1:
                mean_regret = np.mean(self.episode_regrets)
                mean_nb_suboptimal_arms = np.mean(
                    self.episodes_suboptimal_arms)

                print("Mean regret for the model is {}".format(mean_regret))
                print("Regret in terms of suboptimal arms is {}".format(
                    mean_nb_suboptimal_arms))
                return 1

            test_episode_count += 1
Example #5
def train():
    tf.keras.backend.clear_session()
    with strategy.scope() if config['use_tpu'] else empty_context_mgr():

        model = GAN_PG(**config)

        # Define optimizers
        optimizer_g = tf.train.AdamOptimizer(
            learning_rate=config['learning_rate'], beta1=0.0)
        optimizer_d = tf.train.AdamOptimizer(
            learning_rate=config['learning_rate'], beta1=0.0)

        # Compile the model
        model.compile_model(optimizer_g=optimizer_g,
                            optimizer_d=optimizer_d,
                            loss=config['gan_mode'],
                            tpu_strategy=strategy,
                            resolver=resolver,
                            config=config['sess_config'])

        if config['restore']:
            model.load_weights('{}/weights'.format(config['folder']))

        # Prepare inputs
        inputs = (X_train, y_train) if config['conditional'] else X_train

        # Train
        for stage in config['train_stages']:
            # Get training stage num
            stage_num = config['train_stages'].index(stage)

            print(
                '\nProcessing stage: {}  with image size {} =========================================='
                .format(stage_num, stage['size']))

            # Define schedulers
            alpha_scheduler = Scheduler(stage['train_epochs'], [0, 0.5],
                                        [0, 1.0])
            learning_rate_scheduler = Scheduler(
                stage['train_epochs'], [0, 0.5],
                [stage['lr'] * 0.1, stage['lr']])

            model.fit_stage(inputs,
                            config['batch_size'],
                            stage_num=stage_num,
                            alpha_scheduler=alpha_scheduler,
                            learning_rate_scheduler=learning_rate_scheduler,
                            folder=config['folder'],
                            save_epoch=config['save_epoch'],
                            seed_noise=seed_noise,
                            seed_labels=seed_labels)

    make_gif(
        glob.iglob('{}/progress/*.png'.format(config['folder'])),
        '{}/progress/{}_{}.gif'.format(config['folder'], config['gan_mode'],
                                       'progress'))
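The Scheduler class used above is defined elsewhere in that project. A hypothetical sketch, assuming it linearly interpolates a value over fractions of the stage length (so Scheduler(epochs, [0, 0.5], [0, 1.0]) ramps from 0 to 1 during the first half of the stage and then holds), could be:
import numpy as np


class Scheduler:
    """Maps an epoch index to a value by linear interpolation (hypothetical sketch)."""

    def __init__(self, n_epochs, fractions, values):
        self.xp = [f * n_epochs for f in fractions]  # epoch breakpoints
        self.fp = values                             # values at those breakpoints

    def __call__(self, epoch):
        return float(np.interp(epoch, self.xp, self.fp))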
Example #6
def main():
    # initial values
    x, y = 1, 2

    gd = GradientDescent(x, y, parameters["SGD"])

    for step in range(0, parameters["n_epochs"]):
        gd.minimize()
    # save animation
    save_anim(parameters["n_epochs"], gd.x_hist, gd.y_hist, gd.z_hist, "SGD")
    make_gif(parameters["n_epochs"])
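The GradientDescent class and the parameters dict are defined elsewhere. A minimal sketch, assuming the toy objective z = x**2 + y**2 and that parameters["SGD"] holds a scalar learning rate, might be:
class GradientDescent:
    """Plain SGD on z = x**2 + y**2, recording the trajectory (hypothetical sketch)."""

    def __init__(self, x, y, lr):
        self.x, self.y, self.lr = x, y, lr
        self.x_hist, self.y_hist, self.z_hist = [x], [y], [x ** 2 + y ** 2]

    def minimize(self):
        # gradient of z = x**2 + y**2 is (2x, 2y)
        self.x -= self.lr * 2 * self.x
        self.y -= self.lr * 2 * self.y
        self.x_hist.append(self.x)
        self.y_hist.append(self.y)
        self.z_hist.append(self.x ** 2 + self.y ** 2)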
Example #7
    def generate_random_walking(self):
        """Generate fonts with random walking

        Generate fonts from random walking inputs.
        Results are changed gradually.
        """
        for c in self.embedding_chars:
            dst_dir = os.path.join(self.dst_walk, c)
            if not os.path.exists(dst_dir):
                os.mkdir(dst_dir)
        batch_n = (self.char_embedding_n * FLAGS.char_img_n) // self.batch_size
        c_ids = self.real_dataset.get_ids_from_labels(self.embedding_chars)
        for batch_i in tqdm(range(batch_n)):
            style_id_start = batch_i
            if batch_i == batch_n - 1:
                style_id_end = 0
            else:
                style_id_end = batch_i + 1
            generated_imgs = self.sess.run(
                self.generated_imgs,
                feed_dict={
                    self.style_ids_x: np.ones(self.batch_size) * style_id_start,
                    self.style_ids_y: np.ones(self.batch_size) * style_id_end,
                    self.style_ids_alpha: np.repeat(
                        np.linspace(0., 1., num=self.walk_step, endpoint=False),
                        self.char_embedding_n),
                    self.char_ids_x: np.tile(c_ids, self.batch_size // self.char_embedding_n),
                    self.char_ids_y: np.tile(c_ids, self.batch_size // self.char_embedding_n),
                    self.char_ids_alpha: np.zeros(self.batch_size)
                })
            for img_i in range(generated_imgs.shape[0]):
                img = generated_imgs[img_i]
                img = (img + 1.) * 127.5
                pil_img = Image.fromarray(np.uint8(img))
                pil_img.save(
                    os.path.join(
                        self.dst_walk,
                        str(self.embedding_chars[img_i %
                                                 self.char_embedding_n]),
                        '{:05d}.png'.format(
                            (batch_i * self.batch_size + img_i) //
                            self.char_embedding_n)))
        print('making gif animations...')
        for i in range(self.char_embedding_n):
            make_gif(
                os.path.join(self.dst_walk, self.embedding_chars[i]),
                os.path.join(self.dst_walk, self.embedding_chars[i] + '.gif'))
Example #8
def main():
    # define the network
    net = models.LeNetWithAngle(classes_num)
    if use_gpu:
        net = net.cuda()
    # define the optimizer
    optimizer = torch.optim.SGD(net.parameters(),
                                lr=model_lr,
                                weight_decay=1e-5,
                                nesterov=True,
                                momentum=0.9)
    print("net and optimzer load succeed")
    #定义数据加载
    trainloader, testloader = dataloader.get_loader(batch_size=batch_size,
                                                    root_path="./data/MNIST")
    print("data load succeed")
    #定义logger
    logger = utils.Logger(tb_path="./logs/tblog/")
    # define the learning-rate scheduler
    scheduler = lr_sche.StepLR(optimizer, 30, 0.1)
    # define the loss function
    criterion = a_softmax.AngleSoftmaxLoss(gamma=0)
    best_acc = 0
    # start training
    for i in range(1, epochs + 1):
        scheduler.step(epoch=i)
        net.train()
        train_acc, train_loss, all_feature, all_labels = \
            train(net, optimizer, criterion, trainloader, i)
        utils.plot_features(all_feature, all_labels, classes_num, i,
                            "./logs/images/train/train_{}.png")
        net.eval()
        test_acc, test_loss, all_feature, all_labels = test(
            net, criterion, testloader, i)
        utils.plot_features(all_feature, all_labels, classes_num, i,
                            "./logs/images/test/test_{}.png")
        print("{} epoch end, train acc is {:.4f}, test acc is {:.4f}".format(
            i, train_acc, test_acc))
        content = {
            "Train/acc": train_acc,
            "Test/acc": test_acc,
            "Train/loss": train_loss,
            "Test/loss": test_loss
        }
        logger.log(step=i, content=content)
        if best_acc < test_acc:
            best_acc = test_acc
        utils.save_checkpoints("./logs/weights/net_{}.pth", i,
                               net.state_dict(), (best_acc == test_acc))
    utils.make_gif("./logs/images/train/", "./logs/train.gif")
    utils.make_gif("./logs/images/test/", "./logs/test.gif")
    print("Traing finished...")
Example #9
    def play(self, coord):

        total_steps = 0
        test_episode_count = 0

        print("Starting agent " + str(self.thread_id))
        while not coord.should_stop():

            episode_regret = 0
            episode_suboptimal_arm = 0
            episode_reward = [0 for _ in range(FLAGS.nb_actions)]
            episode_step_count = 0
            episode_frames = []
            d = False
            r = 0
            a = 0
            t = 0
            self.env.set(self.settings["envs"][test_episode_count])

            while not d:
                a = random.randint(0, FLAGS.nb_actions - 1)
                r, d, t = self.env.pull_arm(a)

                episode_frames.append(set_image_bandit(episode_reward, self.env.get_bandit(), a, t))

                episode_regret += self.env.get_timestep_regret(a)
                optimal_action = self.env.get_optimal_arm()
                if optimal_action != a:
                    episode_suboptimal_arm += 1

                episode_reward[a] += r
                total_steps += 1
                episode_step_count += 1

            self.episode_rewards.append(np.sum(episode_reward))

            self.episodes_suboptimal_arms.append(episode_suboptimal_arm)
            self.episode_regrets.append(episode_regret)

            self.images = np.array(episode_frames)
            make_gif(self.images, FLAGS.frames_test_dir + '/image' + str(test_episode_count) + '.gif',
                     duration=len(self.images) * 0.1, true_image=True)

            if test_episode_count == FLAGS.nb_test_episodes - 1:
                mean_regret = np.mean(self.episode_regrets)
                mean_nb_suboptimal_arms = np.mean(self.episodes_suboptimal_arms)

                print("Mean regret for the model is {}".format(mean_regret))
                print("Regret in terms of suboptimal arms is {}".format(mean_nb_suboptimal_arms))
                return 1

            test_episode_count += 1
Example #10
    def visualize(self, epoch, opt_params):
        Igen = self.netG(self.fixed_noise)  # GLO on a noise
        utils.make_gif(
            Igen,
            'runs%d/ims_%s/generations_epoch_%03d' % (counter, self.rn, epoch),
            opt_params.batch_size)

        z = utils.sample_gaussian(self.netZ.emb.weight.clone().cpu(),
                                  self.vis_n)
        Igauss = self.netG(z)  # GLO on gaussian
        utils.make_gif(
            Igauss,
            'runs%d/ims_%s/gaussian_epoch_%03d' % (counter, self.rn, epoch),
            opt_params.batch_size)
Example #11
def celeb():
    """ Function that loads Celeb data, reshapes it to TF desired input (hight, width, color)
    Then, the function defines the shape of the discriminator and generator
    """

    # This just gets a list of filenames to be loaded in dynamically due to their large number and size
    X = utils.load_Celeb()

    dimensions = 64  # Assumes images are square - uses only 1 dimension
    colors = 3

    # Hyperparameters gathered from other official implementations that worked, selected with hyperparameter optimisation techniques

    # Hyperparameter keys:
    # conv layer: (feature maps, filter size, stride=2, batch norm used?)
    # dense layer: (hidden units, batch norm used?)
    discriminator_sizes = {
        'conv_layers': [(64, 5, 2, False), (128, 5, 2, True),
                        (256, 5, 2, True), (512, 5, 2, True)],
        'dense_layers': []
    }

    # Hyperparameter keys:
    # z : latent variable dimensionality (drawing uniform random samples from it)
    # projection: initial number of feature maps (flat vector -> 3D image!)
    # batchNorm_after_projection: flag indicating whether to use batchnorm after projecting the flat vector
    # conv layer: (feature maps, filter size, stride=2, batch norm used?)
    # dense layer: (hidden units, batch norm used?)
    # output_activation: activation function - using tanh since the Celeb data is scaled between [-1, 1], as recommended by GAN researchers
    generator_sizes = {
        'z': 100,
        'projection': 512,
        'batchNorm_after_projection': True,
        'conv_layers': [(256, 5, 2, True), (128, 5, 2, True), (64, 5, 2, True),
                        (colors, 5, 2, False)],
        'dense_layers': [],
        'output_activation': tf.tanh,
    }

    # Create the DCGAN and fit it to the images
    name = 'Celeb'
    GAN = DCGAN(dimensions, colors, discriminator_sizes, generator_sizes)
    GAN.fit(X, name)

    utils.make_gif(name)
Example #12
def mnist():
    """ Function that loads MNIST, reshapes it to TF desired input (hight, width, color)
    Then, the function defines the shape of the discriminator and generator
    """

    X, _ = utils.load_MNIST()
    X = X.reshape(len(X), 28, 28, 1)
    dimensions = X.shape[1]  # Assumes images are square - uses only 1 dimension
    colors = X.shape[-1]

    # Hyperparameters gathered from other official implementations that worked, selected with hyperparameter optimisation techniques

    # Hyperparameter keys:
    # conv layer: (feature maps, filter size, stride=2, batch norm used?)
    # dense layer: (hidden units, batch norm used?)
    discriminator_sizes = {
        'conv_layers': [(2, 5, 2, False), (64, 5, 2, True)],
        'dense_layers': [(1024, True)]
    }

    # Hyperparameter keys:
    # z : latent variable dimensionality (drawing uniform random samples from it)
    # projection: initial number of feature maps (flat vector -> 3D image!)
    # batchNorm_after_projection: flag indicating whether to use batchnorm after projecting the flat vector
    # conv layer: (feature maps, filter size, stride=2, batch norm used?)
    # dense layer: (hidden units, batch norm used?)
    # output_activation: activation function - using sigmoid since MNIST varies between [0, 1]
    generator_sizes = {
        'z': 100,
        'projection': 128,
        'batchNorm_after_projection': False,
        'conv_layers': [(128, 5, 2, True), (colors, 5, 2, False)],
        'dense_layers': [(1024, True)],
        'output_activation': tf.sigmoid,
    }

    # Create the DCGAN and fit it to the images
    name = 'MNIST'
    GAN = DCGAN(dimensions, colors, discriminator_sizes, generator_sizes)
    GAN.fit(X, name)

    utils.make_gif(name)
Example #13
def build_weixin_message(weixin):
    items = []
    index = 0
    message_title = ''
    if weixin.cover.name != u'' and weixin.title != u'' and weixin.recommended_reason != u'':
        message_title = weixin.title
        items.append(WeixinMessageItem(image=weixin.cover.path, title=weixin.title, digest=weixin.title, content=weixin.recommended_reason))
        index += 1
    for news in weixin.news.all():
        title = u'游戏情报站  -  %s' % news.brief_comment
        if index == 0:
            message_title = title
        items.append(WeixinMessageItem(image=make_gif(news), title=title, digest=title, content=news.recommended_reason + u'<br><br><font color="gray">点击“阅读原文”查看更多</font>', sourceurl=u'http://cow.bestgames7.com/d/%s/' % news.id))
        index += 1
    for game in weixin.games.all():
        title = u'%s  -  %s' % (game.name, game.brief_comment)
        if index == 0:
            message_title = title
        if index > 0:
            items.append(WeixinMessageItem(image=make_gif(game), title=title, digest=title, content=_normalize_content(game), sourceurl=u'http://cow.bestgames7.com/d/%s/' % game.id))
        else:
            items.append(WeixinMessageItem(image=make_gif(game), title=title, digest=title, content=_normalize_content(game), sourceurl=u'http://cow.bestgames7.com/d/%s/' % game.id))
        index += 1
    for player in weixin.players.all():
        title = u'我是玩家  -  %s' % player.brief_comment
        if index == 0:
            message_title = title
        items.append(WeixinMessageItem(image=player.image_url.path, title=title, digest=title, content=player.recommended_reason))
        index += 1
    for puzzle in weixin.puzzles.all():
        title = u'趣味答题  -  %s' % puzzle.title
        if index == 0:
            message_title = title
        content = puzzle.description + '<br>'
        content += 'A.' + puzzle.option1 + '<br>'
        content += 'B.' + puzzle.option2 + '<br>'
        content += 'C.' + puzzle.option3 + '<br>'
        content += 'D.' + puzzle.option4 + '<br></br>'
        content += u'<font color="gray">回复"答题",参与答题得积分换礼品的活动吧</font><br>'
        items.append(WeixinMessageItem(image=puzzle.image_url.path, title=title, digest=title, content=content))
        index += 1
    return WeixinMessage(weixin.id, message_title, items)
Example #14
def get_optimized_model_camera(mesh, camera, config):
    model = ModelCamera(mesh.vertices, mesh.faces, mesh.regions, config.CAMERA.REF_SILHOUETTE,
                        camera.x, camera.y, camera.z, min_distance=config.CAMERA.MIN_DISTANCE,
                        max_rotation_l_r=config.CAMERA.MAX_ROTATION_LR, max_rotation_u_d=config.CAMERA.MAX_ROTATION_UD,
                        use_anchor_points=config.CAMERA.USE_ANCHOR_POINTS,
                        silhouette_nose=config.CAMERA.ANCHOR_NOSE_IMG, silhouette_mouth=config.CAMERA.ANCHOR_MOUTH_IMG)
    model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.OPT.LR_CAMERA)

    dir_imgs = os.path.join(config.PATH.TMP, "camera")
    if not os.path.isdir(dir_imgs):
        os.mkdir(dir_imgs)

    loop = tqdm(range(config.OPT.ITER_CAMERA))
    for i in loop:
        loop.set_description(f"Optimizing camera")
        optimizer.zero_grad()
        loss = model()
        loss.backward()
        optimizer.step()

        images, _, _ = model.renderer(model.vertices, model.faces, torch.tanh(model.textures))
        image = images.detach().cpu().numpy()[0].transpose(1, 2, 0)
        imsave(os.path.join(dir_imgs, "%04d.png" % i), (255 * image).astype(np.uint8))

    # Save output imgs and gif
    make_gif(os.path.join(config.PATH.OUT, "camera.gif"), dir_imgs)

    # Silhouette faces
    image = model.renderer(model.vertices, model.faces, mode='silhouettes')
    image = image.detach().cpu().numpy().transpose(1, 2, 0)
    imsave(os.path.join(config.PATH.OUT, "silhouette_camera.png"), (255 * image).astype(np.uint8))
    # Silhouette nose
    image = model.renderer(model.vertices, model.triangles_nose, mode='silhouettes')
    image = image.detach().cpu().numpy().transpose(1, 2, 0)
    imsave(os.path.join(config.PATH.OUT, "silhouette_nose.png"), (255 * image).astype(np.uint8))
    # Silhouette mouth
    image = model.renderer(model.vertices, model.triangles_mouth, mode='silhouettes')
    image = image.detach().cpu().numpy().transpose(1, 2, 0)
    imsave(os.path.join(config.PATH.OUT, "silhouette_mouth.png"), (255 * image).astype(np.uint8))

    return model
Example #15
def post_process_and_save_he(sols,
                             results_dir="./data/HE",
                             name_simulation="HE_sim_default_name",
                             save_plots=False):

    try:
        os.makedirs(results_dir)
    except OSError:
        pass

    np.save(os.path.join(results_dir, name_simulation + "_array"), sols)

    if save_plots:

        make_gif(sols,
                 os.path.join(results_dir, name_simulation + "gif" + ".gif"))

        ampsn = []
        for sol in sols:
            ampsn.append(np.sum(sol))
        ampsn = np.array(ampsn) / np.size(sol)

        amps_max = []
        for sol in sols:
            amps_max.append(np.max(sol))
        amps_max = np.array(amps_max)

        fig = plt.figure(figsize=(15, 15))

        plt.plot(np.arange(0, len(ampsn), 1), np.abs(ampsn), label="avg amp")
        plt.legend()
        fig.savefig(
            os.path.join(results_dir, name_simulation + "amplitude_avg.png"))

        fig = plt.figure(figsize=(15, 15))
        plt.plot(np.arange(0, len(amps_max), 1),
                 np.abs(amps_max),
                 label="amp max")
        plt.legend()
        fig.savefig(
            os.path.join(results_dir, name_simulation + "amplitude_max.png"))
Example #16
def post_process_and_save_ac(sols,
                             results_dir="./data/AC",
                             name_simulation="AC_sim_default_name",
                             save_plots=False,
                             duration=0.05):

    try:
        os.makedirs(results_dir)
    except OSError:
        pass

    np.save(os.path.join(results_dir, name_simulation + "_array"), sols)

    if save_plots:

        make_gif(sols,
                 os.path.join(results_dir, name_simulation + "gif" + ".gif"),
                 duration=duration)

        amps = []
        for sol in sols:
            amps.append(np.sum(np.abs(sol)))
        amps = np.array(amps) / np.size(sol)

        ampsn = []
        for sol in sols:
            ampsn.append(np.sum(sol))
        ampsn = np.array(ampsn) / np.size(sol)

        fig = plt.figure(figsize=(15, 15))

        plt.plot(np.arange(0, len(amps), 1), amps, label="avg abs phase")
        plt.plot(np.arange(0, len(ampsn), 1),
                 np.abs(ampsn),
                 label="abs avg phase")

        plt.legend()
        plt.savefig(
            os.path.join(results_dir, name_simulation + "avg_phases_time.png"))
Example #17
    def on_epoch_end(self, trainer, model):

        #epoch = trainer.current_epoch
        self._epoch += 1
        epoch = int(self._epoch / 2)  #dirty fix

        if epoch % self._plot_every == 0:
            plt.close("all")

            fig = plot_2D_comparison_analytical(model,
                                                self._val_fun,
                                                title="epoch {}".format(epoch))

            array = fig_to_array(fig)

            self._arrays.append(array)

        if epoch % self._save_every == 0:

            make_gif(self._arrays,
                     self._results_dir + "/{}.gif".format(self._name),
                     size=self._size,
                     duration=0.5)
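fig_to_array converts a Matplotlib figure into an image array so the frames can be collected for make_gif. A minimal sketch, assuming an Agg-style canvas, is:
import numpy as np


def fig_to_array(fig):
    """Render a Matplotlib figure and return it as an HxWx3 uint8 RGB array (sketch)."""
    fig.canvas.draw()
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    return buf.reshape(h, w, 3)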
Example #18
def get_optimized_model_morphing(mesh, camera, config):
    model = ModelMorphing(mesh.vertices, mesh.faces, config.CAMERA.REF_SILHOUETTE, camera.x, camera.y, camera.z)
    model.cuda()
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=config.OPT.LR_MORPHING)

    dir_imgs = os.path.join(config.PATH.TMP, "morphing")
    if not os.path.isdir(dir_imgs):
        os.mkdir(dir_imgs)

    loop = tqdm(range(config.OPT.ITER_MORPHING))
    for i in loop:
        loop.set_description(f"Morphing model")
        optimizer.zero_grad()
        loss = model()
        loss.backward()
        optimizer.step()

        images = model.renderer(model.vertices, model.faces, mode='silhouettes')
        image = images.detach().cpu().numpy().transpose(1, 2, 0)
        imsave(os.path.join(dir_imgs, "%04d.png" % i), (255 * image).astype(np.uint8))

    make_gif(os.path.join(config.PATH.OUT, "morphing.gif"), dir_imgs)

    return model
Example #19
def play_episode(model, env, memory, save_path):
    memory.clear()
    frame = env.reset()
    done = False
    cumulative_reward = 0
    frames = []
    while not done:
        frames.append(frame)
        with torch.no_grad():
            action = model(
                memory.get_state(frame)).max(1)[1][0].cpu()  # .numpy()

        next_frame, reward, done, info = env.step(action)
        cumulative_reward += reward
        memory.add_experience(frame, action, reward, next_frame, done)

        frame = next_frame

    save_path = Path(
        save_path.parent,
        save_path.stem + '_r' + str(int(cumulative_reward)) + save_path.suffix)
    # save_path = save_path.name + '_r' + cumulative_reward + save_path.suffix
    make_gif(frames, save_path)
    return cumulative_reward
Example #20
    def play(self, sess, coord, saver):
        episode_count = sess.run(self.global_episode)

        total_steps = 0
        if not FLAGS.train:
            test_episode_count = 0

        print("Starting agent " + str(self.thread_id))
        with sess.as_default(), sess.graph.as_default():
            while not coord.should_stop():
                if FLAGS.train and episode_count > FLAGS.max_nb_episodes_train:
                    return 0

                sess.run(self.update_local_vars)
                episode_buffer = []

                # if not FLAGS.train:
                #     print("Episode {}".format(test_episode_count))

                episode_regret = 0
                episode_suboptimal_arm = 0
                episode_values = []
                episode_frames = []
                episode_reward = [0 for _ in range(FLAGS.nb_actions)]
                episode_step_count = 0
                d = False
                if FLAGS.meta:
                    r = 0
                a = 0
                t = 0
                if not FLAGS.resume and FLAGS.train:
                    self.env.reset()
                else:
                    #print(test_episode_count)
                    self.env.set(self.settings["envs"][test_episode_count])

                rnn_state = self.local_AC.state_init

                while not d:
                    if FLAGS.meta:
                        feed_dict = {
                            self.local_AC.prev_rewards: [[r]],
                            self.local_AC.timestep: [[t]],
                            self.local_AC.prev_actions: [a],
                            self.local_AC.state_in[0]: rnn_state[0],
                            self.local_AC.state_in[1]: rnn_state[1]
                        }
                    else:
                        feed_dict = {
                            self.local_AC.timestep: [[t]],
                            self.local_AC.prev_actions: [a],
                            self.local_AC.state_in[0]: rnn_state[0],
                            self.local_AC.state_in[1]: rnn_state[1]
                        }

                    pi, v, rnn_state_new = sess.run([
                        self.local_AC.policy, self.local_AC.value,
                        self.local_AC.state_out
                    ],
                                                    feed_dict=feed_dict)
                    a = np.random.choice(pi[0])
                    a = np.argmax(pi == a)

                    rnn_state = rnn_state_new
                    r, d, t = self.env.pull_arm(a)

                    # if not FLAGS.train:
                    episode_regret += self.env.get_timestep_regret(a)
                    optimal_action = self.env.get_optimal_arm()
                    if optimal_action != a:
                        episode_suboptimal_arm += 1

                    episode_buffer.append([a, r, t, d, v[0, 0]])
                    episode_values.append(v[0, 0])

                    if not FLAGS.game == '11arms':
                        episode_frames.append(
                            set_image_bandit(episode_reward,
                                             self.env.get_bandit(), a, t))
                    else:
                        episode_frames.append(
                            set_image_bandit_11_arms(
                                episode_reward, self.env.get_optimal_arm(), a,
                                t))

                    episode_reward[a] += r
                    total_steps += 1
                    episode_step_count += 1

                self.episode_rewards.append(np.sum(episode_reward))

                self.episodes_suboptimal_arms.append(episode_suboptimal_arm)
                self.episode_regrets.append(episode_regret)

                # if not FLAGS.train:
                #     print("Episode total reward was: {} vs optimal reward {}".format(np.sum(episode_reward),
                #                                                                      episode_rewards_for_optimal_arm))
                #     print("Regret is {}".format(max(episode_rewards_for_optimal_arm - np.sum(episode_reward), 0)))
                #     print("Suboptimal arms in the episode: {}".format(episode_suboptimal_arm))

                self.episode_lengths.append(episode_step_count)
                self.episode_mean_values.append(np.mean(episode_values))

                if len(episode_buffer) != 0 and FLAGS.train == True:
                    if episode_count % FLAGS.summary_interval == 0 and episode_count != 0:
                        l, v_l, p_l, e_l, g_n, v_n, ms = self.train(
                            episode_buffer, sess, 0.0, self.settings, True)
                    else:
                        self.train(episode_buffer, sess, 0.0, self.settings,
                                   False)

                if not FLAGS.train and test_episode_count == FLAGS.nb_test_episodes - 1:
                    # episode_regret = [max(o - r, 0) for (o, r) in
                    #                   zip(self.episode_optimal_rewards, self.episode_rewards)]
                    mean_regret = np.mean(self.episode_regrets)
                    mean_nb_suboptimal_arms = np.mean(
                        self.episodes_suboptimal_arms)

                    if self.settings["mode"] == "val":
                        with open(FLAGS.results_val_file, "a+") as f:
                            f.write(
                                "Model: game={} lr={} gamma={} mean_regret={} mean_nb_subopt_arms={}\n"
                                .format(self.settings["game"],
                                        self.settings["lr"],
                                        self.settings["gamma"], mean_regret,
                                        mean_nb_suboptimal_arms))
                    elif self.settings["mode"] == "test":
                        self.images = np.array(episode_frames)
                        #make_gif(self.images,
                        #         self.settings["frames_dir"] + '/image' + str(test_episode_count) + '.gif',
                        #        duration=len(self.images) * 0.1, true_image=True)
                        with open(FLAGS.results_test_file, "a+") as f:
                            f.write(
                                "Model: game={} lr={} gamma={} mean_regret={} mean_nb_subopt_arms={}\n"
                                .format(self.settings["game"],
                                        self.settings["lr"],
                                        self.settings["gamma"], mean_regret,
                                        mean_nb_suboptimal_arms))
                    else:
                        with open(FLAGS.results_eval_file, "a+") as f:
                            f.write("{} ".format(mean_regret))
                        print("Mean regret for the model is {}".format(
                            mean_regret))
                        print(
                            "Regret in terms of suboptimal arms is {}".format(
                                mean_nb_suboptimal_arms))
                    return 1

                if not FLAGS.train:
                    self.images = np.array(episode_frames)
                    make_gif(self.images,
                             FLAGS.frames_test_dir + '/image' +
                             str(episode_count) + '.gif',
                             duration=len(self.images) * 0.1,
                             true_image=True)

                if FLAGS.train == True and episode_count % FLAGS.checkpoint_interval == 0 and episode_count != 0:
                    saver.save(sess,
                               self.model_path + '/model-' +
                               str(episode_count) + '.cptk',
                               global_step=self.global_episode)
                    print("Saved Model at {}".format(self.model_path +
                                                     '/model-' +
                                                     str(episode_count) +
                                                     '.cptk'))

                if FLAGS.train and episode_count % FLAGS.summary_interval == 0 and episode_count != 0:
                    # if episode_count % FLAGS.frames_interval == 0 and self.name == 'agent_0':
                    #     self.images = np.array(episode_frames)
                    #     make_gif(self.images, self.settings.frames_dir + '/image' + str(episode_count) + '.gif',
                    #              duration=len(self.images) * 0.1, true_image=True)

                    mean_reward = np.mean(
                        self.episode_rewards[-FLAGS.summary_interval:])
                    mean_length = np.mean(
                        self.episode_lengths[-FLAGS.summary_interval:])
                    mean_value = np.mean(
                        self.episode_mean_values[-FLAGS.summary_interval:])
                    episode_regret = [
                        max(o - r, 0) for (o, r) in
                        zip(self.episode_optimal_rewards[-FLAGS.summary_interval:],
                            self.episode_rewards[-50:])
                    ]
                    mean_regret = np.mean(episode_regret)
                    mean_nb_suboptimal_arms = np.mean(
                        self.episodes_suboptimal_arms[-FLAGS.summary_interval:]
                    )

                    self.summary.value.add(tag='Perf/Reward',
                                           simple_value=float(mean_reward))
                    self.summary.value.add(tag='Perf/Length',
                                           simple_value=float(mean_length))
                    self.summary.value.add(tag='Perf/Value',
                                           simple_value=float(mean_value))

                    self.summary.value.add(tag='Mean Regret',
                                           simple_value=float(mean_regret))
                    self.summary.value.add(
                        tag='Mean NSuboptArms',
                        simple_value=float(mean_nb_suboptimal_arms))
                    self.summary.value.add(tag='Losses/Total Loss',
                                           simple_value=float(l))
                    self.summary.value.add(tag='Losses/Value Loss',
                                           simple_value=float(v_l))
                    self.summary.value.add(tag='Losses/Policy Loss',
                                           simple_value=float(p_l))
                    self.summary.value.add(tag='Losses/Entropy',
                                           simple_value=float(e_l))
                    self.summary.value.add(tag='Losses/Grad Norm',
                                           simple_value=float(g_n))
                    self.summary.value.add(tag='Losses/Var Norm',
                                           simple_value=float(v_n))
                    summaries = tf.Summary().FromString(ms)
                    sub_summaries_dict = {}
                    for value in summaries.value:
                        value_field = value.WhichOneof('value')
                        value_ifo = sub_summaries_dict.setdefault(
                            value.tag, {
                                'value_field': None,
                                'values': []
                            })
                        if not value_ifo['value_field']:
                            value_ifo['value_field'] = value_field
                        else:
                            assert value_ifo['value_field'] == value_field
                        value_ifo['values'].append(getattr(value, value_field))

                    for name, value_ifo in sub_summaries_dict.items():
                        summary_value = self.summary.value.add()
                        summary_value.tag = name
                        if value_ifo['value_field'] == 'histo':
                            values = value_ifo['values']
                            summary_value.histo.min = min(
                                [x.min for x in values])
                            summary_value.histo.max = max(
                                [x.max for x in values])
                            summary_value.histo.num = sum(
                                [x.num for x in values])
                            summary_value.histo.sum = sum(
                                [x.sum for x in values])
                            summary_value.histo.sum_squares = sum(
                                [x.sum_squares for x in values])
                            for lim in values[0].bucket_limit:
                                summary_value.histo.bucket_limit.append(lim)
                            for bucket in values[0].bucket:
                                summary_value.histo.bucket.append(bucket)
                        else:
                            print(
                                'Warning: could not aggregate summary of type {}'
                                .format(value_ifo['value_field']))

                    self.summary_writer.add_summary(self.summary,
                                                    episode_count)

                    self.summary_writer.flush()

                if self.name == 'agent_0':
                    sess.run(self.increment_global_episode)
                if not FLAGS.train:
                    #     if self.settings["mode"] == "test":
                    #         self.images = np.array(episode_frames)
                    #         make_gif(self.images, self.settings["frames_dir"] + '/image' + str(test_episode_count) + '.gif',
                    #                  duration=len(self.images) * 0.1, true_image=True)
                    test_episode_count += 1
                episode_count += 1
Example #21
W = W['emb.weight'].data.cpu().numpy()

netG = modelvideo.netG_new(nz).cuda()

if torch.cuda.device_count() > 1:
    parallel = True
    print("Using", torch.cuda.device_count(), "GPUs!")
    netG = nn.DataParallel(netG)

state_dict = torch.load('runs%d/nets_%s/netG_glo.pth' % (counter, rn))
netG.load_state_dict(state_dict) # load the weights for generator (GLO)
if parallel:
    netG = netG.module

# d is the dimension of noise vector (e)
d = 16
nepoch = 50
icpt = icp.ICPTrainer(W, d)
icpt.train_icp(nepoch)
torch.save(icpt.icp.netT.state_dict(), 'runs%d/nets_%s/netT_nag.pth' % (counter, rn)) #saves the param of the netT

#Prediction
z = icpt.icp.netT(torch.randn(100, d).cuda())
print("z shape", z.shape)
video = netG(z)
print("video shape is", video.shape)


# utils.make_gif(utils.denorm(video), 'runs3/ims_%s/sample' % (rn), 1)
utils.make_gif(video, 'runs%d/ims_%s/sample' % (counter, rn), 5)
Example #22
#!/usr/bin/env python

import argparse
from glob import glob

from utils import make_gif

parser = argparse.ArgumentParser()
parser.add_argument("--model_name",
                    type=str,
                    default='ptb_2018-08-22_11-44-47')
parser.add_argument("--max_frame", type=int, default=5)
parser.add_argument("--output", type=str, default="sampe.gif")
parser.add_argument("--title", type=str, default="")

if __name__ == "__main__":
    args = parser.parse_args()

    paths = glob(f"./logs/{args.model_name}/networks/*.png")
    make_gif(paths,
             args.output,
             max_frame=args.max_frame,
             prefix=f"{args.title}\n" if args.title else "")
Example #23
            '''

        n_updates += 1

        counter += 1

        if (batch_index + 1) % 10 == 0:

            save_img(
                flow[0].data.cpu(), DIR_TO_SAVE + 'original_flow_%s_%s_a.jpg' %
                (current_epoch, batch_index))
            save_img(
                gen_flow[0].data.cpu(), DIR_TO_SAVE + 'fake_flow_%s_%s_a.jpg' %
                (current_epoch, batch_index))
            make_gif(
                denorm(videos.data.cpu()[0]), DIR_TO_SAVE +
                'original_gifs_%s_%s_b.gif' % (current_epoch, batch_index))
            save_img(
                denorm(first_frame[0].data.cpu()), DIR_TO_SAVE +
                'fake_gifs_%s_%s_a.jpg' % (current_epoch, batch_index))
            make_gif(
                denorm(fake_videos.data.cpu()[0]), DIR_TO_SAVE +
                'fake_gifs_%s_%s_b.gif' % (current_epoch, batch_index))
            make_gif(
                denorm(mask.data.cpu()[0]), DIR_TO_SAVE + 'mask__%s_%s_b.gif' %
                (current_epoch, batch_index))
            text_logger.info('Gifs saved at epoch: %d, batch_index: %d' %
                             (current_epoch, batch_index))

        if current_epoch % 100 == 0:
            torch.save(generator.state_dict(), './generator1.pkl')
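denorm above is the usual inverse of a [-1, 1] input normalisation; a short sketch under that assumption (not necessarily the original helper):
def denorm(x):
    """Map tensors from [-1, 1] back to [0, 1] for saving (assumed normalisation)."""
    out = (x + 1) / 2
    return out.clamp(0, 1)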
Example #24
    # avg loss values for plot
    D_avg_losses.append(D_avg_loss)
    G_avg_losses.append(G_avg_loss)

    # Show result for test image
    gen_image = G(Variable(test_input))  #.cuda()))
    gen_image = gen_image.cpu().data
    utils.plot_test_result(test_input,
                           test_target,
                           gen_image,
                           epoch,
                           save=True,
                           save_dir=save_dir)

np.savetxt("D_losses.csv", D_losses, delimiter=",")
np.savetxt("G_losses.csv", G_losses, delimiter=",")
# Plot average losses
utils.plot_loss(D_losses,
                G_losses,
                params.num_epochs,
                save=True,
                save_dir=save_dir)

# Make gif
utils.make_gif(params.dataset, params.num_epochs, save_dir=save_dir)

# Save trained parameters of model
torch.save(G.state_dict(), model_dir + 'generator_param.pkl')
torch.save(D.state_dict(), model_dir + 'discriminator_param.pkl')
Example #25
                                             col=10,
                                             figsize=(50, 50),
                                             dataformat='CHW')
                    fig.savefig(PICTURE_DIR + "%s_%d.png" %
                                (str(count).zfill(3), index))
                    plt.close(fig)

                logger.info(
                    "Epoch({}/{}) D_loss : {}, G_loss : {}, Q_loss : {}".
                    format(epoch + 1, train_epochs, D_loss, G_loss, Q_loss))

                # Save model
                saver = tf.train.Saver(max_to_keep=10)
                saver.save(self.sess,
                           os.path.join(SAVE_DIR, 'model'),
                           global_step=epoch + 1)
                logger.info("Model save in %s" % SAVE_DIR)


if __name__ == '__main__':
    create_dir(SAVE_DIR)
    create_dir(PICTURE_DIR)
    infogan = InfoGAN()
    infogan.initialize()
    infogan.train(100000)

    for index in range(2):
        images_path = glob.glob(os.path.join(PICTURE_DIR, '*_%d.png' % index))
        gif_path = os.path.join(PICTURE_DIR, '%d.gif' % index)
        make_gif(sorted(images_path), gif_path)
Example #26
    def play(self, sess, coord, saver):
        episode_count = sess.run(self.global_episode)

        total_steps = 0
        if not FLAGS.train:
            test_episode_count = 0

        print("Starting agent " + str(self.thread_id))
        with sess.as_default(), sess.graph.as_default():
            while not coord.should_stop():
                if FLAGS.train and episode_count > FLAGS.max_nb_episodes_train:
                    return 0

                sess.run(self.update_local_vars)
                episode_buffer = []

                # if not FLAGS.train:
                #     print("Episode {}".format(test_episode_count))

                episode_regret = 0
                episode_suboptimal_arm = 0
                episode_values = []
                episode_frames = []
                episode_reward = [0 for _ in range(FLAGS.nb_actions)]
                episode_step_count = 0
                d = False
                if FLAGS.meta:
                    r = 0
                a = 0
                t = 0
                if not FLAGS.resume and FLAGS.train:
                    self.env.reset()
                else:
                    #print(test_episode_count)
                    self.env.set(self.settings["envs"][test_episode_count])

                rnn_state = self.local_AC.state_init

                while not d:
                    if FLAGS.meta:
                        feed_dict = {
                            self.local_AC.prev_rewards: [[r]],
                            self.local_AC.timestep: [[t]],
                            self.local_AC.prev_actions: [a],
                            self.local_AC.state_in[0]: rnn_state[0],
                            self.local_AC.state_in[1]: rnn_state[1]}
                    else:
                        feed_dict = {
                            self.local_AC.timestep: [[t]],
                            self.local_AC.prev_actions: [a],
                            self.local_AC.state_in[0]: rnn_state[0],
                            self.local_AC.state_in[1]: rnn_state[1]}

                    pi, v, rnn_state_new = sess.run(
                        [self.local_AC.policy, self.local_AC.value, self.local_AC.state_out], feed_dict=feed_dict)
                    a = np.random.choice(pi[0])
                    a = np.argmax(pi == a)

                    rnn_state = rnn_state_new
                    r, d, t = self.env.pull_arm(a)

                    # if not FLAGS.train:
                    episode_regret += self.env.get_timestep_regret(a)
                    optimal_action = self.env.get_optimal_arm()
                    if optimal_action != a:
                        episode_suboptimal_arm += 1

                    episode_buffer.append([a, r, t, d, v[0, 0]])
                    episode_values.append(v[0, 0])

                    if not FLAGS.game == '11arms':
                        episode_frames.append(set_image_bandit(episode_reward, self.env.get_bandit(), a, t))
                    else:
                        episode_frames.append(
                            set_image_bandit_11_arms(episode_reward, self.env.get_optimal_arm(), a, t))

                    episode_reward[a] += r
                    total_steps += 1
                    episode_step_count += 1

                self.episode_rewards.append(np.sum(episode_reward))

                self.episodes_suboptimal_arms.append(episode_suboptimal_arm)
                self.episode_regrets.append(episode_regret)

                # if not FLAGS.train:
                #     print("Episode total reward was: {} vs optimal reward {}".format(np.sum(episode_reward),
                #                                                                      episode_rewards_for_optimal_arm))
                #     print("Regret is {}".format(max(episode_rewards_for_optimal_arm - np.sum(episode_reward), 0)))
                #     print("Suboptimal arms in the episode: {}".format(episode_suboptimal_arm))

                self.episode_lengths.append(episode_step_count)
                self.episode_mean_values.append(np.mean(episode_values))

                if len(episode_buffer) != 0 and FLAGS.train == True:
                    if episode_count % FLAGS.summary_interval == 0 and episode_count != 0:
                        l, v_l, p_l, e_l, g_n, v_n, ms = self.train(episode_buffer, sess, 0.0, self.settings, True)
                    else:
                        self.train(episode_buffer, sess, 0.0, self.settings, False)

                if not FLAGS.train and test_episode_count == FLAGS.nb_test_episodes - 1:
                    # episode_regret = [max(o - r, 0) for (o, r) in
                    #                   zip(self.episode_optimal_rewards, self.episode_rewards)]
                    mean_regret = np.mean(self.episode_regrets)
                    mean_nb_suboptimal_arms = np.mean(self.episodes_suboptimal_arms)

                    if self.settings["mode"] == "val":
                        with open(FLAGS.results_val_file, "a+") as f:
                            f.write("Model: game={} lr={} gamma={} mean_regret={} mean_nb_subopt_arms={}\n".format(
                                self.settings["game"],
                                self.settings["lr"],
                                self.settings["gamma"],
                                mean_regret,
                                mean_nb_suboptimal_arms))
                    elif self.settings["mode"] == "test":
                        self.images = np.array(episode_frames)
                        #make_gif(self.images,
                        #         self.settings["frames_dir"] + '/image' + str(test_episode_count) + '.gif',
                         #        duration=len(self.images) * 0.1, true_image=True)
                        with open(FLAGS.results_test_file, "a+") as f:
                            f.write("Model: game={} lr={} gamma={} mean_regret={} mean_nb_subopt_arms={}\n".format(
                                self.settings["game"],
                                self.settings["lr"],
                                self.settings["gamma"],
                                mean_regret,
                                mean_nb_suboptimal_arms))
                    else:
                        with open(FLAGS.results_eval_file, "a+") as f:
                            f.write("{} ".format(mean_regret))
                        print("Mean regret for the model is {}".format(mean_regret))
                        print("Regret in terms of suboptimal arms is {}".format(mean_nb_suboptimal_arms))
                    return 1

                if not FLAGS.train:
                    self.images = np.array(episode_frames)
                    make_gif(self.images, FLAGS.frames_test_dir + '/image' + str(episode_count) + '.gif',
                             duration=len(self.images) * 0.1, true_image=True)

                if FLAGS.train == True and episode_count % FLAGS.checkpoint_interval == 0 and episode_count != 0:
                    saver.save(sess, self.model_path + '/model-' + str(episode_count) + '.cptk',
                               global_step=self.global_episode)
                    print("Saved Model at {}".format(self.model_path + '/model-' + str(episode_count) + '.cptk'))

                if FLAGS.train and episode_count % FLAGS.summary_interval == 0 and episode_count != 0:
                    # if episode_count % FLAGS.frames_interval == 0 and self.name == 'agent_0':
                    #     self.images = np.array(episode_frames)
                    #     make_gif(self.images, self.settings.frames_dir + '/image' + str(episode_count) + '.gif',
                    #              duration=len(self.images) * 0.1, true_image=True)

                    mean_reward = np.mean(self.episode_rewards[-FLAGS.summary_interval:])
                    mean_length = np.mean(self.episode_lengths[-FLAGS.summary_interval:])
                    mean_value = np.mean(self.episode_mean_values[-FLAGS.summary_interval:])
                    episode_regret = [max(o - r, 0) for (o, r) in
                                      zip(self.episode_optimal_rewards[-FLAGS.summary_interval:],
                                          self.episode_rewards[-50:])]
                    mean_regret = np.mean(episode_regret)
                    mean_nb_suboptimal_arms = np.mean(self.episodes_suboptimal_arms[-FLAGS.summary_interval:])

                    self.summary.value.add(tag='Perf/Reward', simple_value=float(mean_reward))
                    self.summary.value.add(tag='Perf/Length', simple_value=float(mean_length))
                    self.summary.value.add(tag='Perf/Value', simple_value=float(mean_value))

                    self.summary.value.add(tag='Mean Regret', simple_value=float(mean_regret))
                    self.summary.value.add(tag='Mean NSuboptArms', simple_value=float(mean_nb_suboptimal_arms))
                    self.summary.value.add(tag='Losses/Total Loss', simple_value=float(l))
                    self.summary.value.add(tag='Losses/Value Loss', simple_value=float(v_l))
                    self.summary.value.add(tag='Losses/Policy Loss', simple_value=float(p_l))
                    self.summary.value.add(tag='Losses/Entropy', simple_value=float(e_l))
                    self.summary.value.add(tag='Losses/Grad Norm', simple_value=float(g_n))
                    self.summary.value.add(tag='Losses/Var Norm', simple_value=float(v_n))
                    summaries = tf.Summary().FromString(ms)
                    sub_summaries_dict = {}
                    for value in summaries.value:
                        value_field = value.WhichOneof('value')
                        value_ifo = sub_summaries_dict.setdefault(value.tag,
                                                                  {'value_field': None, 'values': []})
                        if not value_ifo['value_field']:
                            value_ifo['value_field'] = value_field
                        else:
                            assert value_ifo['value_field'] == value_field
                        value_ifo['values'].append(getattr(value, value_field))

                    for name, value_ifo in sub_summaries_dict.items():
                        summary_value = self.summary.value.add()
                        summary_value.tag = name
                        if value_ifo['value_field'] == 'histo':
                            values = value_ifo['values']
                            summary_value.histo.min = min([x.min for x in values])
                            summary_value.histo.max = max([x.max for x in values])
                            summary_value.histo.num = sum([x.num for x in values])
                            summary_value.histo.sum = sum([x.sum for x in values])
                            summary_value.histo.sum_squares = sum([x.sum_squares for x in values])
                            for lim in values[0].bucket_limit:
                                summary_value.histo.bucket_limit.append(lim)
                            for bucket in values[0].bucket:
                                summary_value.histo.bucket.append(bucket)
                        else:
                            print(
                                'Warning: could not aggregate summary of type {}'.format(value_ifo['value_field']))

                    self.summary_writer.add_summary(self.summary, episode_count)

                    self.summary_writer.flush()

                if self.name == 'agent_0':
                    sess.run(self.increment_global_episode)
                if not FLAGS.train:
                #     if self.settings["mode"] == "test":
                #         self.images = np.array(episode_frames)
                #         make_gif(self.images, self.settings["frames_dir"] + '/image' + str(test_episode_count) + '.gif',
                #                  duration=len(self.images) * 0.1, true_image=True)
                    test_episode_count += 1
                episode_count += 1
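
A note on the regret bookkeeping in this example: per-episode regret is clipped at zero before averaging, i.e. regret = max(optimal_reward - achieved_reward, 0), so lucky episodes cannot cancel out unlucky ones. A standalone sketch of the same aggregation (the function name and `window` argument below are illustrative, not part of the project's API):

import numpy as np

def summarize_regret(optimal_rewards, achieved_rewards, window=50):
    # clipped per-episode regret, averaged over the most recent `window` episodes
    regrets = [max(o - r, 0) for o, r in zip(optimal_rewards[-window:],
                                             achieved_rewards[-window:])]
    return float(np.mean(regrets))
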
Exemplo n.º 27
0
PATH = sys.argv[1] if len(sys.argv) == 2 else str(Path(__file__).parent.absolute())[:-3]+'imgs/'
NAME = 'huge'

fst = Param(
    CLUSTERS = 12**2,
    CLUST_SIDE_LEN = 7,
    DAYS = 100,

    INFECTION_TIME = 20,
    DEATH_RATE = 0.4,
    INFECT_RATE = 0.3,
    MIGRATIONS_PER_DAY = 100,
    FEAR_RATE = 0.4,
    HEALTHCARE_CAPACITY = 2500
)


start = time()
cumulative, active, healed, dead, migrations, real_mig = run_simulation(fst, PATH)

print('time elapsed:', time() - start, flush=True) 
make_gif(PATH, NAME)

# plt.plot(range(len(migrations)), migrations, label='migrations', color='blue')
plt.plot(range(len(real_mig)), real_mig, label='migrations', color='blue', ls=':')
make_plot(fst, cumulative, active, healed, dead, label='', color='red')

plt.savefig(PATH+NAME+'.png')
print("The visualization of the simulation is available at './imgs/'", flush=True)

Exemplo n.º 28
0
        [0.65, 0.65, 2.5],
        [-0.65, 0.65, 2.5],
    ])

    # FIRST QUESTION
    q1(cube_points, parallel_points, pyramid_points, tronco_points)

    # SECOND QUESTION
    cube_points, parallel_points, pyramid_points, tronco_points = q2(
        cube_points, parallel_points, pyramid_points, tronco_points)
    # THIRD QUESTION
    c_points, p_points = q3(copy(cube_points), copy(pyramid_points),
                            copy(parallel_points), copy(tronco_points),
                            np.array([3, -3, 3]), True)

    # FOURTH QUESTION
    q4(c_points, p_points, True)

    # OPTIONAL
    eyes = [np.array([i, -3, 3]) for i in np.arange(-2, 8, .5)]
    for i, eye in enumerate(eyes):
        # THIRD QUESTION
        c_points, p_points = q3(copy(cube_points), copy(pyramid_points),
                                copy(parallel_points), copy(tronco_points),
                                eye)

        # FOURTH QUESTION
        q4(c_points, p_points, show=False)

    make_gif()
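
q3/q4 re-project the same scene from a series of eye positions before make_gif stitches the rendered frames. Purely as an illustration of the kind of transform involved (the course code's q3/q4 remain the source of truth), a look-at camera followed by perspective division can be sketched as:

import numpy as np

def project_points(points, eye, target=np.zeros(3), d=1.0):
    # camera basis looking from `eye` toward `target` (assumes z is the up axis
    # and that the view direction is never parallel to it)
    forward = (target - eye) / np.linalg.norm(target - eye)
    right = np.cross(forward, np.array([0.0, 0.0, 1.0]))
    right /= np.linalg.norm(right)
    up = np.cross(right, forward)
    cam = (points - eye) @ np.stack([right, up, forward], axis=1)
    # perspective division: scale x and y by d / depth
    return cam[:, :2] * (d / cam[:, 2:3])
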
Exemplo n.º 29
0
def train_agent_model_free(agent, env, params):

    update_timestep = params['update_every_n_steps']
    seed = params['seed']
    log_interval = 1000
    gif_interval = 500000
    n_random_actions = params['n_random_actions']
    n_evals = params['n_evals']
    n_collect_steps = params['n_collect_steps']
    use_statefilter = params['obs_filter']
    save_model = params['save_model']

    assert n_collect_steps > agent.batchsize, "We must initially collect as many steps as the batch size!"

    avg_length = 0
    time_step = 0
    cumulative_timestep = 0
    cumulative_log_timestep = 0
    n_updates = 0
    i_episode = 0
    log_episode = 0
    samples_number = 0
    episode_rewards = []
    episode_steps = []

    if use_statefilter:
        state_filter = MeanStdevFilter(env.env.observation_space.shape[0])
    else:
        state_filter = None

    random.seed(seed)
    torch.manual_seed(seed)
    np.random.seed(seed)
    env.seed(seed)
    env.action_space.np_random.seed(seed)

    max_steps = env.spec.max_episode_steps

    writer = SummaryWriter()

    while samples_number < 3e7:
        time_step = 0
        episode_reward = 0
        i_episode += 1
        log_episode += 1
        state = env.reset()
        if state_filter:
            state_filter.update(state)
        done = False

        while (not done):
            cumulative_log_timestep += 1
            cumulative_timestep += 1
            time_step += 1
            samples_number += 1
            if samples_number < n_random_actions:
                action = env.action_space.sample()
            else:
                action = agent.get_action(state, state_filter=state_filter)
            nextstate, reward, done, _ = env.step(action)
            # if we hit the time-limit, it's not a 'real' done; we don't want to assign low value to those states
            real_done = False if time_step == max_steps else done
            agent.replay_pool.push(
                Transition(state, action, reward, nextstate, real_done))
            state = nextstate
            if state_filter:
                state_filter.update(state)
            episode_reward += reward
            # update if it's time
            if cumulative_timestep % update_timestep == 0 and cumulative_timestep > n_collect_steps:
                q1_loss, q2_loss, pi_loss, a_loss = agent.optimize(
                    update_timestep, state_filter=state_filter)
                n_updates += 1
            # logging
            if cumulative_timestep % log_interval == 0 and cumulative_timestep > n_collect_steps:
                writer.add_scalar('Loss/Q-func_1', q1_loss, n_updates)
                writer.add_scalar('Loss/Q-func_2', q2_loss, n_updates)
                writer.add_scalar('Loss/policy', pi_loss, n_updates)
                writer.add_scalar('Loss/alpha', a_loss, n_updates)
                writer.add_scalar('Values/alpha',
                                  np.exp(agent.log_alpha.item()), n_updates)
                avg_length = np.mean(episode_steps)
                running_reward = np.mean(episode_rewards)
                eval_reward = evaluate_agent(env,
                                             agent,
                                             state_filter,
                                             n_starts=n_evals)
                writer.add_scalar('Reward/Train', running_reward,
                                  cumulative_timestep)
                writer.add_scalar('Reward/Test', eval_reward,
                                  cumulative_timestep)
                print(
                    'Episode {} \t Samples {} \t Avg length: {} \t Test reward: {} \t Train reward: {} \t Number of Policy Updates: {}'
                    .format(i_episode, samples_number, avg_length, eval_reward,
                            running_reward, n_updates))
                episode_steps = []
                episode_rewards = []
            if cumulative_timestep % gif_interval == 0:
                make_gif(agent, env, cumulative_timestep, state_filter)
                if save_model:
                    make_checkpoint(agent, cumulative_timestep, params['env'])

        episode_steps.append(time_step)
        episode_rewards.append(episode_reward)
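
evaluate_agent is called in the logging branch above but not defined in this excerpt. A plausible sketch, reusing only names visible at the call site (agent.get_action, state_filter, n_starts), averages the undiscounted return over a few rollouts with the current policy:

def evaluate_agent(env, agent, state_filter=None, n_starts=1):
    # average undiscounted return over n_starts evaluation episodes
    total_reward = 0.0
    for _ in range(n_starts):
        state = env.reset()
        done = False
        while not done:
            action = agent.get_action(state, state_filter=state_filter)
            state, reward, done, _ = env.step(action)
            total_reward += reward
    return total_reward / n_starts
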
Exemplo n.º 30
0
def build_weixin_message(weixin):
    items = []
    index = 0
    message_title = ''
    if weixin.cover.name != u'' and weixin.title != u'' and weixin.recommended_reason != u'':
        message_title = weixin.title
        items.append(
            WeixinMessageItem(image=weixin.cover.path,
                              title=weixin.title,
                              digest=weixin.title,
                              content=weixin.recommended_reason))
        index += 1
    for news in weixin.news.all():
        title = u'游戏情报站  -  %s' % news.brief_comment  # "Game intel station - <brief comment>"
        if index == 0:
            message_title = title
        items.append(
            WeixinMessageItem(
                image=make_gif(news),
                title=title,
                digest=title,
                content=news.recommended_reason +
                u'<br><br><font color="gray">点击“阅读原文”查看更多</font>',  # "Click 'Read the original' for more"
                sourceurl=u'http://cow.bestgames7.com/d/%s/' % news.id))
        index += 1
    for game in weixin.games.all():
        title = u'%s  -  %s' % (game.name, game.brief_comment)
        if index == 0:
            message_title = title
        items.append(
            WeixinMessageItem(
                image=make_gif(game),
                title=title,
                digest=title,
                content=_normalize_content(game),
                sourceurl=u'http://cow.bestgames7.com/d/%s/' % game.id))
        index += 1
    for player in weixin.players.all():
        title = u'我是玩家  -  %s' % player.brief_comment  # "I'm a player - <brief comment>"
        if index == 0:
            message_title = title
        items.append(
            WeixinMessageItem(image=player.image_url.path,
                              title=title,
                              digest=title,
                              content=player.recommended_reason))
        index += 1
    for puzzle in weixin.puzzles.all():
        title = u'趣味答题  -  %s' % puzzle.title  # "Fun quiz - <puzzle title>"
        if index == 0:
            message_title = title
        content = puzzle.description + '<br>'
        content += 'A.' + puzzle.option1 + '<br>'
        content += 'B.' + puzzle.option2 + '<br>'
        content += 'C.' + puzzle.option3 + '<br>'
        content += 'D.' + puzzle.option4 + '<br></br>'
        content += u'<font color="gray">回复"答题",参与答题得积分换礼品的活动吧</font><br>'  # "Reply 'quiz' to join the quiz and earn points for gifts"
        items.append(
            WeixinMessageItem(image=puzzle.image_url.path,
                              title=title,
                              digest=title,
                              content=content))
        index += 1
    return WeixinMessage(weixin.id, message_title, items)
Exemplo n.º 31
0
    def train(self):

        # Seed
        np.random.seed(self.manual_seed)
        random.seed(self.manual_seed)
        torch.manual_seed(self.manual_seed)

        # For fast training
        cudnn.benchmark = True

        # For BatchNorm
        self.G.train()
        self.D.train()

        # Fixed noise for sampling from G
        fixed_noise = torch.randn(self.batch_size,
                                  self.z_dim,
                                  device=self.device)
        if self.num_of_classes < self.batch_size:
            fixed_labels = torch.from_numpy(
                np.tile(np.arange(self.num_of_classes),
                        self.batch_size // self.num_of_classes +
                        1)[:self.batch_size]).to(self.device)
        else:
            fixed_labels = torch.from_numpy(np.arange(self.batch_size)).to(
                self.device)

        # For gan loss
        label = torch.full((self.batch_size, ), 1, device=self.device)
        ones = torch.full((self.batch_size, ), 1, device=self.device)

        # Losses file
        log_file_name = os.path.join(self.save_path, 'log.txt')
        log_file = open(log_file_name, "wt")

        # Init
        start_time = time.time()
        G_losses = []
        D_losses_real = []
        D_losses_fake = []
        D_losses = []
        D_xs = []
        D_Gz_trainDs = []
        D_Gz_trainGs = []

        # Instance noise - make random noise mean (0) and std for injecting
        inst_noise_mean = torch.full(
            (self.batch_size, 3, self.imsize, self.imsize),
            0,
            device=self.device)
        inst_noise_std = torch.full(
            (self.batch_size, 3, self.imsize, self.imsize),
            self.inst_noise_sigma,
            device=self.device)

        # Start training
        for self.step in range(self.start, self.total_step):

            # Instance noise std is linearly annealed from self.inst_noise_sigma to 0 thru self.inst_noise_sigma_iters
            inst_noise_sigma_curr = 0 if self.step > self.inst_noise_sigma_iters else (
                1 - self.step /
                self.inst_noise_sigma_iters) * self.inst_noise_sigma
            inst_noise_std.fill_(inst_noise_sigma_curr)

            # ================== TRAIN D ================== #

            for _ in range(self.d_steps_per_iter):

                # Zero grad
                self.reset_grad()

                # TRAIN with REAL

                # Get real images & real labels
                real_images, real_labels = self.get_real_samples()

                # Get D output for real images & real labels
                inst_noise = torch.normal(mean=inst_noise_mean,
                                          std=inst_noise_std).to(self.device)
                d_out_real = self.D(real_images + inst_noise, real_labels)

                # Compute D loss with real images & real labels
                if self.adv_loss == 'hinge':
                    d_loss_real = torch.nn.ReLU()(ones - d_out_real).mean()
                elif self.adv_loss == 'wgan_gp':
                    d_loss_real = -d_out_real.mean()
                else:
                    label.fill_(1)
                    d_loss_real = self.criterion(d_out_real, label)

                # Backward
                d_loss_real.backward()

                # TRAIN with FAKE

                # Create random noise
                z = torch.randn(self.batch_size,
                                self.z_dim,
                                device=self.device)

                # Generate fake images for same real labels
                fake_images = self.G(z, real_labels)

                # Get D output for fake images & same real labels
                inst_noise = torch.normal(mean=inst_noise_mean,
                                          std=inst_noise_std).to(self.device)
                d_out_fake = self.D(fake_images.detach() + inst_noise,
                                    real_labels)

                # Compute D loss with fake images & real labels
                if self.adv_loss == 'hinge':
                    d_loss_fake = torch.nn.ReLU()(ones + d_out_fake).mean()
                elif self.adv_loss == 'dcgan':
                    label.fill_(0)
                    d_loss_fake = self.criterion(d_out_fake, label)
                else:
                    d_loss_fake = d_out_fake.mean()

                # Backward
                d_loss_fake.backward()

                # If WGAN_GP, compute GP and add to D loss
                if self.adv_loss == 'wgan_gp':
                    d_loss_gp = self.lambda_gp * self.compute_gradient_penalty(
                        real_images, real_labels, fake_images.detach())
                    d_loss_gp.backward()

                # Optimize
                self.D_optimizer.step()

            # ================== TRAIN G ================== #

            for _ in range(self.g_steps_per_iter):

                # Zero grad
                self.reset_grad()

                # Get real images & real labels (only need real labels)
                real_images, real_labels = self.get_real_samples()

                # Create random noise
                z = torch.randn(self.batch_size, self.z_dim).to(self.device)

                # Generate fake images for same real labels
                fake_images = self.G(z, real_labels)

                # Get D output for fake images & same real labels
                inst_noise = torch.normal(mean=inst_noise_mean,
                                          std=inst_noise_std).to(self.device)
                g_out_fake = self.D(fake_images + inst_noise, real_labels)

                # Compute G loss with fake images & real labels
                if self.adv_loss == 'dcgan':
                    label.fill_(1)
                    g_loss = self.criterion(g_out_fake, label)
                else:
                    g_loss = -g_out_fake.mean()

                # Backward + Optimize
                g_loss.backward()
                self.G_optimizer.step()

            # Print out log info
            if self.step % self.log_step == 0:
                G_losses.append(g_loss.mean().item())
                D_losses_real.append(d_loss_real.mean().item())
                D_losses_fake.append(d_loss_fake.mean().item())
                D_loss = D_losses_real[-1] + D_losses_fake[-1]
                if self.adv_loss == 'wgan_gp':
                    D_loss += d_loss_gp.mean().item()
                D_losses.append(D_loss)
                D_xs.append(d_out_real.mean().item())
                D_Gz_trainDs.append(d_out_fake.mean().item())
                D_Gz_trainGs.append(g_out_fake.mean().item())
                curr_time = time.time()
                curr_time_str = datetime.datetime.fromtimestamp(
                    curr_time).strftime('%Y-%m-%d %H:%M:%S')
                elapsed = str(
                    datetime.timedelta(seconds=(curr_time - start_time)))
                log = (
                    "[{}] : Elapsed [{}], Iter [{} / {}], G_loss: {:.4f}, D_loss: {:.4f}, D_loss_real: {:.4f}, D_loss_fake: {:.4f}, D(x): {:.4f}, D(G(z))_trainD: {:.4f}, D(G(z))_trainG: {:.4f}\n"
                    .format(curr_time_str, elapsed, self.step, self.total_step,
                            G_losses[-1], D_losses[-1], D_losses_real[-1],
                            D_losses_fake[-1], D_xs[-1], D_Gz_trainDs[-1],
                            D_Gz_trainGs[-1]))
                print(log)
                log_file.write(log)
                log_file.flush()
                utils.make_plots(G_losses, D_losses, D_losses_real,
                                 D_losses_fake, D_xs, D_Gz_trainDs,
                                 D_Gz_trainGs, self.log_step, self.save_path)

            # Sample images
            if self.step % self.sample_step == 0:
                self.G.eval()
                fake_images = self.G(fixed_noise, fixed_labels)
                self.G.train()
                sample_images = utils.denorm(
                    fake_images.detach()[:self.save_n_images])
                # Save batch images
                vutils.save_image(
                    sample_images,
                    os.path.join(self.sample_path,
                                 'fake_{:05d}.png'.format(self.step)))
                # Save gif
                utils.make_gif(
                    sample_images[0].cpu().numpy().transpose(1, 2, 0) * 255,
                    self.step,
                    self.sample_path,
                    self.name,
                    max_frames_per_gif=self.max_frames_per_gif)

            # Save model
            if self.step % self.model_save_step == 0:
                utils.save_ckpt(self)
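
compute_gradient_penalty is used in the 'wgan_gp' branch but not shown here. A standard WGAN-GP penalty for a conditional discriminator D(images, labels), sketched under the assumption that the caller scales it by self.lambda_gp (as the code above does), would be:

def compute_gradient_penalty(self, real_images, real_labels, fake_images):
    # interpolate between real and fake samples and penalize gradient norms away from 1
    alpha = torch.rand(real_images.size(0), 1, 1, 1, device=self.device)
    interpolates = (alpha * real_images + (1 - alpha) * fake_images).requires_grad_(True)
    d_interpolates = self.D(interpolates, real_labels)
    gradients = torch.autograd.grad(outputs=d_interpolates.sum(),
                                    inputs=interpolates,
                                    create_graph=True)[0]
    gradients = gradients.view(gradients.size(0), -1)
    return ((gradients.norm(2, dim=1) - 1) ** 2).mean()
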
Exemplo n.º 32
0
            disc_loss_fake.append(d_loss_f)
        gen_loss = np.mean(np.asarray(gen_loss))
        disc_loss_real = np.mean(np.asarray(disc_loss_real))
        disc_loss_fake = np.mean(np.asarray(disc_loss_fake))

        if (np.isnan(gen_loss) or np.isnan(disc_loss_real)
                or np.isnan(disc_loss_fake)):
            print("Something broke.")
            break

        manager.save()
        generate_and_save_images(gen, ep + 1, seed, out_path)

        print("Time for epoch:", time.time() - start)
        print("Gen loss=", gen_loss)
        print("Disc loss real=", disc_loss_real)
        print("Disc loss fake=", disc_loss_fake)


if __name__ == "__main__":
    args = parse_args()
    epochs = args.epochs
    batch_size = args.batch
    ckpt_path = args.checkpoint
    imgs_path = args.imgs
    out_path = args.output
    lr = args.lr

    train(epochs, batch_size, ckpt_path, imgs_path, lr, out_path)
    make_gif(out_path)
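
generate_and_save_images and make_gif come from the project's helpers. For context, a common shape for the per-epoch sampler (assuming a 16-vector seed and single-channel images normalised to [-1, 1]; the real helper may differ) is:

import matplotlib.pyplot as plt

def generate_and_save_images(model, epoch, test_input, out_path):
    # run the generator in inference mode on a fixed latent batch and save a 4x4 grid
    predictions = model(test_input, training=False)
    fig = plt.figure(figsize=(4, 4))
    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i + 1)
        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')
    fig.savefig('{}/image_at_epoch_{:04d}.png'.format(out_path, epoch))
    plt.close(fig)
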
Exemplo n.º 33
0
        if (batch_index + 1) % 5 == 0:
            text_logger.info("Epoch [%d/%d], Step[%d/%d], d_loss: %.4f, g_loss: %.4f, \
                             g_val_loss: %.4f, time: %4.4f" \
                             % (current_epoch, num_epoch, batch_index+1, num_batch, \
                             d_loss.data[0], g_loss.data[0], g_val_loss.data[0], time.time()-start_time))

        counter += 1

        if (batch_index + 1) % 100 == 0:
            gen_out = generator(sample_input)

            save_img(
                sample_input.data.cpu(), DIR_TO_SAVE +
                'fake_gifs_sample_%s_%s_a.jpg' % (current_epoch, batch_index))
            make_gif(
                denorm(gen_out.data.cpu()[0]), DIR_TO_SAVE +
                'fake_gifs_sample__%s_%s_b.gif' % (current_epoch, batch_index))

            save_img(
                first_frame[0].data.cpu(), DIR_TO_SAVE +
                'fake_gifs_%s_%s_a.jpg' % (current_epoch, batch_index))
            make_gif(
                denorm(fake_videos.data.cpu()[0]), DIR_TO_SAVE +
                'fake_gifs_%s_%s_b.gif' % (current_epoch, batch_index))

            text_logger.info('Gifs saved at epoch: %d, batch_index: %d' %
                             (current_epoch, batch_index))

        if (batch_index + 1) % 1000 == 0:
            torch.save(generator.state_dict(), './generator.pkl')
            torch.save(discriminator.state_dict(), './discriminator.pkl')
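
denorm above undoes the usual [-1, 1] tanh normalisation before frames are written out; if it is not already provided by the project's utilities, a one-line sketch along these lines is what the call sites expect:

def denorm(x):
    # map generator outputs from [-1, 1] back to [0, 1], clamped for safe image export
    return ((x + 1) / 2).clamp(0, 1)
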
Exemplo n.º 34
0
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size, y_dim=10,
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                                      [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                                      [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                                      [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                                      [dcgan.h4_w, dcgan.h4_b, None])

        # Below is codes for visualization
        OPTION = 2
        if OPTION == 0:
          z_sample = np.random.uniform(-0.5, 0.5, size=(FLAGS.batch_size, dcgan.z_dim))
          samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
          save_images(samples, [8, 8], './samples/test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        elif OPTION == 1:
          values = np.arange(0, 1, 1./FLAGS.batch_size)
          for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
              z[idx] = values[kdx]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            save_images(samples, [8, 8], './samples/test_arange_%s.png' % (idx))
        elif OPTION == 2:
          values = np.arange(0, 1, 1./FLAGS.batch_size)
          for idx in [random.randint(0, 99) for _ in xrange(100)]:
            print(" [*] %d" % idx)
            z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
            z_sample = np.tile(z, (FLAGS.batch_size, 1))
            #z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
              z[idx] = values[kdx]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            make_gif(samples, './samples/test_gif_%s.gif' % (idx))
        elif OPTION == 3:
          values = np.arange(0, 1, 1./FLAGS.batch_size)
          for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
              z[idx] = values[kdx]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            make_gif(samples, './samples/test_gif_%s.gif' % (idx))
        elif OPTION == 4:
          image_set = []
          values = np.arange(0, 1, 1./FLAGS.batch_size)

          for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample): z[idx] = values[kdx]

            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))

          new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) for idx in range(64) + range(63, -1, -1)]
          make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
        elif OPTION == 5:
          image_set = []
          values = np.arange(0, 1, 1./FLAGS.batch_size)
          z_idx = [[random.randint(0,99) for _ in xrange(5)] for _ in xrange(200)]

          for idx in xrange(200):
            print(" [*] %d" % idx)
            #z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            z = np.random.uniform(-1e-1, 1e-1, size=(dcgan.z_dim))
            z_sample = np.tile(z, (FLAGS.batch_size, 1))

            for kdx, z in enumerate(z_sample):
              for jdx in xrange(5):
                z_sample[kdx][z_idx[idx][jdx]] = values[kdx]

            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))

          new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 20]) for idx in range(64) + range(63, -1, -1)]
          make_gif(new_image_set, './samples/test_gif_random_merged.gif', duration=4)
        elif OPTION == 6:
          image_set = []

          values = np.arange(0, 1, 1.0/FLAGS.batch_size).tolist()
          z_idx = [[random.randint(0,99) for _ in xrange(10)] for _ in xrange(100)]

          for idx in xrange(100):
            print(" [*] %d" % idx)
            z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
            z_sample = np.tile(z, (FLAGS.batch_size, 1))

            for kdx, z in enumerate(z_sample):
              for jdx in xrange(10):
                z_sample[kdx][z_idx[idx][jdx]] = values[kdx]

            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            save_images(image_set[-1], [8, 8], './samples/test_random_arange_%s.png' % (idx))

          new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) for idx in range(64) + range(63, -1, -1)]
          make_gif(new_image_set, './samples/test_gif_merged_random.gif', duration=4)
        elif OPTION == 7:
          for _ in xrange(50):
            z_idx = [[random.randint(0,99) for _ in xrange(10)] for _ in xrange(8)]

            zs = []
            for idx in xrange(8):
              z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
              zs.append(np.tile(z, (8, 1)))

            z_sample = np.concatenate(zs)
            values = np.arange(0, 1, 1/8.)

            for idx in xrange(FLAGS.batch_size):
              for jdx in xrange(8):
                z_sample[idx][z_idx[idx/8][jdx]] = values[idx%8]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            save_images(samples, [8, 8], './samples/multiple_test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        elif OPTION == 8:
          counter = 0
          for _ in xrange(50):
            import scipy.misc
            z_idx = [[random.randint(0,99) for _ in xrange(10)] for _ in xrange(8)]

            zs = []
            for idx in xrange(8):
              z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
              zs.append(np.tile(z, (8, 1)))

            z_sample = np.concatenate(zs)
            values = np.arange(0, 1, 1/8.)

            for idx in xrange(FLAGS.batch_size):
              for jdx in xrange(8):
                z_sample[idx][z_idx[idx/8][jdx]] = values[idx%8]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            for sample in samples:
              scipy.misc.imsave('./samples/turing/%s.png' % counter, sample)
              counter += 1
        else:
          import scipy.misc
          from glob import glob

          samples = []
          fnames = glob("/Users/carpedm20/Downloads/x/1/*.png")
          fnames = sorted(fnames, key = lambda x: int(x.split("_")[1]) * 10000 + int(x.split('_')[2].split(".")[0]))
          for f in fnames:
            samples.append(scipy.misc.imread(f))
          make_gif(samples, './samples/training.gif', duration=8, true_image=True)
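
All of the make_gif calls in this example pass an image batch, an output path, and optional duration/true_image flags. A moviepy-based sketch consistent with those call sites (an assumption about the utility, not a verbatim copy of it) is:

import numpy as np
import moviepy.editor as mpy

def make_gif(images, fname, duration=2, true_image=False):
    # map a time t in [0, duration] to a frame, rescaling [-1, 1] samples to uint8
    # unless the frames are already true images
    def make_frame(t):
        idx = min(int(len(images) / duration * t), len(images) - 1)
        x = images[idx]
        if true_image:
            return x.astype(np.uint8)
        return ((x + 1) / 2 * 255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration)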