def generate_image(frame):
    samples = session.run(x_generated, feed_dict={z: fixed_noise})
    samples = (samples * 255.99).astype(
        'uint8')  # transform linearly from [0,1] to int[0,255]
    samples = samples.reshape(picture_dim)
    save_images.save_images(samples,
                            samples_dir + 'iteration_{}.png'.format(frame))
Example no. 2
def train(input, target, netG, model):
    onehot_class_vector = torch.zeros(1, 1000)
    onehot_class_vector[0][target] = 1
    onehot_class_vector = onehot_class_vector.cuda()

    netG.train()
    model.train()
    model = model.cuda()
    netG = netG.cuda()

    print('====> Test Image Norm & Restore')
    input_interpolate = F.interpolate(input, size=(args.size, args.size), mode='bilinear', align_corners=False).cuda()
    # save_as_images(_image_restore(input_interpolate).cpu().detach(), '{0}/input_interpolate'.format(basic_path))
    save_images.save_images(_image_restore(input_interpolate).cpu().numpy(), '{0}/input'.format(basic_path))
    # save_as_images(_image_restore(_image_norm(_image_restore(input_interpolate))).cpu().detach(), '{0}/input_interpolate_restore'.format(basic_path))

    input = input.cuda()
    print('input:', input.size())

    _, feature_ini = model(input, isda=True)

    # augmentation: Gaussian sampling
    # feature_objective = torch.zeros(args.aug_num, 2048)  # (aug_num, 2048)

    print('====> Start Training')
    # z_trained = F_inverse(model, netG, input, class_vector, feature_ini, feature_objective)
    F_inverse(model, netG, input, onehot_class_vector, feature_ini)
Example no. 3
def generate_image(frame):
    samples = session.run(x_generated, feed_dict={
        z: fixed_noise
    }).squeeze()
    # print(samples.shape)
    save_images.save_images(samples.reshape((fixed_noise_size, 28, 28)),
                            samples_dir + 'iteration_{}.png'.format(frame))
Example no. 4
def train(input, target, netG, model):
    onehot_class_vector = torch.zeros(1, 1000)
    onehot_class_vector[0][target] = 1
    onehot_class_vector = onehot_class_vector.cuda()

    netG.train()
    model.train()
    model = model.cuda()
    netG = netG.cuda()

    print('====> Test Image Norm & Restore')
    input_interpolate = F.interpolate(input,
                                      size=(512, 512),
                                      mode='bilinear',
                                      align_corners=True).cuda()

    save_as_images(
        _image_restore(input_interpolate).cpu().detach(),
        '{0}/input_interpolate'.format(basic_path))
    save_as_images(
        _image_restore(_image_norm(
            _image_restore(input_interpolate))).cpu().detach(),
        '{0}/input_interpolate_restore'.format(basic_path))

    save_images.save_images(_image_restore(input_interpolate).cpu().detach().numpy(),
                            '{0}/input_interpolate'.format(basic_path))
    save_images.save_images(_image_restore(_image_norm(_image_restore(input_interpolate))).cpu().detach().numpy(),
                            '{0}/input_interpolate_restore'.format(basic_path))

    # inputs = input.expand(args.aug_number, input.size(0), input.size(1), input.size(2)).cuda()
    input = input.cuda()
    print('input:', input.size())
    _, feature_ini = model(input, isda=True)
    '''
    print('====> Preparing Covariance')
    var_dir = './Covariance/' + str(target) + '_cov_imagenet' + '.csv'
    print(var_dir)

    var = np.loadtxt(var_dir, delimiter=' ')
    CV = np.diag(var)
    print('CV:', CV.shape)
    '''

    # augmentation: Gaussian sampling
    feature_objective = torch.zeros(args.aug_number,
                                    2048)  # (aug_number, 2048)
    '''
    for i in range(args.aug_number):
        aug_noise = np.random.multivariate_normal([0 for j in range(var.shape[0])], args.aug_alpha * CV)
        aug_noise = torch.Tensor(aug_noise).cuda()
        feature_objective[i] = feature_ini + aug_noise   # feature_objective: \tilde{a_i}

    print('feature_objective:', feature_objective.size())
    '''

    print('====> Start Training')
    # z_trained = F_inverse(model, netG, input, class_vector, feature_ini, feature_objective)
    F_inverse(model, netG, input, onehot_class_vector, feature_ini,
              feature_objective)
    '''fake_img_aug = netG(z_trained, class_vector).mul(0.5).add(0.5)
Example no. 5
def plot_anoms(res, anom_fn, n=128):
    res = sort_by_score(res)
    res = res[-n:]
    reals = res[:, 0]
    reals = np.stack(reals, axis=0)
    print(reals)
    print(reals.shape)
    saver.save_images(reals.reshape((n, 96, 96)), anom_fn)
Example no. 6
def plot_anoms(res, imarr, anom_fn, n=128):
    idx_sorted = np.argsort(res['anomaly_scores'])
    sample = idx_sorted[-n:]
    reals = np.array([imarr['images'][s] for s in sample])
    #res = sort_by_score(res)
    reals = reals.reshape((-1,96,96,NBANDS))
    print(reals.shape)
    reals = utils.luptonize(reals).astype('int')
    #reals = np.stack(reals, axis=0)
    #print(reals)
    #print(reals.shape)
    saver.save_images(reals, anom_fn)
Example no. 7
def generate_image(frame):
    samples = session.run(x_generated, feed_dict={z: fixed_noise}).squeeze()
    if type(frame) == int:
        save_images.save_images(
            samples.reshape((fixed_noise_size, 28, 28)),
            samples_dir + 'iteration_{}.png'.format(frame)
        )
    elif type(frame) == str:
        save_images.save_images(
            samples.reshape((fixed_noise_size, 28, 28)),
            samples_dir + frame + '.png'
        )
Example no. 8
    def generate(self, frame, netG):
        noise = torch.randn(self.opt.batch_size, self.opt.latent_dim)
        if self.opt.use_cuda:
            noise = noise.cuda(0)
        noisev = autograd.Variable(noise, volatile=True)

        ones = torch.ones(self.opt.batch_size, self.opt.x_dim * self.opt.y_dim, 1)
        if self.opt.use_cuda:
            ones = ones.cuda()

        seed = torch.bmm(ones, noisev.unsqueeze(1))

        samples = netG(self.x, self.y, self.r, seed)

        samples = samples.view(-1, self.opt.z_dim, self.opt.x_dim, self.opt.y_dim)
        samples = samples.mul(0.5).add(0.5)
        samples = samples.cpu().data.numpy()

        save_images(samples, './tmp/cppn/samples_{}.jpg'.format(frame))
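The torch.bmm call above is just a broadcasting trick: it copies one latent vector to every (x, y) coordinate of the CPPN grid. A standalone shape check, with illustrative sizes rather than this repo's actual options:

import torch

batch_size, latent_dim, x_dim, y_dim = 4, 8, 28, 28
noise = torch.randn(batch_size, latent_dim)
ones = torch.ones(batch_size, x_dim * y_dim, 1)
seed = torch.bmm(ones, noise.unsqueeze(1))  # (B, x*y, 1) @ (B, 1, latent)
print(seed.shape)  # torch.Size([4, 784, 8])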
Example no. 9
    def generate(self, frame, netG):
        noise = torch.randn(self.opt.batch_size, self.opt.latent_dim)
        if self.opt.use_cuda:
            noise = noise.cuda(0)
        noisev = autograd.Variable(noise, volatile=True)

        ones = torch.ones(self.opt.batch_size, self.opt.x_dim * self.opt.y_dim,
                          1)
        if self.opt.use_cuda:
            ones = ones.cuda()

        seed = torch.bmm(ones, noisev.unsqueeze(1))

        samples = netG(self.x, self.y, self.r, seed)

        samples = samples.view(-1, self.opt.z_dim, self.opt.x_dim,
                               self.opt.y_dim)
        samples = samples.mul(0.5).add(0.5)
        samples = samples.cpu().data.numpy()

        save_images(samples, './tmp/cppn/samples_{}.jpg'.format(frame))
Example no. 10
def train(input, target, netG, model, vgg):
    onehot_class_vector = torch.zeros(1, 1000)
    onehot_class_vector[0][target] = 1
    onehot_class_vector = onehot_class_vector.cuda()

    netG.train()
    model.train()
    vgg.train()
    model = model.cuda()
    netG = netG.cuda()
    vgg = vgg.cuda()

    print('====> Test Image Norm & Restore')
    input_interpolate = F.interpolate(input, size=(args.size, args.size), mode='bilinear', align_corners=False).cuda()
    # save_as_images(_image_restore(input_interpolate).cpu().detach(), '{0}/input_interpolate'.format(basic_path))
    save_images.save_images(_image_restore(input_interpolate).cpu().numpy(), '{0}/input'.format(basic_path))
    # save_as_images(_image_restore(_image_norm(_image_restore(input_interpolate))).cpu().detach(), '{0}/input_interpolate_restore'.format(basic_path))

    input = input.cuda()
    print('input:', input.size())

    _, feature_ini = model(input, isda=True)

    # augmentation: Gaussian sampling
    feature_objective = torch.zeros(args.aug_number, 2048)  # (aug_number, 2048)
    '''
    for i in range(args.aug_number):
        aug_noise = np.random.multivariate_normal([0 for j in range(var.shape[0])], args.aug_alpha * CV)
        aug_noise = torch.Tensor(aug_noise).cuda()
        feature_objective[i] = feature_ini + aug_noise   # feature_objective: \tilde{a_i}

    print('feature_objective:', feature_objective.size())
    '''

    print('====> Start Training')
    # z_trained = F_inverse(model, netG, input, class_vector, feature_ini, feature_objective)
    F_inverse(model, netG, vgg, input, onehot_class_vector, feature_ini, feature_objective)

    '''fake_img_aug = netG(z_trained, class_vector).mul(0.5).add(0.5)
Example no. 11
    plot.tick()

    # Generation and reconstruction
    if iter_ % test_interval == test_interval - 1:
        generator.eval()
        discriminator.eval()
        extractor.eval()
        generate_image(iter_, fixed_noise, generator, method, outf)
        ori_images = []
        gen_images = []
        for batch_id, data in enumerate(test_loader):
            input_, label = data
            input_ = input_.cuda()
            sample, sample_mean, sample_std = extractor(input_)
            rec_x, rex_x_mean, rec_x_std = generator(sample)
            ori_images.append(((input_.cpu() + 1.0) * 255.0 /
                               2).detach().numpy().astype('int32'))
            gen_images.append(((rec_x.cpu() + 1.0) * 255.0 /
                               2).detach().numpy().astype('int32'))
        ori_images = np.array(ori_images)
        gen_images = np.array(gen_images)
        save_images.save_images(
            ori_images.reshape((-1, 3, 32, 32)),
            os.path.join(outf, '{}/{:06d}_origin.png'.format(method, iter_)))
        save_images.save_images(
            gen_images.reshape((-1, 3, 32, 32)),
            os.path.join(outf, '{}/{:06d}_gen.png'.format(method, iter_)))
        generator.train()
        discriminator.train()
        extractor.train()
Example no. 12
def down_lst_img(specie, num_min=NUM_MIN):
    site = Site(specie.url, num_min)
    site.browser.quit()
    save_images(site.lst_img_path, specie.folder)
Example no. 13
    def run(self):
        x, yx, _ = self.load_data(self.batch_size)
        xs = tf.split(x, FLAGS.num_gpus)
        yxs = tf.split(yx, FLAGS.num_gpus)

        labels = tf.placeholder(
            tf.int32, [FLAGS.num_gpus, self.batch_size // FLAGS.num_gpus])
        z = tf.placeholder(
            tf.float32,
            [FLAGS.num_gpus, self.batch_size // FLAGS.num_gpus, 100])
        d_adam = tf.train.AdamOptimizer(4e-4, beta1=0.5, beta2=0.999)
        g_adam = tf.train.AdamOptimizer(1e-4, beta1=0.5, beta2=0.999)

        d_grads = []
        g_grads = []
        for i in range(FLAGS.num_gpus):
            with tf.device('/gpu:{:d}'.format(i)):
                with tf.variable_scope('D', reuse=tf.AUTO_REUSE):
                    Dx, Dx_logits = self.discriminator(xs[i], yxs[i])
                with tf.variable_scope('G', reuse=tf.AUTO_REUSE):
                    G = self.generator(z[i], labels[i])
                with tf.variable_scope('D', reuse=tf.AUTO_REUSE):
                    Dg, Dg_logits = self.discriminator(G, labels[i])

                loss_d, loss_g = self.losses(Dx_logits, Dg_logits, Dx, Dg)

                vars = tf.trainable_variables()
                for v in vars:
                    print(v.name)
                d_params = [v for v in vars if v.name.startswith('D/')]
                g_params = [v for v in vars if v.name.startswith('G/')]

                d_grads.append(
                    d_adam.compute_gradients(loss_d, var_list=d_params))
                g_grads.append(
                    g_adam.compute_gradients(loss_g, var_list=g_params))

        d_opt = d_adam.apply_gradients(average_gradients(d_grads))
        g_opt = g_adam.apply_gradients(average_gradients(g_grads))

        d_saver = tf.train.Saver(d_params)
        g_saver = tf.train.Saver(g_params)

        start_time = time.time()
        if FLAGS.checkpoint_dir:
            sample_directory = FLAGS.checkpoint_dir
        else:
            sample_directory = 'generated_images/{}/{}'.format(
                self.name, start_time)
            if not os.path.exists(sample_directory):
                os.makedirs(sample_directory)
            shutil.copy(os.path.abspath(__file__), sample_directory)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as session:
            tf.local_variables_initializer().run()
            tf.global_variables_initializer().run()

            start_step = 1
            if FLAGS.checkpoint_dir:
                print('attempting to load checkpoint from {}'.format(
                    FLAGS.checkpoint_dir))

                d_checkpoint_dir = FLAGS.checkpoint_dir + "/discriminator_checkpoints"
                d_checkpoint = tf.train.get_checkpoint_state(d_checkpoint_dir)
                if d_checkpoint and d_checkpoint.model_checkpoint_path:
                    checkpoint_basename = os.path.basename(
                        d_checkpoint.model_checkpoint_path)
                    checkpoint_step = int(checkpoint_basename.split("-")[1])
                    print(
                        "starting training at step {}".format(checkpoint_step))
                    start_step = checkpoint_step
                    d_saver.restore(session,
                                    d_checkpoint.model_checkpoint_path)
                g_checkpoint_dir = FLAGS.checkpoint_dir + "/generator_checkpoints"
                g_checkpoint = tf.train.get_checkpoint_state(g_checkpoint_dir)
                if g_checkpoint and g_checkpoint.model_checkpoint_path:
                    g_saver.restore(session,
                                    g_checkpoint.model_checkpoint_path)
            else:
                print(
                    'no checkpoint specified, starting training from scratch')

            previous_step_time = time.time()
            d_epoch_losses = []
            g_epoch_losses = []
            for step in range(start_step, self.training_steps + 1):
                # update discriminator
                gen_labels = np.random.randint(
                    0, self.categories,
                    [FLAGS.num_gpus, self.batch_size // FLAGS.num_gpus])
                latent = 2 * np.random.rand(
                    FLAGS.num_gpus, self.batch_size // FLAGS.num_gpus, 100) - 1
                d_batch_loss, _ = session.run([loss_d, d_opt], {
                    labels: gen_labels,
                    z: latent
                })
                d_epoch_losses.append(d_batch_loss)

                # update generator
                gen_labels = np.random.randint(
                    0, self.categories,
                    [FLAGS.num_gpus, self.batch_size // FLAGS.num_gpus])
                latent = 2 * np.random.rand(
                    FLAGS.num_gpus, self.batch_size // FLAGS.num_gpus, 100) - 1
                g_batch_loss, _ = session.run([loss_g, g_opt], {
                    labels: gen_labels,
                    z: latent
                })
                g_epoch_losses.append(g_batch_loss)

                if step % 100 == 0:
                    current_step_time = time.time()
                    time_elapsed = current_step_time - previous_step_time
                    steps_per_sec = 100 / time_elapsed
                    eta_seconds = (self.training_steps - step) / \
                        (steps_per_sec + 0.0000001)
                    eta_minutes = eta_seconds / 60.0
                    print(
                        '[{:d}/{:d}] time: {:.2f}s, d_loss: {:.8f}, g_loss: {:.8f}, eta: {:.2f}m'
                        .format(step, self.training_steps, time_elapsed,
                                np.mean(d_epoch_losses),
                                np.mean(g_epoch_losses), eta_minutes))
                    d_epoch_losses = []
                    g_epoch_losses = []
                    previous_step_time = current_step_time

                if step % 1000 == 0:
                    # make an array of labels, with 10 labels each from batch_size/10 categories
                    gen_labels = np.tile(
                        np.repeat(
                            np.arange(
                                0, self.categories, self.categories /
                                (self.batch_size // FLAGS.num_gpus // 10)),
                            10), (FLAGS.num_gpus, 1))
                    print(gen_labels)
                    latent = 2 * np.random.rand(
                        FLAGS.num_gpus, self.batch_size // FLAGS.num_gpus,
                        100) - 1
                    gen_image, discriminator_confidence = session.run(
                        [G, Dg], {
                            labels: gen_labels,
                            z: latent
                        })
                    gen_image = np.transpose(gen_image, [0, 2, 3, 1])
                    save_images.save_images(
                        np.reshape(gen_image, [
                            self.batch_size // FLAGS.num_gpus, self.x, self.y,
                            3
                        ]), [self.batch_size // FLAGS.num_gpus // 10, 10],
                        sample_directory + '/{}gen.png'.format(step))

                if step % 1000 == 0 and self.output_real_images:
                    real_image, real_labels = session.run([x, yx])
                    real_image = np.transpose(real_image, [0, 2, 3, 1])
                    save_images.save_images(
                        np.reshape(real_image,
                                   [self.batch_size, self.x, self.y, 3]),
                        [self.batch_size // 10, 10],
                        sample_directory + '/{}real.png'.format(step))
                    print(real_labels)

                if step % 1000 == 0:
                    d_checkpoint_dir = sample_directory + "/discriminator_checkpoints"
                    if not os.path.exists(d_checkpoint_dir):
                        os.makedirs(d_checkpoint_dir)
                    d_saver.save(session,
                                 d_checkpoint_dir + '/discriminator.model',
                                 global_step=step)
                    g_checkpoint_dir = sample_directory + "/generator_checkpoints"
                    if not os.path.exists(g_checkpoint_dir):
                        os.makedirs(g_checkpoint_dir)
                    g_saver.save(session,
                                 g_checkpoint_dir + '/generator.model',
                                 global_step=step)

        total_time = time.time() - start_time
        print("{} steps took {} minutes".format(self.training_steps,
                                                total_time / 60))
Example no. 14
    def train(self, batch_size, training_steps, summary_steps,
              checkpoint_steps, save_steps):
        """
        参数:
        batch_size:
        training_steps:训练要经过多少迭代步
        summary_steps:每经过多少步就保存一次summary
        checkpoint_steps:每经过多少步就保存一次checkpoint文件
        save_steps:每经过多少步就保存一次图像
        """
        step_num = 0

        # Load the most recently saved checkpoint
        latest_checkpoint = tf.train.latest_checkpoint(self.checkpoint_dir)

        # A saved checkpoint exists and was read successfully
        if latest_checkpoint:
            # Recover the number of steps already trained; checkpoint files
            # are named like model-10000.index
            step_num = int(os.path.basename(latest_checkpoint).split("-")[1])
            assert step_num > 0, "Please ensure checkpoint format is model-*.*."  # if the step was not recovered, check the naming format

            # Restore from the most recent checkpoint
            self.saver.restore(self.sess, latest_checkpoint)
            # Write to the log file
            logging.info(
                "{}:Resume training from step {}.Loaded checkpoint {}".format(
                    datetime.now(), step_num, latest_checkpoint))
        # If no saved training files exist, initialize
        else:
            self.sess.run(tf.global_variables_initializer()
                          )  # initialize all variables (is the init used inside the Unet still effective?)
            # Write to the log file
            logging.info("{}:Init new training".format(datetime.now()))

        # Use a Read_TFRecords object to read the data: [batch_size,512,512,1], [batch_size,324,324,1]
        tf_reader = Unet_readata.Read_TFRecords(filename=os.path.join(
            self.training_set, "image.tfrecords"),
                                                batch_size=batch_size,
                                                image_h=self.image_h,
                                                image_w=self.image_w,
                                                image_c=self.image_c)
        images, images_mask = tf_reader.read()

        # Write to the log
        logging.info("{}:Done init data generators".format(datetime.now()))

        # Start the thread coordinator
        self.coord = tf.train.Coordinator()
        # Only after tf.train.start_queue_runners are the queue-filling threads
        # actually started; from then on the compute ops can fetch data
        threads = tf.train.start_queue_runners(sess=self.sess,
                                               coord=self.coord)
        # Run training inside the session
        try:
            """
            If an exception occurs while the statements after try execute, Python
            jumps back to try and runs the first except clause matching that
            exception; once it is handled, control flows past the whole try
            statement (unless a new exception is raised during handling). If no
            exception occurs, Python runs the else clause (if present) and then
            control flows past the try statement. The statements in finally are
            executed whether or not an exception was raised.
            """

            # Start training
            c_time = time.time()  # training start time
            lrval = self.learning_rate  # see the earlier comments on the learning rate
            # Note: if a previously trained model was loaded, step_num has already
            # been initialized to the number of completed iterations
            for c_step in range(step_num + 1, training_steps + 1):
                # Halve the learning rate after 5000 steps
                if c_step % 5000 == 0:
                    lrval = self.learning_rate * 0.5

                # Read the next batch from the TFRecords queue
                # b_time = time.time()
                batch_images, batch_images_masks = self.sess.run(
                    [images, images_mask])  # is an Iterator not needed here?

                # Run backpropagation / optimization
                # dq_time = time.time() - b_time  # time spent reading data
                # logging.info("{}:{}".format('read data', dq_time))
                # b_time = time.time()
                c_feed_dict = {
                    self.input_data: batch_images,
                    self.input_mask: batch_images_masks,
                    self.lr: lrval
                }
                self.sess.run(self.opt, feed_dict=c_feed_dict)
                # yh_time = time.time() - b_time  # time spent running the model
                # logging.info("{}:{}".format('run model', yh_time))
                # Save summary
                if c_step % summary_steps == 0:
                    #b_time=time.time()
                    c_summary = self.sess.run(self.summary,
                                              feed_dict=c_feed_dict)
                    self.writer.add_summary(c_summary, c_step)

                    e_time = time.time() - c_time  # total time for this summary period
                    time_periter = e_time / summary_steps  # average time per iteration
                    # Write to the log
                    logging.info("{}:Iteration_{}({:.4f}s/iter){}".format(
                        datetime.now(), c_step, time_periter,
                        self._print_summary(c_summary)))  # see _print_summary below
                    # su_time = time.time() - b_time
                    # logging.info("{}:{}".format('save summary', su_time))
                    c_time = time.time()  # reset the start time

                # Save a model checkpoint
                if c_step % checkpoint_steps == 0:
                    #b_time=time.time()
                    self.saver.save(self.sess,
                                    os.path.join(self.checkpoint_dir,
                                                 self.checkpoint_prefix),
                                    global_step=c_step)
                    # Write to the log
                    logging.info("{}:Iteration_{}Saved checkpoint".format(
                        datetime.now(), c_step))
                    # ch_time = time.time() - b_time
                    # logging.info("{}:{}".format('save checkpoint', ch_time))
                # Save sample images
                if c_step % save_steps == 0:
                    #b_time=time.time()
                    # Get the predicted segmentation mask and the ground-truth mask
                    _, output_masks, input_masks = self.sess.run(
                        [self.input_data, self.output, self.input_mask],
                        feed_dict=c_feed_dict)  # could input_data be dropped here?
                    # See save_images.py for save_images; the save location could also be changed
                    save_images(
                        None,
                        output_masks,
                        input_masks,
                        #self.sample_dir:train_results
                        input_path='{}/input_{:04d}.png'.format(
                            self.sample_dir, c_step),
                        image_path='{}/train_{:04d}.png'.format(
                            self.sample_dir, c_step))
                    # sa_time = time.time() - b_time
                    # logging.info("{}:{}".format('save sample', sa_time))
        except KeyboardInterrupt:  # the user interrupted execution
            print('Interrupted')
            self.coord.request_stop()  # tell the threads to stop
        except Exception as e:  # base class for ordinary errors
            self.coord.request_stop(e)  # hand the exception to the coordinator and stop the threads
        finally:
            self.coord.request_stop()  # main thread finished; stop all data-loading threads
            self.coord.join(threads)  # wait for the other threads to finish
        # Write to the log
        logging.info("{}: Done training".format(datetime.now()))
Example no. 15
def make_batch(arr, save_fn, n=128):
    arr = np.array(arr)
    ims = arr[:n]
    ims = ims.reshape((-1, 96, 96, NBANDS))  # reshape is not in-place; assign the result
    saver.save_images(ims, save_fn)
Example no. 16
    def train(self):
        self.net_mode(train=True)
        self.C_max = Variable(
            cuda(torch.FloatTensor([self.C_max]), self.use_cuda))
        out = False
        outFile = open("loss.txt", 'w')
        pbar = tqdm(total=self.max_iter)
        pbar.update(self.global_iter)

        while not out:
            mean_loss = 0
            for i in range(128):
                x = get_random_mnist_batch(self.x_train, 128)
                self.global_iter += 1
                pbar.update(1)
                x = torch.from_numpy(x).float()
                x = Variable(cuda(x, self.use_cuda))
                x_recon, mu, logvar = self.net(x)
                recon_loss = reconstruction_loss(x, x_recon, self.decoder_dist)
                total_kld, dim_wise_kld, mean_kld = kl_divergence(mu, logvar)

                if self.objective == 'H':
                    beta_vae_loss = recon_loss + self.beta * total_kld
                elif self.objective == 'B':
                    C = torch.clamp(
                        self.C_max / self.C_stop_iter * self.global_iter, 0,
                        self.C_max.data[0])
                    beta_vae_loss = recon_loss + self.gamma * (total_kld -
                                                               C).abs()

                self.optim.zero_grad()
                beta_vae_loss.backward()
                self.optim.step()
                mean_loss += beta_vae_loss.data.cpu().numpy()

                if self.viz_on and self.global_iter % self.gather_step == 0:
                    self.gather.insert(iter=self.global_iter,
                                       mu=mu.mean(0).data,
                                       var=logvar.exp().mean(0).data,
                                       recon_loss=recon_loss.data,
                                       total_kld=total_kld.data,
                                       dim_wise_kld=dim_wise_kld.data,
                                       mean_kld=mean_kld.data)

                if self.global_iter % self.display_step == 0:
                    fix_x_recon, fix_mu, fix_logvar = self.net(self.fix_x)
                    save_images.save_images(
                        np.transpose(
                            F.sigmoid(fix_x_recon).cpu().detach().numpy(),
                            axes=(0, 2, 3, 1)),
                        'reconstruction_{0}.png'.format(self.global_iter))
                    recon_loss_fix = reconstruction_loss(
                        self.fix_x, fix_x_recon, self.decoder_dist)
                    outFile.write(
                        "reconstruction loss: {0}\n".format(recon_loss_fix))
                    pbar.write(
                        '[{}] recon_loss:{:.3f} total_kld:{:.3f} mean_kld:{:.3f}'
                        .format(self.global_iter, recon_loss.data,
                                total_kld.data[0], mean_kld.data[0]))

                    var = logvar.exp().mean(0).data
                    var_str = ''
                    for j, var_j in enumerate(var):
                        var_str += 'var{}:{:.4f} '.format(j + 1, var_j)
                    pbar.write(var_str)

                    if self.objective == 'B':
                        pbar.write('C:{:.3f}'.format(C.data[0]))

                if self.global_iter % self.save_step == 0:
                    self.save_checkpoint('last')
                    pbar.write('Saved checkpoint(iter:{})'.format(
                        self.global_iter))

                if self.global_iter % 5000 == 0:
                    self.save_checkpoint(str(self.global_iter))

                if self.global_iter >= self.max_iter:
                    out = True
                    break
            mean_loss /= 128
            writer.add_scalar('mean_loss', mean_loss, self.global_iter)

        pbar.write("[Training Finished]")
        pbar.close()
        outFile.close()
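The kl_divergence helper used above is not shown. For a diagonal Gaussian posterior against a standard normal prior, the usual closed form (which the returned total/dim-wise/mean values suggest, though this is an assumption about the helper) is:

import torch


def kl_divergence(mu, logvar):
    # KL( N(mu, diag(exp(logvar))) || N(0, I) ), elementwise then aggregated.
    klds = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp())
    total_kld = klds.sum(1).mean(0, keepdim=True)
    dim_wise_kld = klds.mean(0)
    mean_kld = klds.mean(1).mean(0, keepdim=True)
    return total_kld, dim_wise_kld, mean_kld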
Example no. 17
    xr_train.append(c_xr)
xz_train = np.vstack(xz_train)
xr_train = np.vstack(xr_train)

batch_size = BATCH_SIZE
for i in range(0, x_test.shape[0], batch_size):
    c_xr, _, c_xz = getZandRec(sess, x_test[i:i + batch_size])
    xz_test.append(c_xz)
    xr_test.append(c_xr)
xz_test = np.vstack(xz_test)
xr_test = np.vstack(xr_test)

xr_train_loss = np.mean(np.square(x_train - xr_train), (1, 2, 3))
xr_test_loss = np.mean(np.square(x_test - xr_test), (1, 2, 3))

# cast before saving: save_images returns None, so chaining .astype on its result fails
save_images(x_test[test_idx_fake][:25].astype(np.int), 'Results/Fake_orig.png')
save_images(xr_test[test_idx_fake][:25].astype(np.int),
            'Results/Fake_reconstruct.png')

r_test_loss = xr_test_loss[test_idx_real]
r_fake_loss = xr_test_loss[test_idx_fake]
th_rloss, te_rloss = density_threshold(r_test_loss, r_fake_loss, step=0.0015)

ths_rloss = th_rloss[:, 1] + th_rloss[:, 2]
thv_rloss = te_rloss[np.argmin(ths_rloss)]
print('threshold value:', thv_rloss)
plt.plot(te_rloss, ths_rloss)
plt.title('threshold criterion')
plt.savefig("Results/Threshold_reconstruction.png")
plt.clf()
Example no. 18
def maybe_save_samples(step, gen_image, sample_directory):
    if step % 1000 == 0:
        save_images.save_images(np.reshape(gen_image,
                                           [BATCH_SIZE, 32, 32, 3]), [8, 8],
                                sample_directory + '/{}gen.png'.format(step))
Example no. 19
    for j in range(features.shape[0]):
        #features = features.mul(0.5).add(0.5)
        #samples = samples.cpu().data.numpy()
        feature = features.cpu().data.numpy()
        ans[j] = feature[j, :, :]
        feature_img = feature[j, :, :]
        feature_img = feature_img.reshape(1, feature_img.shape[0],
                                          feature_img.shape[1])
        #new_im=Image.fromarray(feature)

        #print(feature_img.shape)
        feature_img = np.asarray(feature_img * 255, dtype=np.uint8)

        dst_path = os.path.join(dst, i)
        make_dirs(dst_path)
        print(dst_path)
        #misc.imsave(dst_path+'/'+'layer1{}_{}.jpg'.format(i,j),feature_img)
        save_images.save_images(feature_img,
                                dst_path + '/' + '{}_{}.jpg'.format(i, j))
    save_images.save_images(ans, '{}.jpg'.format(i))

samples = samples.view(-1, 3, 32, 32)
samples = samples.mul(0.5).add(0.5)
samples = samples.cpu().data.numpy()

save_images.save_images(samples, 'samples_{}.jpg'.format(10))
#print(samples.shape)
'''print(activation['block1'].shape)
print(activation['block2'].shape)
print(activation['deconv_out'].view(-1,3,32,32))'''
Example no. 20
def JL_reconstruction(data='mnist',
                      JL_dim=32 * 32 // 2,
                      batch_size=100,
                      seed=None):
    # -------------------------------------------------------
    # get the dataset as infinite generator
    if seed is not None:
        np.random.seed(seed)

    if data == 'cifar10':
        data_dir = settings.filepath_cifar10
        train_gen, dev_gen = cifar10.load(batch_size, data_dir=data_dir)
        picture_size = 32 * 32 * 3
    elif data == 'celebA32':
        data_dir = settings.filepath_celebA32
        train_gen, dev_gen = celeba.load(batch_size,
                                         data_dir=data_dir,
                                         black_white=False)
        picture_size = 32 * 32 * 3
    elif data == 'mnist':
        filename = '../data/MNIST/mnist32_zoom_1'
        train_gen, n_samples_train, dev_gen, n_samples_test = preprocessing_mnist.load(
            filename, batch_size, npy=True)
        picture_size = 32 * 32
    elif data == 'celebA32_bw':
        data_dir = settings.filepath_celebA32
        train_gen, dev_gen = celeba.load(batch_size,
                                         data_dir=data_dir,
                                         black_white=True)
        picture_size = 32 * 32

    # -------------------------------------------------------
    # make directories
    dir1 = 'JL_reconstruction/'
    path = dir1 + data + '/'
    if not os.path.isdir(dir1):
        call(['mkdir', dir1])
    if not os.path.isdir(path):
        call(['mkdir', path])

    # -------------------------------------------------------
    # JL mapping
    A = np.random.randn(JL_dim, picture_size) / np.sqrt(picture_size)
    ATA = np.matmul(np.transpose(A), A)

    # JL error
    JL_error = np.round(np.sqrt(8 * np.log(2 * batch_size) / JL_dim),
                        decimals=4)
    print('\ndata dimension: {}'.format(picture_size))
    print('JL dimension:   {}'.format(JL_dim))
    print('batch size:     {}'.format(batch_size))
    print('JL error:       {}\n'.format(JL_error))

    # -------------------------------------------------------
    # encode and decode data
    im = next(train_gen())[0]
    im1 = im / 255.99

    reconstruction = np.matmul(im1, ATA)  #/ float(picture_size)
    reconstruction = (255.99 * np.clip(reconstruction, 0, 1)).astype('uint8')

    # reconstruction = np.matmul(im, ATA)  # / float(picture_size)
    # reconstruction = (np.clip(reconstruction, 0, 255)).astype('uint8')

    save_images.save_images(im, save_path=path + 'true_images.png')
    save_images.save_images(reconstruction,
                            save_path=path + 'JL_reconstructed_image.png')

    im_d = np.zeros((batch_size, picture_size))
    for i in range(batch_size):
        A = np.random.randn(JL_dim, picture_size) / np.sqrt(picture_size)
        ATA = np.matmul(np.transpose(A), A)
        reconstruction = np.matmul(im1[i].reshape((1, picture_size)),
                                   ATA)  # / float(picture_size)
        reconstruction = (255.99 *
                          np.clip(reconstruction, 0, 1)).astype('uint8')
        im_d[i] = reconstruction.reshape((picture_size, ))
    im_d = im_d.astype('uint8')
    save_images.save_images(im_d,
                            save_path=path +
                            'different_JL_reconstructed_image.png')
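The JL_error printed above is the usual Johnson-Lindenstrauss distortion bound epsilon = sqrt(8 * ln(2m) / k) for m points projected to k dimensions. A quick numeric check with this function's defaults (m = 100, k = 32*32//2 = 512):

import numpy as np

m, k = 100, 32 * 32 // 2
print(np.round(np.sqrt(8 * np.log(2 * m) / k), decimals=4))  # ~0.2877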
Example no. 21
def train():
    # Prepare Training Data
    (X_train, _), (X_test, _) = mnist.load_data()
    
    X_train_list = prepare_mnist_list(X_train)
    X_test_list  = prepare_mnist_list(X_test)
    
    # Initialize Models
    
    real_data = tf.placeholder(tf.float32, (None, *IMG_DIM))
    z_ph      = tf.placeholder(tf.float32, (None,  Z_DIM))
        
    fake_data      = generator_tf(z_ph, reuse = False)    
    
    
    d_on_real_data  = d_tf(real_data, reuse = False)
    d_on_fake_data  = d_tf(fake_data)
    q_on_fake_data  = q_tf(fake_data, reuse = False)
    
    
    alpha = tf.random_uniform(shape=[tf.shape(fake_data)[0], 1, 1, 1], minval=0., maxval=1.)
    interpolates      = real_data + alpha * (fake_data - real_data)
            
    
    gradients        = tf.gradients(d_tf(interpolates), [interpolates])[0]
    slopes           = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1,2,3]))
    gradient_penalty = tf.reduce_mean((slopes-1)**2)
    
    q_cost  = q_cost_tf(z_ph, q_on_fake_data)
    g_cost  = -tf.reduce_mean(d_on_fake_data)
    d_cost  =  tf.reduce_mean(d_on_fake_data) - tf.reduce_mean(d_on_real_data) + LAMBDA * gradient_penalty

    
    g_param     = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Generator')
    d_param     = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Discriminator')
    q_param     = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Q')
    
    
    g_train_op = tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE).minimize(g_cost, var_list=g_param)
    d_train_op = tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE).minimize(d_cost, var_list=d_param)
    q_train_op = tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE).minimize(q_cost, var_list=q_param + g_param)
    
    saver = tf.train.Saver(max_to_keep=20)
        
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    
    fix_z = random_z()
    
    f_train_stat = open("train_log.txt", "w", buffering=1)
    f_test_stat = open("test_log.txt", "w", buffering=1)
    os.system("mkdir -p figs")
    for it in range(ITERS):        
        start_time = time.time()
        for i in range(CRITIC_ITER):
            
            data = np.array(random.sample(X_train_list, BATCH_SIZE))
            d_cost_rez, _ = sess.run( [d_cost, d_train_op], feed_dict={real_data: data, z_ph: random_z()})
            

        g_cost_rez, q_cost_rez, _, _ = sess.run([g_cost, q_cost, g_train_op, q_train_op], feed_dict={z_ph: random_z()})
        
        f_train_stat.write("%i %g %g %g\n"%(it, g_cost_rez, d_cost_rez, q_cost_rez))
        print(it, (time.time() - start_time ))
        
        if ((it + 1) % 100 == 0):
            samples = sess.run([fake_data], feed_dict={z_ph: fix_z})
            save_images.save_images(np.squeeze(samples),'figs/samples_%.6i.png'%(it))
            
            data = np.array(random.sample(X_test_list, BATCH_SIZE))
            g_cost_rez, d_cost_rez, q_cost_rez = sess.run([g_cost, d_cost, q_cost], 
                                                          feed_dict={real_data: data, z_ph: random_z()})
            f_test_stat.write("%i %g %g %g\n"%(it, g_cost_rez, d_cost_rez, q_cost_rez))

        if ((it + 1) % 10000 == 0):
            saver.save(sess, 'save/model', global_step=it)
        
    saver.save(sess, 'save/final-model')
Example no. 22
def generate_image(frame, fixed_noise, model, method, outf):
    x, x_mean, x_std = model(fixed_noise)
    samples = ((x.cpu() + 1.0) * 255.0 / 2.0).detach().numpy().astype('int32')
    save_images.save_images(
        samples.reshape((-1, 3, 32, 32)),
        os.path.join(outf, '{}/{:06d}_samples.png'.format(method, frame)))
Example no. 23
sns.kdeplot(rec_0_batch.flatten(), shade=True)
sns.kdeplot(rec_1_batch.flatten(), shade=True)
sns.kdeplot(rec_2_batch.flatten(), shade=True)
sns.kdeplot(rec_3_batch.flatten(), shade=True)
sns.kdeplot(rec_4_batch.flatten(), shade=True)
sns.kdeplot(rec_5_batch.flatten(), shade=True)
sns.kdeplot(rec_6_batch.flatten(), shade=True)
sns.kdeplot(rec_7_batch.flatten(), shade=True)
sns.kdeplot(rec_8_batch.flatten(), shade=True)
sns.kdeplot(rec_9_batch.flatten(), shade=True)
plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
plt.title('Loss')
plt.savefig('Loss.png')
print('fig Loss.png saved\n')

data = np.array(random.sample(X_train_list, BATCH_SIZE))

data_z = sess.run(q_on_real_data, feed_dict={real_data: data})
z = random_z()
z[:, :ZU_DIM] = data_z
rec = sess.run(fake_data, feed_dict={z_ph: z})
border = np.ones((128, 28, 3, 1))
plt.clf()
save_images.save_images(np.concatenate((data, rec, border), axis=2),
                        'Reconstruction.png')

rec, _, _ = getZandRec_best(sess, data)

save_images.save_images(np.concatenate((data, rec, border), axis=2),
                        'Reconstruction_best.png')
Example no. 24
def make_batch(arr, save_fn, n=128):
    ims = arr[:n]
    ims = ims.reshape((-1, 96, 96))  # reshape is not in-place; assign the result
    saver.save_images(ims, save_fn)
Example no. 25
def validate(testloader, model_pre, decoder, epoch, acc_epoch, loss_epoch):
    losses = counter()
    accuracy = counter()
    batch_time = counter()
    train_batches_num = len(testloader)
    end = time.time()

    # Amount = torch.zeros(class_num).cuda()

    model_pre.eval()
    decoder.eval()
    for i, data in enumerate(testloader, 0):

        # get the inputs
        inputs, labels = data

        # inputs, labels = inputs.cuda(), labels.cuda(async=True)
        features = model_pre(inputs)

        re_construct = decoder(
            features.view(inputs.size(0), feature_num, 1, 1).detach())

        if i == 0:
            save_images.save_images(
                re_construct.detach().cpu().numpy(),
                './Class_condition' + str(args.class_id) +
                '/cifar10/test_reconstruct_epoch_{}.jpg'.format(epoch))
            save_images.save_images(
                _image_restore(inputs, 'test').cpu().numpy(),
                './Class_condition' + str(args.class_id) +
                '/cifar10/test_initial_epoch_{}.jpg'.format(epoch))

        loss_re_construct = torch.sum(
            (re_construct - _image_restore(inputs, 'train')).pow(2))
        loss_re_construct /= (labels.size(0) * 224 * 224 * 3)
        losses.update(loss_re_construct.data.item(), labels.size(0))

        # optimizer.zero_grad()

        batch_time.update(time.time() - end)
        end = time.time()

        if (i + 1) % args.print_freq == 0:
            fd = open(record_file, 'a+')
            string = ('Test: [{0}][{1}/{2}]\t'
                      'Time {batch_time.value:.3f} ({batch_time.ave:.3f})\t'
                      'Loss {loss.value:.4f} ({loss.ave:.4f})\t'
                      'Prec@1 {top1.value:.3f} ({top1.ave:.3f})\t'.format(
                          epoch, (i + 1),
                          train_batches_num,
                          batch_time=batch_time,
                          loss=losses,
                          top1=accuracy))
            print(string)
            fd.write(string + '\n')
            fd.close()

    fd = open(record_file, 'a+')
    string = ('Test: [{0}][{1}/{2}]\t'
              'Time {batch_time.value:.3f} ({batch_time.ave:.3f})\t'
              'Loss {loss.value:.4f} ({loss.ave:.4f})\t'
              'Prec@1 {top1.value:.3f} ({top1.ave:.3f})\t'.format(
                  epoch,
                  train_batches_num,
                  train_batches_num,
                  batch_time=batch_time,
                  loss=losses,
                  top1=accuracy))
    print(string)
    fd.write(string + '\n')
    fd.close()

    acc_epoch.append(accuracy.ave)
    loss_epoch.append(losses.ave)
Example no. 26
start_page = 1
total_pages = 3

# Scraping the images
for page in range(start_page, total_pages + 1):
    try:
        product_details = scrap_image_url(driver=driver)
        print('scraping page {0} of {1} pages'.format(page, total_pages))

        page_value = driver.find_element_by_xpath(
            r"//a[@class='_2Xp0TH fyt9Eu']").text
        print('The current page scraped is {}'.format(page_value))

        # Downloading the images
        save_images(data=product_details, dirname=dir_name, page=page)
        print('scraping of page {0} done'.format(page))

        #Saving the data into csv file
        # save_data_to_csv(data=product_details,filename='men_tshirt.csv')

        #Moving to the next page
        print('Moving to the next page')
        button_type = driver.find_element_by_xpath(
            "//div[@class='_2zg3yZ']//a[@class='_3fVaIS']//span"
        ).get_attribute('innerHTML')

        if button_type == 'Next':
            driver.find_element_by_xpath("//a[@class='_3fVaIS']").click()

        else:
Example no. 27
basic_path = os.path.join('./', aug_dir.split('/')[1].split('.')[0])
isExists = os.path.exists(basic_path)
if not isExists:
    os.makedirs(basic_path)
print('basic_path:', basic_path)

id = 1
for r in result:
    raw_img_id = r[0]
    raw_img_dir = os.path.join(IMAGENET_PATH, class_id, raw_img_id)
    img = Image.open(raw_img_dir)
    img_raw = trans1(img)
    img_raw = img_raw.view(1, img_raw.size(0), img_raw.size(1),
                           img_raw.size(2))
    img_224 = trans2(img)
    img_224 = img_224.view(1, 3, 224, 224)
    img_512 = torch.nn.functional.interpolate(img_224, (512, 512),
                                              mode='bilinear',
                                              align_corners=True)

    save_images.save_images(
        img_raw.cpu().numpy(),
        '{0}/{1}_{2}_raw'.format(basic_path, id,
                                 raw_img_id.split('.')[0]))
    save_images.save_images(
        img_512.cpu().numpy(),
        '{0}/{1}_{2}_512'.format(basic_path, id,
                                 raw_img_id.split('.')[0]))

    id += 1
Example no. 28
def train():
    # Prepare Training Data
    (X_train, Y_train), (X_test, Y_test) = mnist.load_data()
    
    X_train = prepare_mnist(X_train)
    X_test  = prepare_mnist(X_test)
    
    # Initialize Models
    real_data     = tf.placeholder(tf.float32, (None, *IMG_DIM))
    real_data_rot = tf.placeholder(tf.float32, (None, *IMG_DIM))
    angles_tf     = tf.placeholder(tf.float32, (None,))
    
    
    encoded_data  = encoder_tf(real_data,                      reuse = False)
    rec_data      = reconstructor_tf(encoded_data,  angles_tf, reuse = False) 
    
    real_z        = tf.placeholder(tf.float32, (None,  Z_DIM))
    rec_real_z    = reconstructor_tf(real_z,        angles_tf, reuse = True)

    # reconstruction loss ( decoder cost)
    r_cost = tf.losses.mean_squared_error(real_data_rot, rec_data)     
    
    if (is_aae):
    
    
        # for our discriminator
        # encoded_data is a fake_z
        fake_z = encoded_data
        d_on_real_data  = discriminator_tf(real_z, reuse = False)
        d_on_fake_data  = discriminator_tf(fake_z, reuse = True)
    
    
        # z is 2-D (batch, Z_DIM), so alpha must be (batch, 1) and the
        # gradient norm is reduced over axis 1 only
        alpha = tf.random_uniform(shape=[tf.shape(fake_z)[0], 1], minval=0., maxval=1.)
        interpolates      = real_z + alpha * (fake_z - real_z)

        gradients        = tf.gradients(discriminator_tf(interpolates, reuse=True), [interpolates])[0]
        slopes           = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
        gradient_penalty = tf.reduce_mean((slopes-1)**2)
    
        # "generator" loss (it is also our encoder)
        e_cost  = -tf.reduce_mean(d_on_fake_data)
        
        # discriminator loss 
        d_cost  =  tf.reduce_mean(d_on_fake_data) - tf.reduce_mean(d_on_real_data) + LAMBDA * gradient_penalty
        d_param  = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,   scope='Discriminator')
        e_param  = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,   scope='Encoder')
        
        d_train_op = tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE).minimize(d_cost, var_list=d_param)
        e_train_op = tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE).minimize(e_cost, var_list=e_param)

    r_param  = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,   scope='Reconstructor')
    e_param  = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,   scope='Encoder')
    r_train_op = tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE).minimize(r_cost, var_list=e_param + r_param)
    
    saver = tf.train.Saver(max_to_keep=20)
        
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    
    fix_z = random_z()
    
    f_train_stat = open("train_log.txt", "w", buffering=1)
    f_test_stat = open("test_log.txt", "w", buffering=1)
    os.system("mkdir -p figs figs_rec")
    for it in range(ITERS):        
        start_time = time.time()
        
        
        # first reconstruction phase
        angles, Xb, Xb_rot = get_batch(X_train, Y_train)        
        r_cost_rez, _ = sess.run( [r_cost, r_train_op], feed_dict={real_data: Xb, real_data_rot: Xb_rot, angles_tf : angles})
        
        if (is_aae):
            # second regularization phase (first udate discriminator next update generator(encoder))
            for i in range(CRITIC_ITER):
                Xb,_ = get_batch_only_Xb(X_train, Y_train)
                d_cost_rez, _ = sess.run( [d_cost, d_train_op], feed_dict={real_data: Xb, real_z: random_z()})
            e_cost_rez, _ = sess.run([e_cost, e_train_op], feed_dict={real_data: Xb})
            f_train_stat.write("%i %g %g %g\n"%(it, r_cost_rez, d_cost_rez, e_cost_rez))
        else:
            f_train_stat.write("%i %g\n"%(it, r_cost_rez))
            
        print(it, (time.time() - start_time ))
        
        if ((it + 1) % 500 == 0):
            
            angles, Xb, Xb_rot = get_batch(X_train, Y_train)
            samples = sess.run([rec_data], feed_dict={real_data: Xb, real_data_rot: Xb_rot, angles_tf : angles})
            plot_pair_samples(Xb_rot, samples, 'figs_rec/samples_%.6i_seen.png'%(it))
            
            angles, Xb, Xb_rot = get_batch(X_test, Y_test)
            samples = sess.run([rec_data], feed_dict={real_data: Xb, real_data_rot: Xb_rot, angles_tf : angles})
            plot_pair_samples(Xb_rot, samples, 'figs_rec/samples_%.6i_unseen.png'%(it))
                        
            
            samples = sess.run([rec_real_z], feed_dict={real_z: fix_z, angles_tf : angles})
            save_images.save_images(np.squeeze(samples),'figs/samples_%.6i.png'%(it))
            
            
            
                                                          
            if (is_aae):
                r_cost_rez, d_cost_rez, e_cost_rez = sess.run([r_cost, d_cost, e_cost], 
                feed_dict={real_data: Xb, real_data_rot: Xb_rot, angles_tf : angles, real_z: random_z()})
            else:
                r_cost_rez = sess.run(r_cost, feed_dict={real_data: Xb, real_data_rot: Xb_rot, angles_tf : angles})                
                f_test_stat.write("%i %g\n"%(it, r_cost_rez))

        if ((it + 1) % 10000 == 0):
            saver.save(sess, 'save/model', global_step=it)
        
    saver.save(sess, 'save/final-model')
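A quick numpy check of the latent interpolation shapes used in the gradient penalty above (with z two-dimensional, an alpha of shape (batch, 1) broadcasts cleanly; sizes are illustrative):

import numpy as np

real = np.zeros((5, 16))
fake = np.ones((5, 16))
alpha = np.random.rand(5, 1)
print((real + alpha * (fake - real)).shape)  # (5, 16)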
Example no. 29
if __name__ == '__main__':
    # size = 64
    # enlarge_border(size)
    # with gzip.open(data_dir+'mnist{}_border.pkl.gz'.format(size), 'rb') as f:
    #     train_data, dev_data = pickle.load(f)
    #
    # image_batch = np.reshape(train_data[:64], (-1, size, size))
    # save_images.save_images(image_batch, save_path='test_image4.png')

    # size = 128
    # fp = zoom(size, 1, npy=True)
    # # with gzip.open(fp, 'rb') as f:
    # #     train_data, dev_data = pickle.load(f)
    #
    # train_data = np.load(fp)
    #
    # image_batch = np.reshape(train_data[:64], (-1, size, size))
    # save_images.save_images(image_batch, save_path='test_image1.png')

    train, l_train, dev, l_dev = load(data_dir + 'mnist128_zoom_1',
                                      100,
                                      npy=False)
    data = train()
    im, targ = next(data)
    save_images.save_images(im, save_path='test_image2.png')
    print(targ)

    # import tensorflow as tf
    # o = tf.one_hot(targ, depth=10)
    # sess = tf.Session()
    # print(sess.run(o))
Example no. 30
total_pages = 6

# Scraping the pages

for page in range(start_page, total_pages + 1):
    try:
        product_details = scrap_image_url(driver=driver)
        print("Scraping Page {0} of {1} pages".format(page, total_pages))

        #page_value = driver.find_element_by_xpath("//a[@class='_2Xp0TH fyt9Eu']").text
        page_value = driver.find_element_by_xpath(
            "//a[@class='_2Xp0TH fyt9Eu']").text
        print("The current page scraped is {}".format(page_value))

        # Downloading the images
        save_images(data=product_details, dirname=DIRNAME, page=page)
        print("Scraping of page {0} done".format(page))

        # Moving to the next page
        print("Moving the next page")
        button_type = driver.find_element_by_xpath(
            "//div[@class='_2zg3yZ']//a[@class='_3fVaIS']//span"
        ).get_attribute('innerHTML')

        if button_type == 'Next':
            driver.find_element_by_xpath("//a[@class='_3fVaIS']").click()
        else:
            driver.find_element_by_xpath("//a[@class='_3fVaIS'][3]").click()

        #new_page = driver.find_element_by_xpath("//a[@class='_2Xp0TH fyt9Eu']").text
        new_page = driver.find_element_by_xpath(
Example no. 31
def interpolate(state_dict,
                generator,
                preview=True,
                interpolate=False,
                large_sample=False,
                img_size=28,
                img_channel=1,
                large_dim=1024,
                samples=[
                    random.randint(0, args.bsize - 1),
                    random.randint(0, args.bsize - 1)
                ]):
    """
    Args:
        
    state_dict: saved copy of trained params
    generator: generator model
    preview: show preview of images in grid form in original size (to pick which to blow up)
    interpolate: create interpolation gif
    large_sample: create a large sample of an individual picture
    img_size: size of your input samples, e.g. 28 for MNIST
    img_channel: number of color channels, 3 for cifar
    large_dim: dimension to blow up samples to for interpolation
    samples: indices of the samples you want to interpolate
    """

    x_d = img_size
    y_d = img_size
    c_d = img_channel
    position = 2
    x, y, r = model.get_coordinates(x_d, y_d, batch_size=args.bsize)
    x_large = large_dim
    y_large = large_dim

    generator_int = generator
    generator_int.load_state_dict(
        torch.load(state_dict, map_location=lambda storage, loc: storage))

    noise = torch.randn(args.bsize, args.latdim)

    if preview:

        noise = noise.to(device)
        noisev = autograd.Variable(noise, volatile=True)

        ones = torch.ones(args.bsize, x_d * y_d, c_d)
        ones = ones.to(device)

        seed = torch.bmm(ones, noisev.unsqueeze(1))

        gen_imgs = generator_int(x, y, r, seed)

        gen_imgs = gen_imgs.cpu().data.numpy()

        save_images.save_images(gen_imgs, 'generated_img/samples.png')

    if large_sample is not False:  # False skips; an index (including 0) requests a large sample

        assert args.sample < args.bsize, "Sample position is out of bounds"

        noise = noise.to(device)
        noisev = autograd.Variable(noise[large_sample], volatile=True)
        ones = torch.ones(1, x_large * y_large, 1).to(device)
        seed = torch.bmm(ones, noisev.unsqueeze(0).unsqueeze(0))
        x, y, r = model.get_coordinates(x_large, y_large, batch_size=1)

        gen_imgs = generator_int(x, y, r, seed)
        gen_imgs = gen_imgs.cpu().data.numpy()

        save_images.save_images(gen_imgs, 'generated_img/large_sample.png')
    if interpolate:

        nbSteps = args.frames
        alphaValues = np.linspace(0, 1, nbSteps)
        images = []

        noise = noise.to(device)
        noisev = autograd.Variable(noise[samples[0]], volatile=True)
        ones = torch.ones(1, x_large * y_large, 1).to(device)
        seed = torch.bmm(ones, noisev.unsqueeze(0).unsqueeze(0))
        x, y, r = model.get_coordinates(x_large, y_large, batch_size=1)

        samples.append(samples[0])

        for i in range(len(samples) - 1):
            for alpha in alphaValues:
                vector = noise[samples[i]].unsqueeze(0) * (
                    1 - alpha) + noise[samples[i + 1]].unsqueeze(0) * alpha
                gen_imgs = generator_int(x, y, r, vector)

                if c_d == 3:
                    gen_img_np = np.transpose(gen_imgs.data[0].numpy())
                elif c_d == 1:
                    gen_img_np = np.transpose(gen_imgs.data.numpy()).reshape(
                        x_large, y_large, -1)

                images.append(gen_img_np)

        imageio.mimsave('generated_img/movie.gif', images)
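One subtlety in the interpolation loop above: np.linspace(0, 1, nbSteps) includes both endpoints, so when segments are chained the shared keyframe is rendered twice. Dropping the last step per segment avoids duplicate frames in the gif:

import numpy as np

steps = np.linspace(0, 1, 5)
print(steps)  # [0.   0.25 0.5  0.75 1.  ]
print(steps[:-1])  # per-segment alphas when chaining several segments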