def visualize_results(self, epoch, fix=True):
        self.G.eval()

        if not os.path.exists(self.result_dir + '/' + self.dataset + '/' + self.model_name):
            os.makedirs(self.result_dir + '/' + self.dataset + '/' + self.model_name)

        tot_num_samples = min(self.sample_num, self.batch_size)
        image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))

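        # fix=True reuses the fixed noise vector drawn at init (self.sample_z_),
        # so grids stay comparable across epochs; fix=False draws fresh noise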
        if fix:
            """ fixed noise """
            samples = self.G(self.sample_z_)
        else:
            """ random noise """
            if self.gpu_mode:
                sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
            else:
                sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)

            samples = self.G(sample_z_)

        if self.gpu_mode:
            samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
        else:
            samples = samples.data.numpy().transpose(0, 2, 3, 1)

        utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                          self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_epoch%03d' % epoch + '.png')
Example #2
def add(rs, ws, nrows, ncols):
    site = "wayfair"  # will later become a function parameter
    if site == "wayfair":
        headers = rs.row_values(0)
        url_idx = headers.index("product_url")
        price_idx = headers.index("price")
        img_urls_idx = headers.index("img_urls")
        sku_id_idx = headers.index("sku_id")
        for row in range(1, nrows):
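            # only re-crawl rows whose price cell is empty or failed to parse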
            if (
                str(rs.cell(row, price_idx).value).strip() == ""
                or float(extractNum(str(rs.cell(row, price_idx).value).strip())) == -1
            ):
                info_tuple = wayfair_crawl(rs.cell(row, url_idx).value)
                if info_tuple[0] and info_tuple[1]:
                    ws.write(row, price_idx, info_tuple[0])
                    ws.write(row, img_urls_idx, info_tuple[1])
                    save_images("./output", info_tuple[1].split(","), rs.cell(row, sku_id_idx).value.strip())
                    print(row)
                for col in range(0, ncols):
                    if col not in [price_idx, img_urls_idx]:
                        ws.write(row, col, rs.cell(row, col).value)
            else:
                for col in range(0, ncols):
                    ws.write(row, col, rs.cell(row, col).value)
Example #3
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size, y_dim=10,
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        to_json("./web/js/gen_layers.js", dcgan.h0_w, dcgan.h1_w, dcgan.h2_w, dcgan.h3_w, dcgan.h4_w)

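        # draw one batch of latent vectors and render an 8x8 sample grid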
        z_sample = np.random.uniform(-1, 1, size=(FLAGS.batch_size, dcgan.z_dim))

        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
        save_images(samples, [8, 8], './samples/test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
    def visualize_results(self, epoch):
        self.G.eval()

        if not os.path.exists(self.result_dir + '/' + self.dataset + '/' + self.model_name):
            os.makedirs(self.result_dir + '/' + self.dataset + '/' + self.model_name)

        image_frame_dim = int(np.floor(np.sqrt(self.sample_num)))

        """ style by class """
        samples = self.G(self.sample_z_, self.sample_c_, self.sample_y_)
        if self.gpu_mode:
            samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
        else:
            samples = samples.data.numpy().transpose(0, 2, 3, 1)

        utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                          self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_epoch%03d' % epoch + '.png')

        """ manipulating two continous codes """
        samples = self.G(self.sample_z2_, self.sample_c2_, self.sample_y2_)
        if self.gpu_mode:
            samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
        else:
            samples = samples.data.numpy().transpose(0, 2, 3, 1)

        utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                          self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_cont_epoch%03d' % epoch + '.png')
    def visualize_results(self, epoch, fix=True):
        self.G.eval()

        if not os.path.exists(self.result_dir + '/' + self.dataset + '/' + self.model_name):
            os.makedirs(self.result_dir + '/' + self.dataset + '/' + self.model_name)

        image_frame_dim = int(np.floor(np.sqrt(self.sample_num)))

        if fix:
            """ fixed noise """
            samples = self.G(self.sample_z_, self.sample_y_)
        else:
            """ random noise """
            temp = torch.LongTensor(self.batch_size, 1).random_() % 10
            sample_y_ = torch.FloatTensor(self.batch_size, 10)
            sample_y_.zero_()
            sample_y_.scatter_(1, temp, 1)
            if self.gpu_mode:
                sample_z_, sample_y_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True), \
                                       Variable(sample_y_.cuda(), volatile=True)
            else:
                sample_z_, sample_y_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True), \
                                       Variable(sample_y_, volatile=True)

            samples = self.G(sample_z_, sample_y_)

        if self.gpu_mode:
            samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
        else:
            samples = samples.data.numpy().transpose(0, 2, 3, 1)

        utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                          self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_epoch%03d' % epoch + '.png')
    def test_sample_image(self):
        train, valid, test = m_loader.load_digits(digits=[2, 3], n=[10, 0, 0], pre={'binary_label':True})
        train_x, train_y = train
        valid_x, valid_y = valid
        test_x, test_y = test

        train_x01 = m_loader.sample_image(train_y, shared=False)
        print(train_y.eval())
        utils.save_images(train_x01, 'test_image/sampled_img.png')
def apply_cls(action_path, cls_path, out_path):
    action_frame, cls = read_all(action_path, cls_path)
    actions = action_frame['Action']
    images = []
    for act in actions:
        images += [(cls.get_category(img), utils.to_2D(img)) for img in act.images]

    def give_name(cat, i):
        return 'cat_' + str(cat) + '_' + str(i)

    imgs = [(give_name(img[0], i), img[1]) for i, img in enumerate(images)]
    utils.save_images(out_path, imgs)
Example #8
def get_max(in_path, out_path):
    actions = read_actions(in_path)
    named_imgs = []
    for i, action_i in enumerate(actions):
        dim_x = action_i.get_dim(0)
        max_array = np.zeros(dim_x[0].shape)
        maxim = [np.argmax(img_i) for img_i in dim_x]
        max_array[maxim] = i * 10  # 1.0
        max_array = np.reshape(max_array, (60, 60))
        named_imgs.append((action_i.name, max_array))
    utils.make_dir(out_path)
    utils.save_images(out_path, named_imgs)
Example #9
    def save_projection(self, out_path):
        utils.make_dir(out_path)
        paths = ['xy/', 'zx/', 'zy/']
        paths = [out_path + path for path in paths]
        for path in paths:
            utils.make_dir(path)
        imgs_xy = self.get_imgs(pc.ProjectionXY())
        utils.save_images(paths[0], imgs_xy)
        imgs_xz = self.get_imgs(pc.ProjectionXZ())
        utils.save_images(paths[1], imgs_xz)
        imgs_zy = self.get_imgs(pc.ProjectionYZ())
        utils.save_images(paths[2], imgs_zy)
    def test_load_individual_digits(self):
        chosen_digits = [0, 1]
        dataset = m_loader.load_digits(shared=False, digits=chosen_digits)

        self.assertTrue(len(dataset) == 3)

        train, valid, test = dataset
        train_x, train_y = train
        valid_x, valid_y = valid
        test_x, test_y = test

        self.assertTrue(len(train_x) == len(train_y))
        self.assertTrue(len(valid_x) == len(valid_y))
        self.assertTrue(len(test_x) == len(test_y))
        self.assertTrue((np.unique(train_y) == np.array(chosen_digits)).all())
        self.assertTrue((np.unique(valid_y) == np.array(chosen_digits)).all())
        self.assertTrue((np.unique(test_y) == np.array(chosen_digits)).all())

        utils.save_images(train_x[0:100], 'test_image/zero_and_one_train.png')
        utils.save_images(valid_x[0:100], 'test_image/zero_and_one_valid.png')
        utils.save_images(test_x[0:100], 'test_image/zero_and_one_test.png')
def my_train():
    with tf.Graph().as_default():
        sess = tf.Session(config=config)
        model = FaceAging(sess=sess, lr=FLAGS.learning_rate, keep_prob=1., model_num=FLAGS.model_index, batch_size=FLAGS.batch_size,
                        age_loss_weight=FLAGS.age_loss_weight, gan_loss_weight=FLAGS.gan_loss_weight,
                        fea_loss_weight=FLAGS.fea_loss_weight, tv_loss_weight=FLAGS.tv_loss_weight)

        imgs = tf.placeholder(tf.float32, [FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size, 3])
        true_label_features_128 = tf.placeholder(tf.float32, [FLAGS.batch_size, 128, 128, FLAGS.age_groups])
        true_label_features_64 = tf.placeholder(tf.float32, [FLAGS.batch_size, 64, 64, FLAGS.age_groups])
        false_label_features_64 = tf.placeholder(tf.float32, [FLAGS.batch_size, 64, 64, FLAGS.age_groups])
        age_label = tf.placeholder(tf.int32, [FLAGS.batch_size])

        source_img_227, source_img_128, face_label = load_source_batch3(FLAGS.source_file, FLAGS.root_folder, FLAGS.batch_size)

        model.train_age_lsgan_transfer(source_img_227, source_img_128, imgs, true_label_features_128,
                                       true_label_features_64, false_label_features_64, FLAGS.fea_layer_name, age_label)

        ge_samples = model.generate_images(imgs, true_label_features_128, reuse=True, mode='train')

        # Create a saver.
        model.saver = tf.train.Saver(model.save_d_vars + model.save_g_vars, max_to_keep=200)
        model.alexnet_saver = tf.train.Saver(model.alexnet_vars)
        model.age_saver = tf.train.Saver(model.age_vars)

        d_error = model.d_loss/model.gan_loss_weight
        g_error = model.g_loss/model.gan_loss_weight
        fea_error = model.fea_loss/model.fea_loss_weight
        age_error = model.age_loss/model.age_loss_weight

        # Start running operations on the Graph.
        sess.run(tf.global_variables_initializer())
        tf.train.start_queue_runners(sess)

        model.alexnet_saver.restore(sess, FLAGS.alexnet_pretrained_model)
        model.age_saver.restore(sess, FLAGS.age_pretrained_model)

        if model.load(FLAGS.checkpoint_dir, model.saver):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        print("{} Start training...")

        # Loop over max_steps
        for step in range(FLAGS.max_steps):
            images, t_label_features_128, t_label_features_64, f_label_features_64, age_labels = \
                train_generator.next_target_batch_transfer2()
            feed = {imgs: images,
                    true_label_features_128: t_label_features_128,
                    true_label_features_64: t_label_features_64,
                    false_label_features_64: f_label_features_64,
                    age_label: age_labels
                    }
            for i in range(d_iter):
                _, d_loss = sess.run([model.d_optim, d_error], feed_dict=feed)
            for i in range(g_iter):
                _, g_loss, fea_loss, age_loss = sess.run([model.g_optim, g_error, fea_error, age_error],
                                                         feed_dict=feed)
            format_str = ('%s: step %d, d_loss = %.3f, g_loss = %.3f, fea_loss=%.3f, age_loss=%.3f')
            print(format_str % (datetime.now(), step, d_loss, g_loss, fea_loss, age_loss))
            # Save the model checkpoint periodically.
            if step % SAVE_INTERVAL == SAVE_INTERVAL-1 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.checkpoint_dir)
                model.save(checkpoint_path, step, 'acgan')

            if step % VAL_INTERVAL == VAL_INTERVAL-1:
                if not os.path.exists(FLAGS.sample_dir):
                    os.makedirs(FLAGS.sample_dir)
                path = os.path.join(FLAGS.sample_dir, str(step))
                if not os.path.exists(path):
                    os.makedirs(path)

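                # render the source batch once, then one generated grid per age class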
                source = sess.run(source_img_128)
                save_source(source, [4, 8], os.path.join(path, 'source.jpg'))
                for j in range(train_generator.n_classes):
                    true_label_fea = train_generator.label_features_128[j]
                    feed = {
                            imgs: source,
                            true_label_features_128: true_label_fea
                            }
                    samples = sess.run(ge_samples, feed_dict=feed)
                    save_images(samples, [4, 8], './{}/test_{:01d}.jpg'.format(path, j))
Example #12
    def train(self):

        d_trainer = tf.train.AdamOptimizer(self.learning_rate * self.lr_decay,
                                           beta1=self.beta1,
                                           beta2=self.beta2)
        d_gradients = d_trainer.compute_gradients(self.D_loss,
                                                  var_list=self.d_vars)
        opti_D = d_trainer.apply_gradients(d_gradients)

        m_trainer = tf.train.AdamOptimizer(self.learning_rate * self.lr_decay,
                                           beta1=self.beta1,
                                           beta2=self.beta2)
        m_gradients = m_trainer.compute_gradients(self.G_loss,
                                                  var_list=self.g_vars)
        opti_M = m_trainer.apply_gradients(m_gradients)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:

            sess.run(init)
            summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(self.log_dir, sess.graph)
            step = 0
            step2 = 0
            lr_decay = 1

            if self.is_load:
                self.saver.restore(
                    sess,
                    os.path.join(self.model_path,
                                 'model_{:06d}.ckpt'.format(step)))

            while step <= self.max_iters:

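                # decay the learning-rate multiplier linearly after 20k steps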
                if step > 20000 and lr_decay > 0.1:
                    lr_decay = (self.max_iters - step) / float(self.max_iters -
                                                               10000)

                for i in range(self.n_critic):

                    train_data_list, batch_eye_pos, batch_train_ex_list, batch_ex_eye_pos = self.data_ob.getNextBatch(
                        step2, self.batch_size)
                    batch_images_array = self.data_ob.getShapeForData(
                        train_data_list)
                    batch_exem_array = self.data_ob.getShapeForData(
                        batch_train_ex_list)
                    batch_eye_pos = np.squeeze(batch_eye_pos)

                    batch_ex_eye_pos = np.squeeze(batch_ex_eye_pos)
                    f_d = {
                        self.input_img: batch_images_array,
                        self.exemplar_images: batch_exem_array,
                        self.img_mask: self.get_Mask(batch_eye_pos),
                        self.exemplar_mask: self.get_Mask(batch_ex_eye_pos),
                        self.lr_decay: lr_decay
                    }

                    # optimize D
                    sess.run(opti_D, feed_dict=f_d)
                    step2 += 1

                # optimize M
                sess.run(opti_M, feed_dict=f_d)
                summary_str = sess.run(summary_op, feed_dict=f_d)
                summary_writer.add_summary(summary_str, step)

                if step % 50 == 0:
                    d_loss, g_loss = sess.run([self.D_loss, self.G_loss],
                                              feed_dict=f_d)
                    print(("step %d d_loss = %.4f, g_loss=%.4f" %
                           (step, d_loss, g_loss)))

                if np.mod(step, 400) == 0:

                    x_tilde, incomplete_img, local_real, local_fake = sess.run(
                        [
                            self.x_tilde, self.incomplete_img,
                            self.local_real_img, self.local_fake_img
                        ],
                        feed_dict=f_d)
                    output_concat = np.concatenate([
                        batch_images_array, batch_exem_array, incomplete_img,
                        x_tilde, local_real, local_fake
                    ],
                                                   axis=0)
                    save_images(
                        output_concat, [
                            output_concat.shape[0] // self.batch_size,
                            self.batch_size
                        ],
                        '{}/{:02d}_output.jpg'.format(self.sample_path, step))
                if np.mod(step, 2000) == 0:
                    self.saver.save(
                        sess,
                        os.path.join(self.model_path,
                                     'model_{:06d}.ckpt'.format(step)))

                step += 1

            save_path = self.saver.save(sess, self.model_path)
            print("Model saved in file: %s" % save_path)
Example #13
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size, y_dim=10,
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                                      [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                                      [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                                      [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                                      [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 2
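        # each OPTION renders the generator differently: 0 random grid,
        # 1-3 per-dimension latent sweeps saved as PNGs or GIFs, 4-6 merged
        # GIFs, 7-8 multi-dimension grids, else rebuild a GIF from saved frames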
        if OPTION == 0:
          z_sample = np.random.uniform(-0.5, 0.5, size=(FLAGS.batch_size, dcgan.z_dim))
          samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
          save_images(samples, [8, 8], './samples/test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        elif OPTION == 1:
          values = np.arange(0, 1, 1./FLAGS.batch_size)
          for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
              z[idx] = values[kdx]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            save_images(samples, [8, 8], './samples/test_arange_%s.png' % (idx))
        elif OPTION == 2:
          values = np.arange(0, 1, 1./FLAGS.batch_size)
          for idx in [random.randint(0, 99) for _ in xrange(100)]:
            print(" [*] %d" % idx)
            z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
            z_sample = np.tile(z, (FLAGS.batch_size, 1))
            #z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
              z[idx] = values[kdx]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            make_gif(samples, './samples/test_gif_%s.gif' % (idx))
        elif OPTION == 3:
          values = np.arange(0, 1, 1./FLAGS.batch_size)
          for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
              z[idx] = values[kdx]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            make_gif(samples, './samples/test_gif_%s.gif' % (idx))
        elif OPTION == 4:
          image_set = []
          values = np.arange(0, 1, 1./FLAGS.batch_size)

          for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample): z[idx] = values[kdx]

            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))

          new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) for idx in range(64) + range(63, -1, -1)]
          make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
        elif OPTION == 5:
          image_set = []
          values = np.arange(0, 1, 1./FLAGS.batch_size)
          z_idx = [[random.randint(0,99) for _ in xrange(5)] for _ in xrange(200)]

          for idx in xrange(200):
            print(" [*] %d" % idx)
            #z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            z = np.random.uniform(-1e-1, 1e-1, size=(dcgan.z_dim))
            z_sample = np.tile(z, (FLAGS.batch_size, 1))

            for kdx, z in enumerate(z_sample):
              for jdx in xrange(5):
                z_sample[kdx][z_idx[idx][jdx]] = values[kdx]

            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))

          new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 20]) for idx in range(64) + range(63, -1, -1)]
          make_gif(new_image_set, './samples/test_gif_random_merged.gif', duration=4)
        elif OPTION == 6:
          image_set = []

          values = np.arange(0, 1, 1.0/FLAGS.batch_size).tolist()
          z_idx = [[random.randint(0,99) for _ in xrange(10)] for _ in xrange(100)]

          for idx in xrange(100):
            print(" [*] %d" % idx)
            z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
            z_sample = np.tile(z, (FLAGS.batch_size, 1))

            for kdx, z in enumerate(z_sample):
              for jdx in xrange(10):
                z_sample[kdx][z_idx[idx][jdx]] = values[kdx]

            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            save_images(image_set[-1], [8, 8], './samples/test_random_arange_%s.png' % (idx))

          new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) for idx in range(64) + range(63, -1, -1)]
          make_gif(new_image_set, './samples/test_gif_merged_random.gif', duration=4)
        elif OPTION == 7:
          for _ in xrange(50):
            z_idx = [[random.randint(0,99) for _ in xrange(10)] for _ in xrange(8)]

            zs = []
            for idx in xrange(8):
              z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
              zs.append(np.tile(z, (8, 1)))

            z_sample = np.concatenate(zs)
            values = np.arange(0, 1, 1/8.)

            for idx in xrange(FLAGS.batch_size):
              for jdx in xrange(8):
                z_sample[idx][z_idx[idx/8][jdx]] = values[idx%8]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            save_images(samples, [8, 8], './samples/multiple_test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        elif OPTION == 8:
          counter = 0
          for _ in xrange(50):
            import scipy.misc
            z_idx = [[random.randint(0,99) for _ in xrange(10)] for _ in xrange(8)]

            zs = []
            for idx in xrange(8):
              z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
              zs.append(np.tile(z, (8, 1)))

            z_sample = np.concatenate(zs)
            values = np.arange(0, 1, 1/8.)

            for idx in xrange(FLAGS.batch_size):
              for jdx in xrange(8):
                z_sample[idx][z_idx[idx/8][jdx]] = values[idx%8]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            for sample in samples:
              scipy.misc.imsave('./samples/turing/%s.png' % counter, sample)
              counter += 1
        else:
          import scipy.misc
          from glob import glob

          samples = []
          fnames = glob("/Users/carpedm20/Downloads/x/1/*.png")
          fnames = sorted(fnames, key = lambda x: int(x.split("_")[1]) * 10000 + int(x.split('_')[2].split(".")[0]))
          for f in fnames:
            samples.append(scipy.misc.imread(f))
          make_gif(samples, './samples/training.gif', duration=8, true_image=True)
Example #14
def associate_data2dataDBN(cache=False):
    print "Testing Joint DBN which tries to learn even-oddness of numbers"
    # project set-up
    data_manager = store.StorageManager('associative_dbn_test', log=True)


    # Load mnist hand digits, class label is already set to binary
    train, valid, test = m_loader.load_digits(n=[500, 100, 100], digits=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                                              pre={'binary_label': True})
    train_x, train_y = train
    test_x, test_y = test
    train_x01 = m_loader.sample_image(train_y)

    dataset01 = m_loader.load_digits(n=[500, 100, 100], digits=[0, 1])

    # Initialise RBM parameters
    # fixed base train param
    base_tr = RBM.TrainParam(learning_rate=0.01,
                             momentum_type=RBM.CLASSICAL,
                             momentum=0.5,
                             weight_decay=0.0005,
                             sparsity_constraint=False,
                             epochs=20)

    # top layer parameters
    tr = RBM.TrainParam(learning_rate=0.1,
                        find_learning_rate=True,
                        momentum_type=RBM.NESTEROV,
                        momentum=0.5,
                        weight_decay=0.001,
                        sparsity_constraint=False,
                        epochs=20)

    tr_top = RBM.TrainParam(learning_rate=0.1,
                            find_learning_rate=True,
                            momentum_type=RBM.CLASSICAL,
                            momentum=0.5,
                            weight_decay=0.001,
                            sparsity_constraint=False,
                            epochs=20)


    # Layer 1
    # Layer 2
    # Layer 3
    topology = [784, 500, 500, 100]

    config = associative_dbn.DefaultADBNConfig()
    config.topology_left = [784, 500, 500, 100]
    config.topology_right = [784, 500, 500, 100]
    config.reuse_dbn = False
    config.top_rbm_params = tr_top
    config.base_rbm_params = [base_tr, tr, tr]

    for cd_type in [RBM.CLASSICAL, RBM.PERSISTENT]:
        for n_ass in [100, 250, 500, 750, 1000]:
            config.n_association = n_ass
            config.top_cd_type = cd_type

            # Construct DBN
            assoc_dbn = associative_dbn.AssociativeDBN(config=config, data_manager=data_manager)

            # Train
            assoc_dbn.train(train_x, train_x01, cache=cache, optimise=True)

            for n_recall in [1, 3, 5, 7, 10]:
                for n_think in [0, 1, 3, 5, 7, 10]:
                    # Reconstruct
                    sampled = assoc_dbn.recall(test_x, n_recall, n_think)

                    # Sample from top layer to generate data
                    sample_n = 100
                    utils.save_images(sampled, image_name='reconstructed_{}_{}_{}.png'.format(n_ass, n_recall, n_think),
                                      shape=(sample_n // 10, 10))

                    dataset01[2] = (theano.shared(sampled), test_y)
    def train(self):

        opti_D = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5, beta2=0.9).minimize(self.D_loss, var_list=self.d_vars)
        opti_G1 = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5, beta2=0.9).minimize(self.G_1_loss, var_list=self.g_1_vars)
        opti_G2 = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5, beta2=0.9).minimize(self.G_2_loss, var_list=self.g_2_vars)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:

            sess.run(init)
            summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(self.log_dir, sess.graph)

            # self.saver.restore(sess, self.pixel_model_path)
            step = 0

            print "Starting the training"
            while step <= self.max_iters:

                # optimization D
                dom_1_train_data_list, dom_1_label_list, dom_2_train_data_list, dom_2_label_list = self.data_ob.getNextBatch(
                    step, self.batch_size)

                dom_1_batch_images_array = self.data_ob.getShapeForData(dom_1_train_data_list)

                dom_2_batch_images_array = self.data_ob.getShapeForData(dom_2_train_data_list)

                start_time = time.time()

                f_d = {self.dom_1_images: dom_1_batch_images_array, self.dom_2_images: dom_2_batch_images_array,
                       self.dom_1_label: dom_1_label_list, self.dom_2_label: dom_2_label_list}

                f_d_1 = {self.dom_1_images: dom_1_batch_images_array, self.dom_1_label: dom_1_label_list,
                         self.dom_2_label: dom_2_label_list}
                f_d_2 = {self.dom_2_images: dom_2_batch_images_array, self.dom_1_label: dom_1_label_list,
                         self.dom_2_label: dom_2_label_list}

                sess.run(opti_D, feed_dict=f_d)
                sess.run(opti_G1, feed_dict=f_d_1)
                sess.run(opti_G2, feed_dict=f_d_2)

                end_time = time.time() - start_time

                summary_str = sess.run(summary_op, feed_dict=f_d)
                summary_writer.add_summary(summary_str, step)

                if step % 50 == 0:

                    d_loss, G_1_loss, G_1_resi_regu_loss, G_1_fm_loss, G_2_loss, G_2_resi_regu_loss, G_2_fm_loss \
                        = sess.run([self.D_loss, self.G_1_loss, self.G_1_resi_regu_loss,
                                    self.G_1_feature_mapping_loss, self.G_2_loss, self.G_2_resi_regu_loss,
                                    self.G_2_feature_mapping_loss], feed_dict=f_d)
                    print("step %d D_loss = %.7f, G_1_loss=%.7f, G_1_regu_loss=%.7f, G_1_fm_loss=%.7f, G_2_loss=%.7f,"
                          " G_2_regu_loss=%.7f, G_2_fm_loss=%.7f, Time=%.3f" % (
                              step, d_loss, G_1_loss, G_1_resi_regu_loss, G_1_fm_loss,
                              G_2_loss, G_2_resi_regu_loss, G_2_fm_loss, end_time))

                if np.mod(step, 200) == 0 and step != 0:

                    save_images(dom_1_batch_images_array[0:self.batch_size], [self.batch_size // 8, 8],
                                '{}/{:02d}_real_dom1.png'.format(self.sample_path, step))
                    save_images(dom_2_batch_images_array[0:self.batch_size], [self.batch_size // 8, 8],
                                '{}/{:02d}_real_dom2.png'.format(self.sample_path, step))

                    r1, x_tilde_1, r2, x_tilde_2 = sess.run([self.residual_img_1, self.x_tilde_1,
                                                             self.residual_img_2, self.x_tilde_2], feed_dict=f_d)

                    x_tilde_1 = np.clip(x_tilde_1, -1, 1)
                    x_tilde_2 = np.clip(x_tilde_2, -1, 1)

                    save_images(r1[0:self.batch_size], [self.batch_size // 8, 8], '{}/{:02d}_r1.png'.format(self.sample_path, step))
                    save_images(x_tilde_1[0:64], [self.batch_size // 8, 8], '{}/{:02d}_x_tilde1.png'.format(self.sample_path, step))

                    save_images(r2[0:self.batch_size], [self.batch_size // 8, 8], '{}/{:02d}_r2.png'.format(self.sample_path, step))
                    save_images(x_tilde_2[0:self.batch_size], [self.batch_size // 8, 8], '{}/{:02d}_x_tilde2.png'.format(self.sample_path, step))

                    self.saver.save(sess, self.pixel_model_path)

                step += 1

            save_path = self.saver.save(sess, self.pixel_model_path)
            print "Model saved in file: %s" % save_path
Example #16
    def train(self, args):

        opti_D = tf.train.AdamOptimizer(args.lr, beta1=args.beta1) \
                          .minimize(self.d_loss, var_list=self.d_vars)
        opti_G = tf.train.AdamOptimizer(args.lr, beta1=args.beta1) \
                          .minimize(self.g_loss, var_list=self.g_vars)
        opti_C = tf.train.AdamOptimizer(args.lr, beta1=args.beta1) \
                          .minimize(self.d_c_loss, var_list=self.c_vars)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            sess.run(init)
            if self.load:
                # load pretrained model
                print('loading:')
                self.saver = tf.train.import_meta_graph(
                    './model/model.ckpt-20201.meta'
                )  # default to save all variable
                self.saver.restore(sess,
                                   tf.train.latest_checkpoint('./model/'))

            self.writer = tf.summary.FileWriter("./logs", sess.graph)
            summary_writer = tf.summary.FileWriter(self.log_dir,
                                                   graph=sess.graph)

            step = 0
            while step <= self.training_step:
                realbatch_array, real_lungs, real_mediastinums, realmasks, real_labels = self.data_ob.getNext_batch(
                    step, batch_size=self.batch_size)
                batch_z = np.random.uniform(-1,
                                            1,
                                            size=[self.batch_size, self.z_dim])
                sess.run(
                    [opti_D],
                    feed_dict={
                        self.images: realbatch_array,
                        self.lungwindow: real_lungs,
                        self.mediastinumwindow: real_mediastinums,
                        self.masks: realmasks,
                        self.z: batch_z,
                        self.y: real_labels
                    })
                sess.run(
                    [opti_G],
                    feed_dict={
                        self.images: realbatch_array,
                        self.lungwindow: real_lungs,
                        self.mediastinumwindow: real_mediastinums,
                        self.masks: realmasks,
                        self.z: batch_z,
                        self.y: real_labels
                    })
                sess.run(
                    [opti_C],
                    feed_dict={
                        self.images: realbatch_array,
                        self.lungwindow: real_lungs,
                        self.mediastinumwindow: real_mediastinums,
                        self.masks: realmasks,
                        self.z: batch_z,
                        self.y: real_labels
                    })

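                # every 50 steps: dump train/eval/test sample grids and checkpoint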
                if np.mod(step, 50) == 1 and step != 0:
                    print('Saving...')
                    sample_images, lungwindow, mediastinumwindow = sess.run(
                        [
                            self.fake_B, self.fake_lungwindow,
                            self.fake_mediastinumwindow
                        ],
                        feed_dict={
                            self.images: realbatch_array,
                            self.lungwindow: real_lungs,
                            self.mediastinumwindow: real_mediastinums,
                            self.masks: realmasks,
                            self.z: batch_z,
                            self.y: real_labels
                        })
                    save_images(
                        sample_images, [8, 8],
                        './{}/{:04d}_sample.png'.format(self.train_dir, step))
                    save_images(
                        lungwindow, [8, 8],
                        './{}/{:04d}_lung.png'.format(self.train_dir, step))
                    save_images(
                        mediastinumwindow, [8, 8],
                        './{}/{:04d}_mediastinum.png'.format(
                            self.train_dir, step))
                    save_images(
                        realmasks, [8, 8],
                        './{}/{:04d}_mask.png'.format(self.train_dir, step))

                    print('save eval image')

                    real_labels = sample_label()
                    realmasks = sample_masks()
                    sample_images, lungwindow, mediastinumwindow = sess.run(
                        [
                            self.fake_B, self.fake_lungwindow,
                            self.fake_mediastinumwindow
                        ],
                        feed_dict={
                            self.masks: realmasks,
                            self.y: real_labels
                        })
                    save_images(
                        sample_images, [8, 8],
                        './{}/{:04d}_sample.png'.format(self.eval_dir, step))
                    save_images(
                        lungwindow, [8, 8],
                        './{}/{:04d}_lung.png'.format(self.eval_dir, step))
                    save_images(
                        mediastinumwindow, [8, 8],
                        './{}/{:04d}_mediastinum.png'.format(
                            self.eval_dir, step))
                    save_images(
                        realmasks, [8, 8],
                        './{}/{:04d}_mask.png'.format(self.eval_dir, step))

                    print('save test image')
                    real_labels = sample_label()
                    realmasks = sample_masks_test()
                    sample_images, lungwindow, mediastinumwindow = sess.run(
                        [
                            self.fake_B, self.fake_lungwindow,
                            self.fake_mediastinumwindow
                        ],
                        feed_dict={
                            self.masks: realmasks,
                            self.y: real_labels
                        })
                    save_images(
                        sample_images, [8, 8],
                        './{}/{:04d}_sample.png'.format(self.test_dir, step))
                    save_images(
                        lungwindow, [8, 8],
                        './{}/{:04d}_lung.png'.format(self.test_dir, step))
                    save_images(
                        mediastinumwindow, [8, 8],
                        './{}/{:04d}_mediastinum.png'.format(
                            self.test_dir, step))
                    save_images(
                        realmasks, [8, 8],
                        './{}/{:04d}_mask.png'.format(self.test_dir, step))
                    # save the model every 50 steps
                    self.saver.save(sess, self.model_path, global_step=step)

                step = step + 1

            save_path = self.saver.save(sess, self.model_path)
            print("Model saved in file: %s" % save_path)
Example #17
    def train(self, epochs):
        data_images_path = glob(
            os.path.join(self.conf.pic_dict, "*.%s" % self.conf.img_format))
        if (len(data_images_path) == 0):
            print("No Images here: %s" % self.conf.pic_dict)
            exit(1)
        data = [utils.imread(path) for path in data_images_path]

        data = [utils.transform(image) for image in data]

        #merged = tf.merge_all_summaries()
        #train_weiter = tf.train.SummaryWriter('./logs_sgan', self.sess.graph)
        #tf.summary.scalar("bob_input", self.bob_input)
        #merged_summary_op = tf.summary.merge_all()
        #summary_writer = tf.summary.FileWriter('./logs', self.sess.graph)
        self.sess.run(tf.global_variables_initializer())
        bob_results = []
        alice_results = []

        # pad the data list by repeating images until a full batch is available
        while len(data) < self.batch_size:
            data.append(data[len(data) % len(data_images_path)])

        if len(data) > 4096:
            data = data[0:4096]

        lens = len(data)
        input_data = 2 * np.random.random_integers(0, 1,
                                                   size=(4096, self.N)) - 1
        startInputIndex = 0
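        # cycle through the image data and the random bit inputs batch by batch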
        for i in range(epochs):
            startIndex = (i * self.batch_size) % lens
            endIndex = startIndex + self.batch_size
            if endIndex > lens:
                dataTrain = data[lens - self.batch_size:lens]
            else:
                dataTrain = data[startIndex:endIndex]
            if startInputIndex >= 4096:
                startInputIndex = startInputIndex - 4096
            input_data1 = input_data[startInputIndex:startInputIndex +
                                     self.batch_size]
            startInputIndex += self.batch_size
            #if i >=0 and i <= 30000:
            ##self.sess.run(self.alice_step_only, feed_dict = {self.data_images: data[ 0: self.batch_size]})
            #self.sess.run(self.alice_step_only, feed_dict = {self.data_images: data[ 0: self.batch_size]})
            #self.sess.run(self.alice_step, feed_dict = {self.data_images: dataTrain})
            #self.sess.run(self.alice_step, feed_dict = {self.data_images: dataTrain})
            self.sess.run(self.alice_step,
                          feed_dict={
                              self.data_images: dataTrain,
                              self.data_input: input_data1
                          })
            #if i > 30000:
            #    self.sess.run(self.bob_step, feed_dict= {self.data_images: data[0 : self.batch_size]})
            #    self.sess.run(self.eve_step, feed_dict= {self.data_images: data[0 : self.batch_size]})
            self.sess.run(self.bob_step,
                          feed_dict={
                              self.data_images: dataTrain,
                              self.data_input: input_data1
                          })
            #self.sess.run(self.eve_step, feed_dict= {self.data_images: data[0 : self.batch_size]})
            self.sess.run(self.eve_step,
                          feed_dict={
                              self.data_images: dataTrain,
                              self.data_input: input_data1
                          })
            #self.sess.run(self.alice_step, feed_dict = {self.data_images: data[ 0: self.batch_size]})
            if i % 100 == 0:
                bit_error, alice_error, eve_real, eve_fake = self.sess.run(
                    [
                        self.Bob_bit_error, self.Alice_bit_error,
                        self.Eve_real_error, self.Eve_fake_error
                    ],
                    feed_dict={
                        self.data_images: dataTrain,
                        self.data_input: input_data1
                    })
                print(
                    "step {}, bob bit error {}, alice bit error {}, Eve real {}, Eve fake {}"
                    .format(i, bit_error, alice_error, eve_real, eve_fake))
                bob_results.append(bit_error)
                alice_results.append(alice_error)
                #summary_str = self.sess.run(merged_summary_op, feed_dict = {self.data_images: data[ 0: self.batch_size]})
                #summary_writer.add_summary(summary_str, i)
            if (i > 48000) and (i % 100 == 0):
                c_output = self.sess.run(self.bob_input,
                                         feed_dict={
                                             self.data_images: dataTrain,
                                             self.data_input: input_data1
                                         })
                c_output = utils.inverse_transform(c_output)
                utils.save_images(c_output, i // 100, self.conf.save_pic_dict)
        # save the images
        #c_output = self.sess.run(self.bob_input, feed_dict= {self.data_images: da})
        return bob_results, alice_results
def _handler_mix(ir_path,
                 vis_path,
                 model_path,
                 model_pre_path,
                 ssim_weight,
                 index,
                 output_path=None):
    mix_block = []
    ir_img = get_train_images(ir_path, flag=False)
    vis_img = get_train_images(vis_path, flag=False)
    dimension = ir_img.shape
    ir_img = ir_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    vis_img = vis_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    ir_img = np.transpose(ir_img, (0, 2, 1, 3))
    vis_img = np.transpose(vis_img, (0, 2, 1, 3))

    print('img shape final:', ir_img.shape)
    with tf.Graph().as_default(), tf.Session() as sess:
        infrared_field = tf.placeholder(tf.float32,
                                        shape=ir_img.shape,
                                        name='content')
        visible_field = tf.placeholder(tf.float32,
                                       shape=vis_img.shape,
                                       name='style')

        # -----------------------------------------------

        dfn = DenseFuseNet(model_pre_path)

        #sess.run(tf.global_variables_initializer())

        enc_ir, enc_ir_res_block, enc_ir_block, enc_ir_block2 = dfn.transform_encoder(
            infrared_field)
        enc_vis, enc_vis_res_block, enc_vis_block, enc_vis_block2 = dfn.transform_encoder(
            visible_field)

        result = tf.placeholder(tf.float32, shape=enc_ir.shape, name='target')

        saver = tf.train.Saver()
        saver.restore(sess, model_path)

        enc_ir_temp, enc_ir_res_block_temp, enc_ir_block_temp, enc_ir_block2_temp = sess.run(
            [enc_ir, enc_ir_res_block, enc_ir_block, enc_ir_block2],
            feed_dict={infrared_field: ir_img})
        enc_vis_temp, enc_vis_res_block_temp, enc_vis_block_temp, enc_vis_block2_temp = sess.run(
            [enc_vis, enc_vis_res_block, enc_vis_block, enc_vis_block2],
            feed_dict={visible_field: vis_img})

        block = L1_norm(enc_ir_block_temp, enc_vis_block_temp)
        block2 = L1_norm(enc_ir_block2_temp, enc_vis_block2_temp)

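        # fuse residual-block features: L1-norm for blocks 0 and 2, Strategy
        # fusion for blocks 1 and 3, then concatenate along the channel axis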
        first_first = L1_norm(enc_ir_res_block_temp[0],
                              enc_vis_res_block_temp[0])
        first_second = Strategy(enc_ir_res_block_temp[1],
                                enc_vis_res_block_temp[1])
        #first_third = L1_norm_attention(enc_ir_res_block_temp[2],feation_ir, enc_vis_res_block_temp[2],feation_vis)
        #first_four = L1_norm_attention(enc_ir_res_block_temp[3],feation_ir, enc_vis_res_block_temp[3],feation_vis)
        first_third = L1_norm(enc_ir_res_block_temp[2],
                              enc_vis_res_block_temp[2])
        first_four = Strategy(enc_ir_res_block_temp[3],
                              enc_vis_res_block_temp[3])
        first_first = tf.concat(
            [first_first, tf.to_int32(first_second, name='ToInt')], 3)
        first_first = tf.concat(
            [first_first, tf.to_int32(first_third, name='ToInt')], 3)
        first_first = tf.concat([first_first, first_four], 3)

        first = first_first

        second = L1_norm(enc_ir_res_block_temp[6], enc_vis_res_block_temp[6])
        third = L1_norm(enc_ir_res_block_temp[9], enc_vis_res_block_temp[9])

        feature = 1 * first + 0.1 * second + 0.1 * third

        #---------------------------------------------------------
        # block=Strategy(enc_ir_block_temp,enc_vis_block_temp)
        # block2=L1_norm(enc_ir_block2_temp,enc_vis_block2_temp)
        #---------------------------------------------------------

        feature = feature.eval()

        output_image = dfn.transform_decoder(result, block, block2)

        # output = dfn.transform_decoder(feature)
        # print(type(feature))
        # output = sess.run(output_image, feed_dict={result: feature,enc_res_block:block,enc_res_block2:block2})
        output = sess.run(output_image, feed_dict={result: feature})

        save_images(ir_path,
                    output,
                    output_path,
                    prefix='fused' + str(index),
                    suffix='_mix_' + str(ssim_weight))
Example #19
def associate_data2dataDBN(cache=False):
    print "Testing Associative DBN which tries to learn even-oddness of numbers"
    # project set-up
    data_manager = store.StorageManager('Kanade/associative_dbn_test', log=True)


    # Load mnist hand digits, class label is already set to binary
    dataset = loader.load_kanade(n=500, emotions=['anger', 'sadness', 'happy'], pre={'scale2unit': True})
    train_x, train_y = dataset
    train_x01 = loader.sample_image(train_y)

    dataset01 = loader.load_kanade(n=500)

    # Initialise RBM parameters
    # fixed base train param
    base_tr = RBM.TrainParam(learning_rate=0.001,
                             momentum_type=RBM.CLASSICAL,
                             momentum=0.5,
                             weight_decay=0.0005,
                             sparsity_constraint=False,
                             epochs=20)

    # top layer parameters
    tr = RBM.TrainParam(learning_rate=0.001,
                        # find_learning_rate=True,
                        momentum_type=RBM.NESTEROV,
                        momentum=0.5,
                        weight_decay=0.001,
                        sparsity_constraint=False,
                        epochs=20)

    tr_top = RBM.TrainParam(learning_rate=0.001,
                            # find_learning_rate=True,
                            momentum_type=RBM.CLASSICAL,
                            momentum=0.5,
                            weight_decay=0.001,
                            sparsity_constraint=False,
                            epochs=20)


    # Layer 1
    # Layer 2
    # Layer 3
    # topology = [784, 500, 500, 100]

    config = associative_dbn.DefaultADBNConfig()
    config.topology_left = [625, 500, 500, 100]
    config.topology_right = [625, 500, 500, 100]
    config.reuse_dbn = False
    config.top_rbm_params = tr_top
    config.base_rbm_params = [base_tr, tr, tr]

    count = 0
    for cd_type in [RBM.CLASSICAL, RBM.PERSISTENT]:
        for n_ass in [100, 250, 500, 750, 1000]:
            config.n_association = n_ass
            config.top_cd_type = cd_type

            # Construct DBN
            ass_dbn = associative_dbn.AssociativeDBN(config=config, data_manager=data_manager)

            # Train
            for trainN in xrange(0, 5):
                ass_dbn.train(train_x, train_x01, cache=cache)

                for n_recall in [1, 3, 10]:
                    for n_think in [0, 1, 3, 5, 10]:
                        # Reconstruct
                        sampled = ass_dbn.recall(train_x, n_recall, n_think)

                        # Sample from top layer to generate data
                        sample_n = 100
                        utils.save_images(sampled,
                                          image_name='{}_reconstructed_{}_{}_{}.png'.format(count, n_ass, n_recall,
                                                                                            n_think),
                                          shape=(sample_n // 10, 10), img_shape=(25, 25))
                        count += 1
Example #20
def main(hparams):
    # Set up some stuff according to hparams
    utils.set_up_dir(hparams.ckpt_dir)
    utils.set_up_dir(hparams.sample_dir)
    utils.print_hparams(hparams)

    # encode
    x_ph = tf.placeholder(tf.float32, [None, hparams.n_input], name='x_ph')
    z_mean, z_log_sigma_sq = model_def.encoder(hparams,
                                               x_ph,
                                               'enc',
                                               reuse=False)

    # sample z with the reparameterization trick: z = mean + sigma * eps
    eps = tf.random_normal((hparams.batch_size, hparams.n_z),
                           0,
                           1,
                           dtype=tf.float32)
    z_sigma = tf.sqrt(tf.exp(z_log_sigma_sq))
    z = z_mean + z_sigma * eps

    # reconstruct
    logits, x_reconstr_mean, _ = model_def.generator(hparams,
                                                     z,
                                                     'gen',
                                                     reuse=False)

    # generator sampler
    z_ph = tf.placeholder(tf.float32, [None, hparams.n_z], name='z_ph')
    _, x_sample, _ = model_def.generator(hparams, z_ph, 'gen', reuse=True)

    # define loss and update op
    total_loss = model_def.get_loss(x_ph, logits, z_mean, z_log_sigma_sq)
    opt = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
    update_op = opt.minimize(total_loss)

    # Sanity checks
    for var in tf.global_variables():
        print(var.op.name)
    print('')

    # Get a new session
    sess = tf.Session()

    # Model checkpointing setup
    model_saver = tf.train.Saver()

    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # Attempt to restore variables from checkpoint
    start_epoch = utils.try_restore(hparams, sess, model_saver)

    # Get data iterator
    iterator = data_input.mnist_data_iteratior()

    # Training
    for epoch in range(start_epoch + 1, hparams.training_epochs):
        avg_loss = 0.0
        num_batches = hparams.num_samples // hparams.batch_size
        batch_num = 0
        for (x_batch_val, _) in iterator(hparams, num_batches):
            batch_num += 1
            feed_dict = {x_ph: x_batch_val}
            _, loss_val = sess.run([update_op, total_loss],
                                   feed_dict=feed_dict)
            avg_loss += loss_val / hparams.num_samples * hparams.batch_size

            if batch_num % 100 == 0:
                x_reconstr_mean_val = sess.run(x_reconstr_mean,
                                               feed_dict={x_ph: x_batch_val})

                z_val = np.random.randn(hparams.batch_size, hparams.n_z)
                x_sample_val = sess.run(x_sample, feed_dict={z_ph: z_val})

                utils.save_images(
                    np.reshape(x_reconstr_mean_val, [-1, 28, 28]), [10, 10],
                    '{}/reconstr_{:02d}_{:04d}.png'.format(
                        hparams.sample_dir, epoch, batch_num))
                utils.save_images(
                    np.reshape(x_batch_val, [-1, 28, 28]), [10, 10],
                    '{}/orig_{:02d}_{:04d}.png'.format(hparams.sample_dir,
                                                       epoch, batch_num))
                utils.save_images(
                    np.reshape(x_sample_val, [-1, 28, 28]), [10, 10],
                    '{}/sampled_{:02d}_{:04d}.png'.format(
                        hparams.sample_dir, epoch, batch_num))

        if epoch % hparams.summary_epoch == 0:
            print("Epoch:", '%04d' % (epoch),
                  'Avg loss = {:.9f}'.format(avg_loss))

        if epoch % hparams.ckpt_epoch == 0:
            save_path = os.path.join(hparams.ckpt_dir,
                                     'mnist_vae_model_hid' + str(hparams.n_z))
            model_saver.save(sess, save_path, global_step=epoch)

    save_path = os.path.join(hparams.ckpt_dir,
                             'mnist_vae_model' + str(hparams.n_z))
    model_saver.save(sess, save_path, global_step=hparams.training_epochs - 1)
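
Each example on this page calls some project-local save_images helper to tile a batch into a single PNG. As a point of reference, here is a minimal sketch of such a grid saver; the function name, the PIL dependency, and the [0, 1] input range are assumptions rather than any project's actual code.

import numpy as np
from PIL import Image

def save_images_grid(images, grid, path):
    """Tile (N, H, W[, C]) float images in [0, 1] into a rows x cols PNG."""
    images = np.asarray(images)
    if images.ndim == 3:                      # (N, H, W) -> (N, H, W, 1)
        images = images[..., None]
    rows, cols = grid
    n, h, w, c = images.shape
    canvas = np.zeros((rows * h, cols * w, c), dtype=images.dtype)
    for idx in range(min(n, rows * cols)):
        r, col = divmod(idx, cols)
        canvas[r * h:(r + 1) * h, col * w:(col + 1) * w] = images[idx]
    canvas = (np.clip(canvas, 0.0, 1.0) * 255).astype(np.uint8)
    if c == 1:
        canvas = canvas[..., 0]               # PIL expects (H, W) for grayscale
    Image.fromarray(canvas).save(path)
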
Ejemplo n.º 21
    def train(self, config):
        """Train DCGAN"""
        if config.dataset == 'mnist':
            data_X, data_y = self.load_mnist()
        else:
            data = glob(os.path.join("./data", config.dataset, "*.jpg"))
        if self.config.use_kernel:
            kernel_g_optim = tf.train.MomentumOptimizer(self.lr, 0.9) \
                      .minimize(self.kernel_loss, var_list=self.g_vars, global_step=self.global_step)
            if self.config.use_gan:
                kernel_d_optim = tf.train.MomentumOptimizer(self.config.kernel_d_learning_rate * self.lr, 0.9) \
                      .minimize(self.d_loss, var_list=self.dk_vars)
            else:
                kernel_d_optim = tf.train.MomentumOptimizer(self.config.kernel_d_learning_rate * self.lr, 0.9) \
                        .minimize((-1) * self.kernel_loss, var_list=self.dk_vars)

        self.sess.run(tf.global_variables_initializer())
        TrainSummary = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)

        sample_z = np.random.uniform(-1, 1, size=(self.sample_size, self.z_dim))

        if config.dataset == 'mnist':
            sample_images = data_X[0:self.sample_size]
        else:
            return
        counter = 1
        start_time = time.time()

        if self.load(self.checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        if config.dataset == 'mnist':
            batch_idxs = len(data_X) // config.batch_size
        else:
            data = glob(os.path.join("./data", config.dataset, "*.jpg"))
            batch_idxs = min(len(data), config.train_size) // config.batch_size
        lr = self.config.learning_rate
        d_loss = 0
        for it in range(self.config.max_iteration):
            if np.mod(it, batch_idxs) == 0:
                perm = np.random.permutation(len(data_X))
            if np.mod(it, 10000) == 1:
                lr = lr * self.config.decay_rate
            idx = np.mod(it, batch_idxs)
            batch_images = data_X[perm[idx*config.batch_size:
                                       (idx+1)*config.batch_size]]

            batch_z = np.random.uniform(
                -1, 1, [config.batch_size, self.z_dim]).astype(np.float32)

            if self.config.use_kernel:
                _, summary_str, step, kernel_loss = self.sess.run(
                    [kernel_d_optim, TrainSummary, self.global_step,
                     self.kernel_loss],
                    feed_dict={self.lr: lr,
                               self.images: batch_images,
                               self.z: batch_z})
                _, summary_str, step, kernel_loss = self.sess.run(
                    [kernel_g_optim, TrainSummary, self.global_step,
                     self.kernel_loss],
                    feed_dict={self.lr: lr,
                               self.images: batch_images,
                               self.z: batch_z})
            counter += 1
            if np.mod(counter, 10) == 1:
                if self.config.use_gan:
                    d_loss = self.sess.run(
                        self.d_loss,
                        feed_dict={self.lr: lr,
                                   self.images: batch_images,
                                   self.z: batch_z})
                self.writer.add_summary(summary_str, step)
                print(("optmmd Epoch: [%2d] time: %4.4f, kernel_loss: %.8f, "
                       "d_loss: %.8f") %
                      (it, time.time() - start_time, kernel_loss, d_loss))
            if np.mod(counter, 500) == 1:
                self.save(self.checkpoint_dir, counter)
                samples = self.sess.run(
                    self.sampler,
                    feed_dict={self.z: sample_z, self.images: sample_images})
                print(samples.shape)
                p = os.path.join(self.sample_dir, 'train_{:02d}.png'.format(it))
                save_images(samples[:64, :, :, :], [8, 8], p)
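
The kernel_loss driving both optimizers above appears to be an MMD criterion between real and generated batches; its kernel is defined elsewhere in the project. For reference, a hedged NumPy sketch of the biased MMD^2 estimator with a Gaussian (RBF) kernel, which may differ in detail from the project's version:

import numpy as np

def mmd2_rbf(x, y, sigma=1.0):
    # Biased MMD^2 estimate between two sample sets (rows = samples).
    def gram(a, b):
        # ||a_i - b_j||^2 via the expansion |a|^2 + |b|^2 - 2 a.b
        sq = (a**2).sum(1)[:, None] + (b**2).sum(1)[None, :] - 2.0 * a @ b.T
        return np.exp(-sq / (2.0 * sigma**2))
    return gram(x, x).mean() + gram(y, y).mean() - 2.0 * gram(x, y).mean()
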
Ejemplo n.º 22
    def train(self):

        opti_D = tf.train.AdamOptimizer(self.learning_rate,
                                        beta1=0.5,
                                        beta2=0.9).minimize(
                                            self.D_loss, var_list=self.d_vars)
        opti_G1 = tf.train.AdamOptimizer(self.learning_rate,
                                         beta1=0.5,
                                         beta2=0.9).minimize(
                                             self.G_1_loss,
                                             var_list=self.g_1_vars)
        opti_G2 = tf.train.AdamOptimizer(self.learning_rate,
                                         beta1=0.5,
                                         beta2=0.9).minimize(
                                             self.G_2_loss,
                                             var_list=self.g_2_vars)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:

            sess.run(init)
            summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(self.log_dir, sess.graph)

            # self.saver.restore(sess, self.pixel_model_path)
            step = 0

            print "Starting the training"
            while step <= self.max_iters:

                # optimization D
                dom_1_train_data_list, dom_1_label_list, dom_2_train_data_list, dom_2_label_list = self.data_ob.getNextBatch(
                    step, self.batch_size)

                dom_1_batch_images_array = self.data_ob.getShapeForData(
                    dom_1_train_data_list)

                dom_2_batch_images_array = self.data_ob.getShapeForData(
                    dom_2_train_data_list)

                start_time = time.time()

                f_d = {
                    self.dom_1_images: dom_1_batch_images_array,
                    self.dom_2_images: dom_2_batch_images_array,
                    self.dom_1_label: dom_1_label_list,
                    self.dom_2_label: dom_2_label_list
                }

                f_d_1 = {
                    self.dom_1_images: dom_1_batch_images_array,
                    self.dom_1_label: dom_1_label_list,
                    self.dom_2_label: dom_2_label_list
                }
                f_d_2 = {
                    self.dom_2_images: dom_2_batch_images_array,
                    self.dom_1_label: dom_1_label_list,
                    self.dom_2_label: dom_2_label_list
                }

                sess.run(opti_D, feed_dict=f_d)
                sess.run(opti_G1, feed_dict=f_d_1)
                sess.run(opti_G2, feed_dict=f_d_2)

                end_time = time.time() - start_time

                summary_str = sess.run(summary_op, feed_dict=f_d)
                summary_writer.add_summary(summary_str, step)

                if step % 50 == 0:

                    d_loss, G_1_loss, G_1_resi_regu_loss, G_1_fm_loss, G_2_loss, G_2_resi_regu_loss, G_2_fm_loss\
                        = sess.run([self.D_loss, self.G_1_loss, self.G_1_resi_regu_loss,
                                    self.G_1_feature_mapping_loss \
                                       , self.G_2_loss, self.G_2_resi_regu_loss,
                                    self.G_2_feature_mapping_loss], feed_dict=f_d)
                    print(
                        "step %d D_loss = %.7f, G_1_loss=%.7f, G_1_regu_loss=%.7f,  G_1_fm_loss=%.7f, G_2_loss=%.7f,"
                        " G_2_regu_loss=%.7f, G_2_fm_loss=%.7f , Time=%.3f" %
                        (step, d_loss, G_1_loss, G_1_resi_regu_loss,
                         G_1_fm_loss, G_2_loss, G_2_resi_regu_loss,
                         G_2_fm_loss, end_time))

                if np.mod(step, 200) == 0 and step != 0:

                    save_images(
                        dom_1_batch_images_array[0:self.batch_size],
                        [self.batch_size // 8, 8],
                        '{}/{:02d}_real_dom1.png'.format(
                            self.sample_path, step))
                    save_images(
                        dom_2_batch_images_array[0:self.batch_size],
                        [self.batch_size // 8, 8],
                        '{}/{:02d}_real_dom2.png'.format(
                            self.sample_path, step))

                    r1, x_tilde_1, r2, x_tilde_2 = sess.run([
                        self.residual_img_1, self.x_tilde_1,
                        self.residual_img_2, self.x_tilde_2
                    ],
                                                            feed_dict=f_d)

                    x_tilde_1 = np.clip(x_tilde_1, -1, 1)
                    x_tilde_2 = np.clip(x_tilde_2, -1, 1)

                    save_images(
                        r1[0:self.batch_size], [self.batch_size // 8, 8],
                        '{}/{:02d}_r1.png'.format(self.sample_path, step))
                    save_images(
                        x_tilde_1[0:self.batch_size], [self.batch_size // 8, 8],
                        '{}/{:02d}_x_tilde1.png'.format(
                            self.sample_path, step))

                    save_images(
                        r2[0:self.batch_size], [self.batch_size // 8, 8],
                        '{}/{:02d}_r2.png'.format(self.sample_path, step))
                    save_images(
                        x_tilde_2[0:self.batch_size], [self.batch_size // 8, 8],
                        '{}/{:02d}_x_tilde2.png'.format(
                            self.sample_path, step))

                    self.saver.save(sess, self.pixel_model_path)

                step += 1

            save_path = self.saver.save(sess, self.pixel_model_path)
            print "Model saved in file: %s" % save_path
Ejemplo n.º 23
def train_model(learning_rate=0.0009, n_epochs=50, batch_size=200):
    '''Train the model.'''

    #######################
    # Loading the dataset #
    #######################

    print('... Loading data')

    # Load the dataset on the CPU
    data_path = get_path()
    train_input_path = 'train_input_'
    train_target_path = 'train_target_'
    valid_input_path = 'valid_input_'
    valid_target_path = 'valid_target_'
    nb_train_batch = 9
    nb_valid_batch = 5

    # Creating symbolic variables
    batch = 200
    max_size = 25
    min_train_size = 13
    min_valid_size = 2
    input_channel = 3
    max_height = 64
    max_width = 64
    min_height = 32
    min_width = 32
    # Shape = (5000, 3, 64, 64)
    big_train_input = shared_GPU_data(shape=(batch * max_size, input_channel, max_height, max_width))
    big_valid_input = shared_GPU_data(shape=(batch * max_size, input_channel, max_height, max_width))
    # Shape = (5000, 3, 32, 32)
    big_train_target = shared_GPU_data(shape=(batch * max_size, input_channel, min_height, min_width))
    big_valid_target = shared_GPU_data(shape=(batch * max_size, input_channel, min_height, min_width))
    # Shape = (2600, 3, 64, 64)
    small_train_input = shared_GPU_data(shape=(batch * min_train_size, input_channel, max_height, max_width))
    # Shape = (2600, 3, 32, 32)
    small_train_target = shared_GPU_data(shape=(batch * min_train_size, input_channel, min_height, min_width))
    # Shape = (400, 3, 64, 64)
    small_valid_input = shared_GPU_data(shape=(batch * min_valid_size, input_channel, max_height, max_width))
    # Shape = (400, 3, 32, 32)
    small_valid_target = shared_GPU_data(shape=(batch * min_valid_size, input_channel, min_height, min_width))

    ######################
    # Building the model #
    ######################

    # Symbolic variables
    x = T.tensor4('x', dtype=theano.config.floatX)
    y = T.tensor4('y', dtype=theano.config.floatX)
    index = T.lscalar()

    # Creation of the model
    model = build_model2(input_var=x)
    output = layers.get_output(model, deterministic=True)
    params = layers.get_all_params(model, trainable=True)
    loss = T.mean(objectives.squared_error(output, y))
    updates = lasagne.updates.adam(loss, params, learning_rate=learning_rate)

    # Creation of theano functions
    train_big_model = theano.function([index], loss, updates=updates, allow_input_downcast=True,
                                      givens={x: big_train_input[index * batch_size: (index + 1) * batch_size],
                                              y: big_train_target[index * batch_size: (index + 1) * batch_size]})

    train_small_model = theano.function([index], loss, updates=updates, allow_input_downcast=True,
                                        givens={x: small_train_input[index * batch_size: (index + 1) * batch_size],
                                                y: small_train_target[index * batch_size: (index + 1) * batch_size]})

    big_valid_loss = theano.function([index], loss, allow_input_downcast=True,
                                     givens={x: big_valid_input[index * batch_size: (index + 1) * batch_size],
                                             y: big_valid_target[index * batch_size: (index + 1) * batch_size]})

    small_valid_loss = theano.function([index], loss, allow_input_downcast=True,
                                       givens={x: small_valid_input[index * batch_size: (index + 1) * batch_size],
                                               y: small_valid_target[index * batch_size: (index + 1) * batch_size]})

    idx = 50  # idx = index in this case
    pred_batch = 5
    predict_target = theano.function([index], output, allow_input_downcast=True,
                                     givens={x: small_valid_input[index * pred_batch: (index + 1) * pred_batch]})

    ###################
    # Train the model #
    ###################

    print('... Training')

    best_validation_loss = np.inf
    best_iter = 0
    epoch = 0

    # Valid images chosen when a better model is found
    batch_verification = 0
    num_images = range(idx * pred_batch, (idx + 1) * pred_batch)

    start_time = timeit.default_timer()

    while (epoch < n_epochs):
        epoch = epoch + 1
        n_train_batches = 0
        for i in range(nb_train_batch):
            if i == (nb_train_batch - 1):
                # Shape = (2600, 3, 64, 64) & Shape = (2600, 3, 32, 32)
                input, target = get_image(data_path, train_input_path, train_target_path, str(i))
                small_train_input.set_value(input)
                small_train_target.set_value(target)
                for j in range(min_train_size):
                    cost = train_small_model(j)
                    n_train_batches += 1
            else:
                # Shape = (10000, 3, 64, 64) & Shape = (10000, 3, 32, 32)
                input, target = get_image(data_path, train_input_path, train_target_path, str(i))
                big_train_input.set_value(input[0: batch * max_size])
                big_train_target.set_value(target[0: batch * max_size])
                for j in range(max_size):
                    cost = train_big_model(j)
                    n_train_batches += 1
                big_train_input.set_value(input[batch * max_size:])
                big_train_target.set_value(target[batch * max_size:])
                for j in range(max_size):
                    cost = train_big_model(j)
                    n_train_batches += 1

        validation_losses = []
        for i in range(nb_valid_batch):
            if i == (nb_valid_batch - 1):
                # Shape = (400, 3, 64, 64) & Shape = (400, 3, 32, 32)
                input, target = get_image(data_path, valid_input_path, valid_target_path, str(i))
                small_valid_input.set_value(input)
                small_valid_target.set_value(target)
                for j in range(min_valid_size):
                    validation_losses.append(small_valid_loss(j))
            else:
                # Shape = (10000, 3, 64, 64) & Shape = (10000, 3, 32, 32)
                input, target = get_image(data_path, valid_input_path, valid_target_path, str(i))
                big_valid_input.set_value(input[0: batch * max_size])
                big_valid_target.set_value(target[0: batch * max_size])
                for j in range(max_size):
                    validation_losses.append(big_valid_loss(j))
                big_valid_input.set_value(input[batch * max_size:])
                big_valid_target.set_value(target[batch * max_size:])
                for j in range(max_size):
                    validation_losses.append(big_valid_loss(j))

        this_validation_loss = np.mean(validation_losses)

        print('epoch %i, minibatch %i/%i, validation error %f %%' %
              (epoch, n_train_batches, n_train_batches, this_validation_loss * 100.))

        # if we got the best validation score until now
        if this_validation_loss < best_validation_loss:
            # save best validation score and iteration number
            best_validation_loss = this_validation_loss
            best_iter = epoch

            # save the model and a bunch of valid pictures
            print('... saving model and valid images')

            np.savez('best_cnn_model.npz', *layers.get_all_param_values(model))
            # Shape = (10000, 3, 64, 64) & Shape = (10000, 3, 32, 32)
            input, target = get_image(data_path, valid_input_path, valid_target_path, str(batch_verification))
            small_valid_input.set_value(input[0: batch * min_valid_size])
            input = input[num_images]
            target = target[num_images]
            output = predict_target(idx)
            save_images(input=input, target=target, output=output, nbr_images=len(num_images), iteration=epoch)

    end_time = timeit.default_timer()

    print('Optimization complete.')
    print('Best validation score of %f %% obtained at epoch %i' %
          (best_validation_loss * 100., best_iter))
    print('The code ran for %.2fm' % ((end_time - start_time) / 60.))
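
shared_GPU_data above is a project helper that pre-allocates device-resident buffers, so the givens slicing in the compiled Theano functions indexes GPU memory instead of copying from the host on every call. A plausible minimal sketch; the zero initialization and borrow flag are assumptions:

import numpy as np
import theano

def shared_GPU_data(shape):
    # Pre-allocate a floatX buffer on the device; the training loop then
    # refills it chunk by chunk with .set_value() as batches are loaded.
    return theano.shared(np.zeros(shape, dtype=theano.config.floatX),
                         borrow=True)
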
Ejemplo n.º 24
File: main.py Project: ChicoOu/dl
    def recons_random(self, change_speed=0.3):
        baseline = [
            [
                -0.284, -0.254, 0.136, -0.224, 0.28, 0.271, 0.192, 0.328,
                -0.273, -0.189, -0.206, -0.0693, -0.317, -0.231, -0.278, 0.199
            ],
            [
                0.278, -0.289, -0.216, 0.253, -0.189, 0.131, -0.265, -0.246,
                0.3, 0.243, 0.343, -0.251, -0.156, 0.0995, 0.23, -0.253
            ],
            [
                0.263, -0.161, -0.234, -0.176, -0.187, 0.336, -0.308, 0.254,
                0.292, -0.268, -0.228, 0.222, -0.28, -0.168, 0.169, 0.253
            ],
            [
                -0.196, 0.221, -0.231, -0.146, -0.213, 0.351, 0.184, 0.353,
                0.189, -0.227, -0.213, 0.113, 0.206, -0.242, -0.267, -0.238
            ],
            [
                0.262, -0.349, -0.23, 0.254, 0.207, -0.163, -0.223, -0.156,
                0.271, 0.307, 0.295, -0.187, 0.212, -0.0903, 0.28, -0.228
            ],
            [
                -0.261, 0.226, 0.233, -0.173, 0.181, 0.251, 0.174, 0.282,
                0.126, -0.351, -0.19, -0.263, 0.263, 0.175, -0.223, 0.278
            ],
            [
                0.297, 0.274, -0.139, -0.326, 0.312, -0.149, -0.374, 0.0468,
                0.118, -0.299, 0.306, 0.137, -0.208, -0.277, -0.1, -0.138
            ],
            [
                -0.281, 0.273, 0.198, 0.221, -0.34, -0.241, 0.188, 0.24, 0.135,
                -0.178, 0.254, -0.271, 0.195, -0.229, 0.313, 0.131
            ],
            [
                -0.202, -0.13, -0.209, -0.191, -0.251, 0.121, -0.37, -0.331,
                -0.335, -0.277, -0.168, 0.251, -0.226, 0.26, -0.126, -0.165
            ],
            [
                -0.206, -0.143, 0.193, -0.0517, -0.123, -0.0842, 0.134, -0.098,
                0.184, 0.0991, 0.0826, 0.138, 0.119, 0.211, 0.212, -0.125
            ]
        ]

        with self.sv.managed_session(config=self.config) as sess:
            for which_number in range(10):
                input_random = np.zeros(shape=[self.batch_size, 16],
                                        dtype=np.float32)
                for index in [0, 1]:
                    for row_index in range(8):
                        start_index = row_index * 8
                        input_random[start_index, :] = baseline[which_number]
                        for col_index in range(1, 8):
                            now_data = np.copy(baseline[which_number])
                            now_data[row_index + index *
                                     8] = (col_index - 4) * change_speed
                            input_random[start_index + col_index, :] = now_data
                            pass
                        pass

                    decoded = sess.run(
                        self.capsNet.decoded,
                        feed_dict={self.capsNet.recons_input: input_random})
                    save_images(
                        decoded,
                        result_file_name="recons/random_{}_{}_{}.bmp".format(
                            which_number, change_speed, index),
                        height_number=8)
                pass

        pass
Ejemplo n.º 25
File: main.py Project: ChicoOu/dl
    def recons_random_slow(self):
        baseline = [
            [
                -0.284, -0.254, 0.136, -0.224, 0.28, 0.271, 0.192, 0.328,
                -0.273, -0.189, -0.206, -0.0693, -0.317, -0.231, -0.278, 0.199
            ],
            [
                0.278, -0.289, -0.216, 0.253, -0.189, 0.131, -0.265, -0.246,
                0.3, 0.243, 0.343, -0.251, -0.156, 0.0995, 0.23, -0.253
            ],
            [
                0.263, -0.161, -0.234, -0.176, -0.187, 0.336, -0.308, 0.254,
                0.292, -0.268, -0.228, 0.222, -0.28, -0.168, 0.169, 0.253
            ],
            [
                -0.196, 0.221, -0.231, -0.146, -0.213, 0.351, 0.184, 0.353,
                0.189, -0.227, -0.213, 0.113, 0.206, -0.242, -0.267, -0.238
            ],
            [
                0.262, -0.349, -0.23, 0.254, 0.207, -0.163, -0.223, -0.156,
                0.271, 0.307, 0.295, -0.187, 0.212, -0.0903, 0.28, -0.228
            ],
            [
                -0.261, 0.226, 0.233, -0.173, 0.181, 0.251, 0.174, 0.282,
                0.126, -0.351, -0.19, -0.263, 0.263, 0.175, -0.223, 0.278
            ],
            [
                0.297, 0.274, -0.139, -0.326, 0.312, -0.149, -0.374, 0.0468,
                0.118, -0.299, 0.306, 0.137, -0.208, -0.277, -0.1, -0.138
            ],
            [
                -0.281, 0.273, 0.198, 0.221, -0.34, -0.241, 0.188, 0.24, 0.135,
                -0.178, 0.254, -0.271, 0.195, -0.229, 0.313, 0.131
            ],
            [
                -0.202, -0.13, -0.209, -0.191, -0.251, 0.121, -0.37, -0.331,
                -0.335, -0.277, -0.168, 0.251, -0.226, 0.26, -0.126, -0.165
            ],
            [
                -0.206, -0.143, 0.193, -0.0517, -0.123, -0.0842, 0.134, -0.098,
                0.184, 0.0991, 0.0826, 0.138, 0.119, 0.211, 0.212, -0.125
            ]
        ]

        change_speed = (0.5 - -0.5) / 63
        with self.sv.managed_session(config=self.config) as sess:
            for number_index in range(10):
                decodes = []
                for attr_index in range(len(baseline[0])):
                    input_random = np.zeros(shape=[self.batch_size, 16],
                                            dtype=np.float32)
                    input_random[0, :] = baseline[number_index]
                    for col_index in range(1, self.batch_size):
                        now_data = np.copy(baseline[number_index])
                        now_data[attr_index] = (col_index - 32) * change_speed
                        input_random[col_index, :] = now_data
                        pass

                    decoded = sess.run(
                        self.capsNet.decoded,
                        feed_dict={self.capsNet.recons_input: input_random})
                    decodes.extend(decoded)
                    pass

                save_images(
                    decodes,
                    result_file_name="recons/random_{}_{:.4}.bmp".format(
                        number_index, change_speed),
                    height_number=16)
            pass

        pass
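
Both reconstruction examples probe the 16-D capsule code by sweeping one coordinate around a per-digit baseline and decoding every step. The grid construction itself is model-independent; a simplified NumPy sketch of the same traversal pattern (unlike the originals, every row here is a sweep step, with no dedicated baseline row):

import numpy as np

def traversal_batch(baseline, attr_index, n_steps=64, lo=-0.5, hi=0.5):
    # Repeat the baseline vector n_steps times, then sweep one attribute
    # linearly across [lo, hi] while all other coordinates stay fixed.
    batch = np.tile(np.asarray(baseline, dtype=np.float32), (n_steps, 1))
    batch[:, attr_index] = np.linspace(lo, hi, n_steps)
    return batch
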
Ejemplo n.º 26
    def train(self, training_steps, summary_steps, checkpoint_steps,
              save_steps):
        step_num = 0  # save checkpoint format: model-num.*.
        # restore last checkpoint
        latest_checkpoint = tf.train.latest_checkpoint(
            "model_output_20180115112852/checkpoint"
        )  # self.checkpoint_dir, or "", or appointed path.

        if latest_checkpoint:
            step_num = int(os.path.basename(latest_checkpoint).split("-")[1])
            assert step_num > 0, "Please ensure checkpoint format is model-*.*."
            self.saver.restore(self.sess, latest_checkpoint)
            logging.info(
                "{}: Resume training from step {}. Loaded checkpoint {}".
                format(datetime.now(), step_num, latest_checkpoint))
        else:
            self.sess.run(
                tf.global_variables_initializer())  # init all variables
            logging.info("{}: Init new training".format(datetime.now()))

        # data
        reader = Read_TFRecords(filename=self.training_set,
                                batch_size=self.batch_size,
                                image_h=self.image_h,
                                image_w=self.image_w,
                                image_c=self.image_c)
        tfrecord_clean_images, tfrecord_noisy_images = reader.read()

        self.coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=self.sess,
                                               coord=self.coord)
        # train
        try:
            c_time = time.time()
            for c_step in range(step_num + 1, training_steps + 1):
                # Generate clean images and noisy images.
                batch_clean_images, batch_noisy_images = self.sess.run(
                    [tfrecord_clean_images, tfrecord_noisy_images])
                # print("clean image shape: {}".format(batch_clean_images.shape))
                # print("noisy image shape: {}".format(batch_noisy_images.shape))

                c_feed_dict = {
                    self.clean_images: batch_clean_images,
                    self.noisy_images: batch_noisy_images
                }
                self.ops = [self.memnet_opt]
                self.sess.run(self.ops, feed_dict=c_feed_dict)

                # save summary
                if c_step % summary_steps == 0:
                    c_summary = self.sess.run(self.summary,
                                              feed_dict=c_feed_dict)
                    self.writer.add_summary(c_summary, c_step)

                    e_time = time.time() - c_time
                    time_periter = e_time / summary_steps
                    logging.info("{}: Iteration_{} ({:.4f}s/iter) {}".format(
                        datetime.now(), c_step, time_periter,
                        self._print_summary(c_summary)))
                    c_time = time.time()  # update time

                # save checkpoint
                if c_step % checkpoint_steps == 0:
                    self.saver.save(self.sess,
                                    os.path.join(self.checkpoint_dir,
                                                 self.checkpoint_prefix),
                                    global_step=c_step)
                    logging.info("{}: Iteration_{} Saved checkpoint".format(
                        datetime.now(), c_step))

                # save images
                if c_step % save_steps == 0:
                    compress_images, recovery_images, real_images = self.sess.run(
                        [
                            self.noisy_images, self.pre_images,
                            self.clean_images
                        ],
                        feed_dict=c_feed_dict)
                    # numpy ndarray.
                    save_images(
                        compress_images, recovery_images, real_images,
                        './{}/train_{:06d}.png'.format(self.sample_dir,
                                                       c_step))

        except KeyboardInterrupt:
            print('Interrupted')
            self.coord.request_stop()
        except Exception as e:
            self.coord.request_stop(e)
        finally:
            # When done, ask the threads to stop.
            self.coord.request_stop()
            self.coord.join(threads)

        logging.info("{}: Done training".format(datetime.now()))
Ejemplo n.º 27
def _handler_rgb_l1(ir_path,
                    vis_path,
                    model_path,
                    model_pre_path,
                    ssim_weight,
                    index,
                    output_path=None):
    # ir_img = get_train_images(ir_path, flag=False)
    # vis_img = get_train_images(vis_path, flag=False)
    ir_img = get_train_images_rgb(ir_path, flag=False)
    vis_img = get_train_images_rgb(vis_path, flag=False)
    dimension = ir_img.shape

    ir_img = ir_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    vis_img = vis_img.reshape([1, dimension[0], dimension[1], dimension[2]])

    #ir_img = np.transpose(ir_img, (0, 2, 1, 3))
    #vis_img = np.transpose(vis_img, (0, 2, 1, 3))

    ir_img1 = ir_img[:, :, :, 0]
    ir_img1 = ir_img1.reshape([1, dimension[0], dimension[1], 1])
    ir_img2 = ir_img[:, :, :, 1]
    ir_img2 = ir_img2.reshape([1, dimension[0], dimension[1], 1])
    ir_img3 = ir_img[:, :, :, 2]
    ir_img3 = ir_img3.reshape([1, dimension[0], dimension[1], 1])

    vis_img1 = vis_img[:, :, :, 0]
    vis_img1 = vis_img1.reshape([1, dimension[0], dimension[1], 1])
    vis_img2 = vis_img[:, :, :, 1]
    vis_img2 = vis_img2.reshape([1, dimension[0], dimension[1], 1])
    vis_img3 = vis_img[:, :, :, 2]
    vis_img3 = vis_img3.reshape([1, dimension[0], dimension[1], 1])

    print('img shape final:', ir_img1.shape)

    with tf.Graph().as_default(), tf.Session() as sess:
        infrared_field = tf.placeholder(tf.float32,
                                        shape=ir_img1.shape,
                                        name='content')
        visible_field = tf.placeholder(tf.float32,
                                       shape=ir_img1.shape,
                                       name='style')

        dfn = DenseFuseNet(model_pre_path)

        enc_ir, enc_ir_res_block = dfn.transform_encoder(infrared_field)
        enc_vis, enc_vis_res_block = dfn.transform_encoder(visible_field)

        target = tf.placeholder(tf.float32, shape=enc_ir.shape, name='target')

        output_image = dfn.transform_decoder(target)

        # restore the trained model and run the style transferring
        saver = tf.train.Saver()
        saver.restore(sess, model_path)

        enc_ir_temp, enc_vis_temp = sess.run([enc_ir, enc_vis],
                                             feed_dict={
                                                 infrared_field: ir_img1,
                                                 visible_field: vis_img1
                                             })
        feature = L1_norm(enc_ir_temp, enc_vis_temp)
        output1 = sess.run(output_image, feed_dict={target: feature})

        enc_ir_temp, enc_vis_temp = sess.run([enc_ir, enc_vis],
                                             feed_dict={
                                                 infrared_field: ir_img2,
                                                 visible_field: vis_img2
                                             })
        feature = L1_norm(enc_ir_temp, enc_vis_temp)
        output2 = sess.run(output_image, feed_dict={target: feature})

        enc_ir_temp, enc_vis_temp = sess.run([enc_ir, enc_vis],
                                             feed_dict={
                                                 infrared_field: ir_img3,
                                                 visible_field: vis_img3
                                             })
        feature = L1_norm(enc_ir_temp, enc_vis_temp)
        output3 = sess.run(output_image, feed_dict={target: feature})

        output1 = output1.reshape([1, dimension[0], dimension[1]])
        output2 = output2.reshape([1, dimension[0], dimension[1]])
        output3 = output3.reshape([1, dimension[0], dimension[1]])

        output = np.stack((output1, output2, output3), axis=-1)
        #output = np.transpose(output, (0, 2, 1, 3))
        save_images(ir_path,
                    output,
                    output_path,
                    prefix='fused' + str(index),
                    suffix='_densefuse_l1norm_' + str(ssim_weight))
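
The L1_norm strategy used above fuses the two encoders' feature maps by weighting each with its relative L1 activity, in the spirit of DenseFuse. A simplified NumPy sketch of that weighting, as a reference only; the project's exact computation may differ:

import numpy as np

def l1_norm_fuse(feat_a, feat_b, eps=1e-8):
    # Per-position activity = L1 norm across channels; fuse with the
    # resulting soft weights so the more active source dominates.
    act_a = np.abs(feat_a).sum(axis=-1, keepdims=True)
    act_b = np.abs(feat_b).sum(axis=-1, keepdims=True)
    w_a = act_a / (act_a + act_b + eps)
    return w_a * feat_a + (1.0 - w_a) * feat_b
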
Ejemplo n.º 28
	def __init__(self, args):
		# parameters
		self.epoch = args.epoch
		self.sample_num = 19
		self.batch_size = args.batch_size
		self.save_dir = args.save_dir
		self.result_dir = args.result_dir
		self.dataset = args.dataset
		self.dataroot_dir = args.dataroot_dir
		self.log_dir = args.log_dir
		self.gpu_mode = args.gpu_mode
		self.num_workers = args.num_workers
		self.model_name = args.gan_type
		self.loss_option = args.loss_option
		if len(args.loss_option) > 0:
			self.model_name = self.model_name + '_' + args.loss_option
			self.loss_option = args.loss_option.split(',')
		if len(args.comment) > 0:
			self.model_name = self.model_name + '_' + args.comment
		self.lambda_ = 0.25

		if self.dataset == 'MultiPie' or self.dataset == 'miniPie':
			self.Nd = 337 # 200
			self.Np = 9
			self.Ni = 20
			self.Nz = 50
		elif self.dataset == 'Bosphorus':
			self.Nz = 50
		elif self.dataset == 'CASIA-WebFace':
			self.Nd = 10885 
			self.Np = 13
			self.Ni = 20
			self.Nz = 50

		if not os.path.exists(self.result_dir + '/' + self.dataset + '/' + self.model_name):
			os.makedirs(self.result_dir + '/' + self.dataset + '/' + self.model_name)
		if not os.path.exists(os.path.join(self.save_dir, self.dataset, self.model_name)):
			os.makedirs(os.path.join(self.save_dir, self.dataset, self.model_name))

		# load dataset
		data_dir = os.path.join( self.dataroot_dir, self.dataset )
		if self.dataset == 'mnist':
			self.data_loader = DataLoader(datasets.MNIST(data_dir, train=True, download=True,
																		  transform=transforms.Compose(
																			  [transforms.ToTensor()])),
														   batch_size=self.batch_size, shuffle=True)
		elif self.dataset == 'fashion-mnist':
			self.data_loader = DataLoader(
				datasets.FashionMNIST(data_dir, train=True, download=True, transform=transforms.Compose(
					[transforms.ToTensor()])),
				batch_size=self.batch_size, shuffle=True)
		elif self.dataset == 'celebA':
			self.data_loader = utils.CustomDataLoader(data_dir, transform=transforms.Compose(
				[transforms.CenterCrop(160), transforms.Scale(64), transforms.ToTensor()]), batch_size=self.batch_size,
												 shuffle=True)
		elif self.dataset == 'MultiPie' or self.dataset == 'miniPie':
			self.data_loader = DataLoader( utils.MultiPie(data_dir,
					transform=transforms.Compose(
					[transforms.Scale(100), transforms.RandomCrop(96), transforms.ToTensor()])),
				batch_size=self.batch_size, shuffle=True) 
		elif self.dataset == 'CASIA-WebFace':
			self.data_loader = utils.CustomDataLoader(data_dir, transform=transforms.Compose(
				[transforms.Scale(100), transforms.RandomCrop(96), transforms.ToTensor()]), batch_size=self.batch_size,
												 shuffle=True)
		elif self.dataset == 'Bosphorus':
			self.data_loader = DataLoader( utils.Bosphorus(data_dir, use_image=True, skipCodes=['YR','PR','CR'],
											transform=transforms.ToTensor(),
											shape=128, image_shape=256),
											batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers)
			self.Nid = 105
			self.Npcode = len(self.data_loader.dataset.posecodemap)

		# fixed samples for reconstruction visualization
		print( 'Generating fixed sample for visualization...' )
		nPcodes = self.Npcode//4
		nSamples = self.sample_num-nPcodes
		sample_x2D_s = []
		sample_x3D_s = []
		for iB, (sample_x3D_,sample_y_,sample_x2D_) in enumerate(self.data_loader):
			sample_x2D_s.append( sample_x2D_ )
			sample_x3D_s.append( sample_x3D_ )
			if iB > nSamples // self.batch_size:
				break
		sample_x2D_s = torch.cat( sample_x2D_s )[:nSamples,:,:,:]
		sample_x3D_s = torch.cat( sample_x3D_s )[:nSamples,:,:,:]
		sample_x2D_s = torch.split( sample_x2D_s, 1 )
		sample_x3D_s = torch.split( sample_x3D_s, 1 )
		sample_x2D_s += (sample_x2D_s[0],)*nPcodes
		sample_x3D_s += (sample_x3D_s[0],)*nPcodes
		self.sample_x2D_ = torch.cat( sample_x2D_s )
		self.sample_x3D_ = torch.cat( sample_x3D_s )
		self.sample_pcode_ = torch.zeros( nSamples+nPcodes, self.Npcode )
		self.sample_pcode_[:nSamples,0]=1
		for iS in range( nPcodes ):
			ii = iS%self.Npcode
			self.sample_pcode_[iS+nSamples,ii] = 1
		self.sample_z_ = torch.rand( nSamples+nPcodes, self.Nz )

		fname = os.path.join( self.result_dir, self.dataset, self.model_name, 'samples.png' )
		nSpS = int(math.ceil( math.sqrt( nSamples+nPcodes ) )) # num samples per side
		utils.save_images(self.sample_x2D_[:nSpS*nSpS,:,:,:].numpy().transpose(0,2,3,1), [nSpS,nSpS],fname)

		fname = os.path.join( self.result_dir, self.dataset, self.model_name, 'sampleGT.npy')
		self.sample_x3D_.numpy().squeeze().dump( fname )

		if self.gpu_mode:
			self.sample_x2D_ = Variable(self.sample_x2D_.cuda(), volatile=True)
			self.sample_x3D_ = Variable(self.sample_x3D_.cuda(), volatile=True)
			self.sample_z_ = Variable(self.sample_z_.cuda(), volatile=True)
			self.sample_pcode_ = Variable(self.sample_pcode_.cuda(), volatile=True)
		else:
			self.sample_x2D_ = Variable(self.sample_x2D_, volatile=True)
			self.sample_x3D_ = Variable(self.sample_x3D_, volatile=True)
			self.sample_z_ = Variable(self.sample_z_, volatile=True)
			self.sample_pcode_ = Variable(self.sample_pcode_, volatile=True)

		# networks init
		self.G_3Dto2D = generator3Dto2D(self.Nid, self.Npcode, self.Nz)
		self.D_2D = discriminator2D(self.Nid, self.Npcode)
		
		self.G_3Dto2D_optimizer = optim.Adam(self.G_3Dto2D.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
		self.D_2D_optimizer = optim.Adam(self.D_2D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

		if self.gpu_mode:
			self.G_3Dto2D.cuda()
			self.D_2D.cuda()
			self.CE_loss = nn.CrossEntropyLoss().cuda()
			self.BCE_loss = nn.BCELoss().cuda()
			self.MSE_loss = nn.MSELoss().cuda()
			self.L1_loss = nn.L1Loss().cuda()
		else:
			self.CE_loss = nn.CrossEntropyLoss()
			self.BCE_loss = nn.BCELoss()
			self.MSE_loss = nn.MSELoss()
			self.L1_loss = nn.L1Loss()

		print('init done')
Ejemplo n.º 29
def _handler_mix_a(ir_path,
                   vis_path,
                   model_path,
                   model_pre_path,
                   model_path_a,
                   model_pre_path_a,
                   ssim_weight,
                   index,
                   output_path=None):
    ir_img = get_train_images(ir_path, flag=False)
    vis_img = get_train_images(vis_path, flag=False)
    dimension = ir_img.shape
    ir_img = ir_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    vis_img = vis_img.reshape([1, dimension[0], dimension[1], dimension[2]])
    ir_img = np.transpose(ir_img, (0, 2, 1, 3))
    vis_img = np.transpose(vis_img, (0, 2, 1, 3))

    g2 = tf.Graph()  # graph loaded into Session 2

    sess2 = tf.Session(graph=g2)  # Session 2

    with sess2.as_default():
        with g2.as_default(), tf.Session() as sess:
            infrared_field = tf.placeholder(tf.float32,
                                            shape=ir_img.shape,
                                            name='content')
            visible_field = tf.placeholder(tf.float32,
                                           shape=vis_img.shape,
                                           name='style')

            dfn = DenseFuseNet(model_pre_path)

            # sess.run(tf.global_variables_initializer())

            enc_ir, enc_ir_res_block, enc_ir_block, enc_ir_block2 = dfn.transform_encoder(
                infrared_field)
            enc_vis, enc_vis_res_block, enc_vis_block, enc_vis_block2 = dfn.transform_encoder(
                visible_field)

            result = tf.placeholder(tf.float32,
                                    shape=enc_ir.shape,
                                    name='target')

            saver = tf.train.Saver()
            saver.restore(sess, model_path)
            feature_a, feature_b = _get_attention(ir_path, vis_path,
                                                  model_path_a,
                                                  model_pre_path_a)
            print("______+++________")
            print(feature_a[0].shape)

            enc_ir_temp, enc_ir_res_block_temp, enc_ir_block_temp, enc_ir_block2_temp = sess.run(
                [enc_ir, enc_ir_res_block, enc_ir_block, enc_ir_block2],
                feed_dict={infrared_field: ir_img})
            enc_vis_temp, enc_vis_res_block_temp, enc_vis_block_temp, enc_vis_block2_temp = sess.run(
                [enc_vis, enc_vis_res_block, enc_vis_block, enc_vis_block2],
                feed_dict={visible_field: vis_img})

            # ------------------------------------------------------------------------------------------------------------
            #------------------------------------------------------------------------------------------------------------
            block = L1_norm_attention(enc_ir_block_temp, feature_a,
                                      enc_vis_block_temp, feature_b)
            block2 = L1_norm_attention(enc_ir_block2_temp, feature_a,
                                       enc_vis_block2_temp, feature_b)

            first_first = Strategy(enc_ir_res_block_temp[0],
                                   enc_vis_res_block_temp[0])
            #first_first = L1_norm(enc_ir_res_block_temp[0], enc_vis_res_block_temp[0])
            first_second = Strategy(enc_ir_res_block_temp[1],
                                    enc_vis_res_block_temp[1])
            #first_second = L1_norm(enc_ir_res_block_temp[1], enc_vis_res_block_temp[1])
            #first_third = Strategy(enc_ir_res_block_temp[2], enc_vis_res_block_temp[2])
            first_third = L1_norm_attention(enc_ir_res_block_temp[2],
                                            feature_a,
                                            enc_vis_res_block_temp[2],
                                            feature_b)
            #first_four = Strategy(enc_ir_res_block_temp[3], enc_vis_res_block_temp[3])
            first_four = L1_norm_attention(enc_ir_res_block_temp[3], feature_a,
                                           enc_vis_res_block_temp[3],
                                           feature_b)
            # concatenate the four fused blocks along the channel axis
            # (all kept as float32 so tf.concat accepts them)
            first_first = tf.concat(
                [first_first, first_second, first_third, first_four], 3)

            first = first_first

            second = L1_norm_attention(enc_ir_res_block_temp[6], feature_a,
                                       enc_vis_res_block_temp[6], feature_b)
            third = Strategy(enc_ir_res_block_temp[9],
                             enc_vis_res_block_temp[9])
            # ------------------------------------------------------------------------------------------------------------
            # ------------------------------------------------------------------------------------------------------------

            feature = 1 * first + 0.1 * second + 0.1 * third

            # ---------------------------------------------------------
            # block=Strategy(enc_ir_block_temp,enc_vis_block_temp)
            # block2=L1_norm(enc_ir_block2_temp,enc_vis_block2_temp)
            # ---------------------------------------------------------

            feature = feature.eval()

            # -------------- collapse the feature maps to a single channel --------------
            # feature_map_vis_out = sess.run(tf.reduce_sum(feature_a[0], 3, keep_dims=True))
            # feature_map_ir_out = sess.run(tf.reduce_sum(feature_b[0], 3, keep_dims=True))
            # ------------------------------------------------------------------

            output_image = dfn.transform_decoder(result, block, block2)

            # output = dfn.transform_decoder(feature)
            # print(type(feature))
            # output = sess.run(output_image, feed_dict={result: feature,enc_res_block:block,enc_res_block2:block2})
            output = sess.run(output_image, feed_dict={result: feature})

            save_images(ir_path,
                        output,
                        output_path,
                        prefix='fused' + str(index),
                        suffix='_mix_' + str(ssim_weight))
Ejemplo n.º 30
    def train(self):

        global_step = tf.Variable(0, trainable=False)
        add_global = global_step.assign_add(1)
        new_learning_rate = tf.train.exponential_decay(self.learn_rate_init,
                                                       global_step=global_step,
                                                       decay_steps=10000,
                                                       decay_rate=0.98)
        #for D
        trainer_D = tf.train.RMSPropOptimizer(learning_rate=new_learning_rate)
        gradients_D = trainer_D.compute_gradients(self.D_loss,
                                                  var_list=self.d_vars)
        opti_D = trainer_D.apply_gradients(gradients_D)

        #for G
        trainer_G = tf.train.RMSPropOptimizer(learning_rate=new_learning_rate)
        gradients_G = trainer_G.compute_gradients(self.G_loss,
                                                  var_list=self.g_vars)
        opti_G = trainer_G.apply_gradients(gradients_G)

        #for E
        trainer_E = tf.train.RMSPropOptimizer(learning_rate=new_learning_rate)
        gradients_E = trainer_E.compute_gradients(self.encode_loss,
                                                  var_list=self.e_vars)
        opti_E = trainer_E.apply_gradients(gradients_E)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:

            sess.run(init)

            # Initialzie the iterator
            sess.run(self.training_init_op)
            summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(self.log_dir, sess.graph)

            #self.saver.restore(sess, self.saved_model_path)
            step = 0

            while step <= self.max_iters:

                next_x_images = sess.run(self.next_x)

                fd = {self.images: next_x_images}
                sess.run(opti_E, feed_dict=fd)
                # optimizaiton G
                sess.run(opti_G, feed_dict=fd)
                # optimization D
                sess.run(opti_D, feed_dict=fd)

                summary_str = sess.run(summary_op, feed_dict=fd)

                summary_writer.add_summary(summary_str, step)
                new_learn_rate = sess.run(new_learning_rate)

                if new_learn_rate > 0.00005:
                    sess.run(add_global)

                if step % 200 == 0:

                    D_loss, fake_loss, encode_loss, LL_loss, kl_loss, new_learn_rate \
                        = sess.run([self.D_loss, self.G_loss, self.encode_loss, self.LL_loss, self.kl_loss/(self.latent_dim*self.batch_size), new_learning_rate], feed_dict=fd)
                    print(
                        "Step %d: D: loss = %.7f G: loss=%.7f E: loss=%.7f LL loss=%.7f KL=%.7f, LR=%.7f"
                        % (step, D_loss, fake_loss, encode_loss, LL_loss,
                           kl_loss, new_learn_rate))

                if np.mod(step, 200) == 1:

                    save_images(
                        next_x_images[0:self.batch_size],
                        [self.batch_size // 8, 8],
                        '{}/train_{:02d}_real.png'.format(
                            self.sample_path, step))
                    sample_images = sess.run(self.x_tilde, feed_dict=fd)
                    save_images(
                        sample_images[0:self.batch_size],
                        [self.batch_size // 8, 8],
                        '{}/train_{:02d}_recon.png'.format(
                            self.sample_path, step))

                if np.mod(step, 2000) == 1 and step != 0:

                    self.saver.save(sess, self.saved_model_path)

                step += 1

            save_path = self.saver.save(sess, self.saved_model_path)
            print "Model saved in file: %s" % save_path
Ejemplo n.º 31
def KanadeAssociativeRBM(cache=False, train_further=False):
    print "Testing Associative RBM which tries to learn the ID map "
    # print "Testing Associative RBM which tries to learn the following mapping: {anger, saddness, disgust} -> {sadness}, {contempt, happy, surprise} -> {happy}"
    # project set-up
    data_manager = store.StorageManager('Kanade/OptMFSparse0.01RBMTest', log=True)
    # data_manager = store.StorageManager('Kanade/OptAssociativeRBMTest', log=True)
    shape = 25
    dataset_name = 'sharp_equi{}_{}'.format(shape, shape)

    # Load kanade database
    mapping = None  # id map
    # mapping = {'anger': 'sadness', 'contempt': 'happy', 'disgust': 'sadness', 'fear': 'sadness', 'happy': 'happy',
    #            'sadness': 'sadness', 'surprise': 'happy'}
    train, valid, test = loader.load_kanade(pre={'scale': True}, set_name=dataset_name)
    train_x, train_y = train
    test_x, test_y = test

    # Sample associated image
    train_x_mapped, train_y_mapped = loader.sample_image(train_y, mapping=mapping, pre={'scale': True},
                                                         set_name=dataset_name)
    test_x_mapped, test_y_mapped = loader.sample_image(test_y, mapping=mapping, pre={'scale': True},
                                                       set_name=dataset_name)

    # Concatenate images
    concat1 = T.concatenate([train_x, train_x_mapped], axis=1)
    # concat2 = T.concatenate([train_x_mapped, train_x], axis=1)
    # concat = T.concatenate([concat1, concat2], axis=0)
    # train_tX = theano.function([], concat)()
    train_tX = theano.function([], concat1)()
    train_X = theano.shared(train_tX)

    # Train classifier to be used for classifying reconstruction associated image layer
    # mapped_data = loader.load_kanade(#emotions=['sadness', 'happy'],
    #                                  pre={'scale': True},
    #                                  set_name=dataset_name)  # Target Image
    # clf_orig = SimpleClassifier('logistic', mapped_data[0][0], mapped_data[0][1])
    clf_orig = SimpleClassifier('logistic', train_x, train_y)

    # Initialise RBM
    tr = rbm_config.TrainParam(learning_rate=0.0001,
                               momentum_type=rbm_config.NESTEROV,
                               momentum=0.9,
                               weight_decay=0.0001,
                               sparsity_constraint=True,
                               sparsity_target=0.01,
                               sparsity_cost=100,
                               sparsity_decay=0.9,
                               batch_size=10,
                               epochs=10)

    n_visible = shape * shape * 2
    n_hidden = 500

    config = rbm_config.RBMConfig()
    config.v_n = n_visible
    config.h_n = n_hidden
    config.v_unit = rbm_units.GaussianVisibleUnit
    # config.h_unit = rbm_units.ReLUnit
    config.progress_logger = rbm_logger.ProgressLogger(img_shape=(shape * 2, shape))
    config.train_params = tr
    rbm = RBM(config)
    print "... initialised RBM"

    # Load RBM (test)
    loaded = data_manager.retrieve(str(rbm))
    if loaded:
        rbm = loaded
    else:
        rbm.set_initial_hidden_bias()
        rbm.set_hidden_mean_activity(train_X)

    # Train RBM - learn joint distribution
    # rbm.pretrain_lr(train_x, train_x01)
    for i in range(0, 10):
        if not cache or train_further:
            rbm.train(train_X)

        data_manager.persist(rbm)

        print "... reconstruction of associated images"
        # Get reconstruction with train data to get 'mapped' images to train classifiers on
        reconstruction = rbm.reconstruct(train_X, 1,
                                         plot_n=100,
                                         plot_every=1,
                                         img_name='recon_train')
        reconstruct_assoc_part = reconstruction[:, (shape ** 2):]

        # Get associated images of test data
        nsamples = np.random.normal(0, 1, test_x.get_value(True).shape).astype(np.float32)
        initial_y = theano.shared(nsamples, name='initial_y')
        utils.save_images(nsamples[0:100], 'initialisation.png', (10, 10), (25, 25))

        test_x_associated = rbm.reconstruct_association_opt(test_x, initial_y,
                                                            5,
                                                            0.,
                                                            plot_n=100,
                                                            plot_every=1,
                                                            img_name='recon_test_gibbs')

        mf_recon = rbm.mean_field_inference_opt(test_x, y=initial_y,
                                                sample=False,
                                                k=10,
                                                img_name='recon_test_mf_raw')

        # Concatenate images
        test_MFX = theano.function([], T.concatenate([test_x, mf_recon], axis=1))()
        test_MF = theano.shared(test_MFX)
        reconstruction = rbm.reconstruct(test_MF, 1,
                                         plot_n=100,
                                         plot_every=1,
                                         img_name='recon_test_mf_recon')
        mf_recon = reconstruction[:, (shape ** 2):]

        print "... reconstructed"

        # Classify the reconstructions

        # 1. Train classifier on original images
        score_orig = clf_orig.get_score(test_x_associated, test_y_mapped.eval())
        score_orig_mf = clf_orig.get_score(mf_recon, test_y_mapped.eval())

        # 2. Train classifier on reconstructed images
        clf_recon = SimpleClassifier('logistic', reconstruct_assoc_part, train_y_mapped.eval())
        score_retrain = clf_recon.get_score(test_x_associated, test_y_mapped.eval())
        score_retrain_mf = clf_recon.get_score(mf_recon, test_y_mapped.eval())

        out_msg = '{} (orig, retrain):{},{}'.format(rbm, score_orig, score_retrain)
        out_msg2 = '{} (orig, retrain):{},{}'.format(rbm, score_orig_mf, score_retrain_mf)
        print(out_msg)
        print(out_msg2)
Ejemplo n.º 32
    def train(self, config):
        d_optim = tf.train.AdamOptimizer( config.learning_rate, beta1=config.beta1 ) \
                    .minimize( self.d_loss, var_list=self.d_vars )
        g_optim = tf.train.AdamOptimizer( config.learning_rate, beta1=config.beta1 ) \
                    .minimize( self.g_loss, var_list=self.g_vars )

        try:
            tf.global_variables_initializer().run()
        except:
            tf.initialize_all_variables().run()

        self.g_sum = ops.merge_summary([
            self.z_sum, self.d__sum, self.G_sum, self.d_loss_fake_sum,
            self.g_loss_sum
        ])
        self.d_sum = ops.merge_summary(
            [self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
        self.writer = ops.SummaryWriter('./logs', self.sess.graph)

        sample_z = np.random.uniform(-1, 1, size=(self.sample_num, self.z_dim))

        if config.dataset == 'mnist':
            sample_inputs = self.data_X[0:self.sample_num]
            sample_labels = self.data_y[0:self.sample_num]
        else:
            sample_files = self.data[0:self.sample_num]
            sample = [
                utils.get_image(sample_file,
                                input_height=self.input_height,
                                input_width=self.input_width,
                                resize_height=self.output_height,
                                resize_width=self.output_width,
                                crop=self.crop,
                                grayscale=self.grayscale)
                for sample_file in sample_files
            ]
            if self.grayscale:
                sample_inputs = np.array(sample).astype(np.float32)[:, :, :,
                                                                    None]
            else:
                sample_inputs = np.array(sample).astype(np.float32)

        counter = 1
        start_time = time.time()
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            counter = checkpoint_counter
            print('[*] Load SUCCESS')
        else:
            print('[*] Load failed...')

        for epoch in range(config.epoch):
            if config.dataset == 'mnist':
                batch_idxs = min(len(self.data_X),
                                 config.train_size) // config.batch_size
            else:
                self.data = glob(
                    os.path.join('./data', config.dataset,
                                 self.input_fname_pattern))
                batch_idxs = min(len(self.data),
                                 config.train_size) // config.batch_size

            for idx in range(0, batch_idxs):
                if config.dataset == 'mnist':
                    batch_images = self.data_X[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]
                    batch_labels = self.data_y[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]
                else:
                    batch_files = self.data[idx * config.batch_size:(idx + 1) *
                                            config.batch_size]
                    batch = [
                        utils.get_image(batch_file,
                                        input_height=self.input_height,
                                        input_width=self.input_width,
                                        resize_height=self.output_height,
                                        resize_width=self.output_width,
                                        crop=self.crop,
                                        grayscale=self.grayscale)
                        for batch_file in batch_files
                    ]

                    if self.grayscale:
                        batch_images = np.array(batch).astype(
                            np.float32)[:, :, :, None]
                    else:
                        batch_images = np.array(batch).astype(np.float32)

                batch_z = np.random.uniform(
                    -1, 1, [config.batch_size, self.z_dim]).astype(np.float32)

                if config.dataset == 'mnist':
                    # Update D network
                    _, summary_str = self.sess.run(
                        [d_optim, self.d_sum],
                        feed_dict={
                            self.inputs: batch_images,
                            self.z: batch_z,
                            self.y: batch_labels
                        })
                    self.writer.add_summary(summary_str, counter)

                    # Update G network
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={
                                                       self.z: batch_z,
                                                       self.y: batch_labels
                                                   })
                    self.writer.add_summary(summary_str, counter)

                    # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={
                                                       self.z: batch_z,
                                                       self.y: batch_labels
                                                   })
                    self.writer.add_summary(summary_str, counter)

                    errD_fake = self.d_loss_fake.eval({
                        self.z: batch_z,
                        self.y: batch_labels
                    })

                    errD_real = self.d_loss_real.eval({
                        self.inputs: batch_images,
                        self.y: batch_labels
                    })
                    errG = self.g_loss.eval({
                        self.z: batch_z,
                        self.y: batch_labels
                    })
                else:
                    # Update D network
                    _, summary_str = self.sess.run([d_optim, self.d_sum],
                                                   feed_dict={
                                                       self.inputs:
                                                       batch_images,
                                                       self.z: batch_z
                                                   })
                    self.writer.add_summary(summary_str, counter)

                    # Update G network
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={self.z: batch_z})
                    self.writer.add_summary(summary_str, counter)

                    # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={self.z: batch_z})
                    self.writer.add_summary(summary_str, counter)

                    errD_fake = self.d_loss_fake.eval({self.z: batch_z})
                    errD_real = self.d_loss_real.eval(
                        {self.inputs: batch_images})
                    errG = self.g_loss.eval({self.z: batch_z})

                counter += 1
                print('Epoch: [%2d]  [%4d/%4d] time: %4.4f, d_loss:%.8f, g_loss:%.8f' \
                      % (epoch, idx, batch_idxs,
                         time.time() - start_time, errD_fake + errD_real, errG
                         )
                      )
                if np.mod(counter, 100) == 1:
                    if config.dataset == 'mnist':
                        samples, d_loss, g_loss = self.sess.run(
                            [self.sampler, self.d_loss, self.g_loss],
                            feed_dict={
                                self.z: sample_z,
                                self.inputs: sample_inputs,
                                self.y: sample_labels,
                            })
                        utils.save_images(
                            samples,
                            utils.image_manifold_size(samples.shape[0]),
                            './{}/train_{:02d}_{:04d}.png'.format(
                                config.sample_dir, epoch, idx))
                        print('[Sample] d_loss: %.8f g_loss: %.8f' %
                              (d_loss, g_loss))
                    else:
                        try:
                            samples, d_loss, g_loss = self.sess.run(
                                [self.sampler, self.d_loss, self.g_loss],
                                feed_dict={
                                    self.z: sample_z,
                                    self.inputs: sample_inputs,
                                })
                            utils.save_images(
                                samples,
                                utils.image_manifold_size(samples.shape[0]),
                                './{}/train_{:02d}_{:04d}.png'.format(
                                    config.sample_dir, epoch, idx))
                            print('[Sample] d_loss: %.8f, g_loss: %.8f' %
                                  (d_loss, g_loss))
                        except:
                            print('One pic error!...')

                if np.mod(counter, 500) == 2:
                    self.save(config.checkpoint_dir, counter)
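# The utils.save_images / utils.image_manifold_size helpers used above are not
# shown in this listing. A minimal sketch of such a grid-tiling pair (the
# implementation and the imageio dependency are assumptions, assuming tanh
# outputs in [-1, 1]; not the repo's actual utils):
import numpy as np
import imageio

def image_manifold_size(num_images):
    # Square grid for a perfect-square batch, e.g. 64 -> (8, 8).
    h = int(np.floor(np.sqrt(num_images)))
    w = int(np.ceil(np.sqrt(num_images)))
    assert h * w == num_images, 'batch size should be a perfect square'
    return h, w

def save_images(images, size, path):
    # images: (N, H, W, C) floats in [-1, 1]; tile into a size[0] x size[1] grid.
    images = (images + 1.0) / 2.0
    n, h, w, c = images.shape
    grid = np.zeros((size[0] * h, size[1] * w, c), dtype=np.float32)
    for idx, img in enumerate(images):
        i, j = idx // size[1], idx % size[1]
        grid[i * h:(i + 1) * h, j * w:(j + 1) * w, :] = img
    imageio.imwrite(path, (np.squeeze(grid) * 255).astype(np.uint8))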
Example #33
    def test_generate(self, sess, n_samples=128, filename='images/samples.png'):
        noises = self.noise_gen((n_samples, self.get_latent_dim()))
        samples = sess.run(self.Generator, feed_dict={self.z_in: noises})

        utils.save_images(samples.reshape(n_samples, 64, 64, 3), filename)
Example #34
    def test_during_train(self, epoch, args):
        """Test SG-GAN"""
        # print(" [*] Running Test ...")

        sample_files = glob(
            './datasets/{}/*.*'.format(args.dataset_dir + '/testA')
        )  # glob('./datasets/{}/*.*'.format(self.dataset_dir + '/testA'))

        preds1 = []
        preds2 = []
        preds3 = []
        preds4 = []
        preds5 = []
        gts1 = []
        gts2 = []
        gts3 = []
        gts4 = []
        gts5 = []

        fake_img = []
        actual_image = []
        output_images = []

        plot_labels = True

        for sample_file in sample_files:
            # print('Processing image: ' + sample_file)

            #### [MODIFIED] to test metric functions ####
            #### sample_image = [load_test_data(sample_file, args.image_width, args.image_height)]

            #### [CHANGES]
            sample_image, seg_image, seg_mask_64, seg_mask_8 = load_test_data(
                sample_file, args.image_width, args.image_height)
            sample_image = [sample_image]
            seg_image = [seg_image]
            # seg_mask_64 = [seg_mask_64]
            seg_mask_8 = [seg_mask_8]

            seg_image = np.array(seg_image).astype(np.float32)
            seg_mask_8 = np.array(seg_mask_8).astype(np.float32)
            seg_mask_64 = np.expand_dims(seg_mask_64, axis=0)
            ####

            rescaled_sample = [
                tf.image.convert_image_dtype(sample, np.uint8)
                for sample in sample_image
            ]
            rescaled_sample = np.array(rescaled_sample).astype(np.float32)
            sample_image = np.array(sample_image).astype(np.float32)

            # Get fake image
            fake_A = self.generator(rescaled_sample)
            fake_img = fake_A

            sample_image = (sample_image * 2) - 1

            image_path = os.path.join(args.test_dir,
                                      os.path.basename(sample_file))
            real_image_copy = os.path.join(
                args.test_dir, "real_" + os.path.basename(sample_file))
            # save_images(sample_image, [1, 1], real_image_copy)
            save_images(fake_img, [1, 1], image_path)

            # Get fake image
            actual_image = get_img(sample_image, [1, 1])
            fake_img = get_img(fake_A, [1, 1])

            output_images.append(fake_img)

            lt1, lp1 = scores_seg_fake(seg_image, fake_img)
            preds1 += list(lp1)
            gts1 += list(lt1)

        print("score")
        score = scores(gts1, preds1, n_class=args.segment_class)
        score_df = pd.DataFrame(score)

        print("\n[*] ------------")
        print("[*] Test scores:\n")

        with train_summary_writer.as_default():
            tf.summary.scalar('Overall Accuracy',
                              score["Overall Acc"],
                              step=epoch)
            tf.summary.scalar('Mean Accuracy', score["Mean Acc"], step=epoch)
            tf.summary.scalar('Frequency Weighted Accuracy',
                              score["FreqW Acc"],
                              step=epoch)
            tf.summary.scalar('Mean IoU', score["Mean IoU"], step=epoch)

        ########
        # if plot_labels:
        #     title="[*] Labels: seg_image | fake_img"
        #     name1="seg_image"
        #     name2="fake_image"
        #     for lt, lp in zip(gts1, preds1):
        #         plot_tensors(lt, lp, title, name1, name2)

        # print("---------------------------")
        # print("lt: seg_img || lp: fake_img")
        # print(score_df)

        # ########
        # if plot_labels:
        #     title="[*] Labels: seg_class_mask | crf(sample_image)"
        #     name1="seg_class_mask"
        #     name2="crf(sample_image, seg_class_mask)"
        #     for lt, lp in zip(gts2, preds2):
        #         plot_tensors(lt, lp, title, name1, name2)

        # print("---------------------------")
        # print("lt: seg_mask || lp: crf(test sample)")
        # print(score_crf_df)

        # ########
        # if plot_labels:
        #     title="[*] Labels: fake_img | crf(sample_image, seg_mask)"
        #     name1="fake_img"
        #     name2="crf(sample_image, seg_mask)"
        #     for lt, lp in zip(gts3, preds3):
        #         plot_tensors(lt, lp, title, name1, name2)

        # print("-------------------------------------")
        # print("lt: fake_img || lp: crf(sample_image, seg_mask)")
        # print(score_crf_2_df)

        # #########
        # if plot_labels:
        #     title="[*] Labels: seg_image | fake_img"
        #     name1="seg_image"
        #     name2="da_fake"
        #     for lt, lp in zip(gts4, preds4):
        #         plot_tensors(lt, lp, title, name1, name2)

        # print("----------------------------")
        # print("lt: seg_image || lp: da_fake")
        # print(score_d_df)

        # #########
        # if plot_labels:
        #     title="[*] Labels: seg_mask | crf(sample_image, fake_img)"
        #     name1="seg_mask"
        #     name2="crf(sample_image, fake_img)"
        #     for lt, lp in zip(gts5, preds5):
        #         plot_tensors(lt, lp, title, name1, name2)

        # print("----------------------------")
        # print("lt: seg_mask | lp: crf(sample_image, fake_img)")
        # print(score_crf_3_df)
        # print("Making multiple image tensor:", len(output_images))

        if len(output_images) <= 1:
            return output_images[0]
        else:
            output_tensor = tf.concat([output_images[0], output_images[1]],
                                      axis=0)
            for i in range(2, len(output_images)):
                output_tensor = tf.concat([output_tensor, output_images[i]],
                                          axis=0)

            return output_tensor
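# Aside: the pairwise concatenation loop above can be collapsed into a single
# call, since tf.concat accepts a list of tensors:
#     output_tensor = tf.concat(output_images, axis=0)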
Example #35
def main():

    m = Mask()
    c = Classifier()

    device = torch.device('cuda')

    # IR_50
    model_ir50 = IR_50([112, 112])
    model_ir50.load_state_dict(
        torch.load('./ckpt/backbone_ir50_ms1m_epoch120.pth',
                   map_location='cuda'))
    model_ir50.eval().to(device).zero_grad()

    # IR_152
    model_ir152 = IR_152([112, 112])
    model_ir152.load_state_dict(
        torch.load('./ckpt/Backbone_IR_152_Epoch_112_Batch.pth',
                   map_location='cuda'))
    model_ir152.eval().to(device).zero_grad()

    # IR_SE_50
    model_irse50 = Backbone(50, mode='ir_se')
    model_irse50.load_state_dict(
        torch.load('./ckpt/model_ir_se50.pth', map_location='cuda'))
    model_irse50.eval().to(device).zero_grad()

    eps = (args.max_epsilon / 255.0)
    alpha = eps / args.iterations

    momentum = args.momentum

    kernel = gkern(args.kernlen, args.sig).astype(np.float32)
    stack_kernel = np.stack([kernel, kernel, kernel])
    stack_kernel = np.expand_dims(stack_kernel, 1)
    stack_kernel = torch.Tensor(stack_kernel).to(device)

    counter = 0
    total_distance = 0
    num = 1
    for raw_images, filenames, _ in load_images_with_names(
            args.input_dir, args.batch_size):

        if num * args.batch_size > 712:
            batch_size = 712 - (num - 1) * args.batch_size
        else:
            batch_size = args.batch_size
        num += 1

        in_tensor = process(raw_images)
        raw_variable = in_tensor.detach().to(device)

        # raw embedding
        raw_ir50 = model_ir50(raw_variable)
        raw_ir152 = model_ir152(raw_variable)
        raw_irse50 = model_irse50(raw_variable)

        true_labels = c.classifier(raw_ir50.data.cpu().detach().numpy())

        bias_ir50, bias_ir152, bias_irse50 = found_bias_v2(
            raw_ir50.data.cpu().detach().numpy(),
            raw_ir152.data.cpu().detach().numpy(),
            raw_irse50.data.cpu().detach().numpy(), batch_size)

        perturbation = torch.Tensor(batch_size, 3, 112,
                                    112).uniform_(-0.01, 0.01).to(device)
        in_variable = raw_variable + perturbation
        in_variable.data.clamp_(-1.0, 1.0)
        in_variable.requires_grad = True

        last_grad = 0.0
        momentum_sum = 0.0

        for step in range(args.iterations):

            new_ir50 = model_ir50(in_variable)
            new_ir152 = model_ir152(in_variable)
            new_irse50 = model_irse50(in_variable)

            loss1 = -torch.mean(
                torch.cosine_similarity(x1=raw_ir50, x2=new_ir50, dim=1) * 1.7
                + torch.cosine_similarity(x1=raw_ir152, x2=new_ir152, dim=1) *
                0.35 +
                torch.cosine_similarity(x1=raw_irse50, x2=new_irse50, dim=1) *
                0.65) / 2.7

            loss2 = torch.mean(
                torch.cosine_similarity(
                    x1=torch.from_numpy(bias_ir50).detach().to(device),
                    x2=new_ir50,
                    dim=1) * 1.7 + torch.cosine_similarity(x1=torch.from_numpy(
                        bias_ir152).detach().to(device),
                                                           x2=new_ir152,
                                                           dim=1) * 0.35 +
                torch.cosine_similarity(x1=torch.from_numpy(
                    bias_irse50).detach().to(device),
                                        x2=new_irse50,
                                        dim=1) * 0.65) / 2.7
            loss = loss1 + loss2

            print('loss :', loss)

            loss.backward(retain_graph=True)

            data_grad = in_variable.grad.data

            data_grad = F.conv2d(data_grad,
                                 stack_kernel,
                                 padding=(args.kernlen - 1) // 2,
                                 groups=3)

            for i in range(data_grad.shape[0]):
                data_grad[i] = data_grad[i] / torch.mean(
                    data_grad[i].norm(2, 0) / 1.713)

            if step == 0:  # no accumulated gradient on the first iteration
                noise = data_grad
            else:
                noise = last_grad * momentum + data_grad * 0.9

            last_grad = noise.detach()
            norm = noise.norm(dim=1).unsqueeze(1)
            index = norm.mean()
            momentum_sum = momentum_sum * momentum + 1.0
            d_img = noise * norm * alpha / (momentum_sum * index)
            d_img = d_img / d_img.norm(dim=1).mean() * alpha

            perturb_mask = m.get_perturb_mask(
                new_ir50.data.detach().cpu().numpy(),
                new_ir152.data.detach().cpu().numpy(),
                new_irse50.data.detach().cpu().numpy(), true_labels,
                args.cos_margin)

            in_variable.data = in_variable.data + \
                               d_img * torch.from_numpy(perturb_mask.reshape([batch_size, 1, 1, 1])).to(device).float()

            raw_variable.data = torch.clamp(in_variable.data, -1.0, 1.0)
            in_variable.grad.data.zero_()

        advs = raw_variable.data.cpu().detach().numpy()
        advs = advs.swapaxes(1, 2).swapaxes(2, 3)

        total_distance_ = save_images(raw_images, advs, filenames,
                                      args.output_dir)
        total_distance += total_distance_
        counter += batch_size
        print('attack images num : [%d / 712]' % counter)
    print('mean_dist:', total_distance / 712.0)
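# gkern, used above to build the smoothing kernel, is not shown in this
# listing. A common Gaussian-kernel definition (a sketch under that
# assumption, using scipy):
import numpy as np
import scipy.stats as st

def gkern(kernlen=15, nsig=3):
    # kernlen x kernlen 2-D Gaussian kernel, normalized to sum to 1.
    x = np.linspace(-nsig, nsig, kernlen)
    kern1d = st.norm.pdf(x)
    kernel_raw = np.outer(kern1d, kern1d)
    return kernel_raw / kernel_raw.sum()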
Example #36
    def reconstruct(self):
        self.G.eval()
        self.load()

        data_dir = 'data/' + self.dataset + '/' + self.model_name
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)

        ## saving generated imgs
        samples = self.G(self.sample_z_, self.sample_y_)

        if self.gpu_mode:
            samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
        else:
            samples = samples.data.numpy().transpose(0, 2, 3, 1)

        samples = (samples + 1) / 2
        utils.save_images(
            samples[:100, :, :, :], [10, 10], self.save_dir + '/' +
            self.dataset + '/' + self.model_name + '/gen_img.png')

        # for training
        torch.manual_seed(self.manual_seed)
        for k in range(self.train_parts):  # avoid memory overflow
            sample_z = torch.randn((self.train_size, self.z_dim))
            labels = torch.randint(0, self.class_num,
                                   (self.train_size, 1)).type(torch.LongTensor)
            sample_y = torch.zeros(self.train_size,
                                   self.class_num).scatter_(1, labels, 1)

            if self.gpu_mode:
                sample_z, sample_y = sample_z.cuda(), sample_y.cuda()

            samples = (self.G(sample_z, sample_y) + 1) / 2

            if self.gpu_mode:
                samples = samples.cpu().data.numpy()
            else:
                samples = samples.data.numpy()

            if k == 0:
                labels_train = labels
                samples_train = samples
            else:
                labels_train = np.concatenate((labels_train, labels), axis=0)
                samples_train = np.concatenate((samples_train, samples),
                                               axis=0)

        np.savez(data_dir + '/train',
                 sample=samples_train,
                 label=labels_train.squeeze(1))

        # for testing
        torch.manual_seed(self.manual_seed + 999)
        sample_z = torch.randn((self.test_size, self.z_dim))
        labels_test = torch.randint(0, self.class_num,
                                    (self.test_size, 1)).type(torch.LongTensor)
        sample_y = torch.zeros(self.test_size,
                               self.class_num).scatter_(1, labels_test, 1)

        if self.gpu_mode:
            sample_z, sample_y = sample_z.cuda(), sample_y.cuda()

        samples_test = (self.G(sample_z, sample_y) + 1) / 2

        if self.gpu_mode:
            samples_test = samples_test.cpu().data.numpy()
        else:
            samples_test = samples_test.data.numpy()

        np.savez(data_dir + '/test',
                 sample=samples_test,
                 label=labels_test.squeeze(1))
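# Usage sketch for the archives written above (the path is illustrative):
#     data = np.load('data/<dataset>/<model_name>/train.npz')
#     X, y = data['sample'], data['label']  # X: (N, C, H, W) floats in [0, 1]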
Example #37
    def save_imgs(self, out_path):
        prefix = "action"
        utils.make_dir(out_path)
        imgs = [("act" + str(i), img) for i, img in enumerate(self.frames)]
        utils.save_images(out_path, imgs)
Example #38
    def visualize_results(self, epoch, fix=True):
        self.G.eval()

        if not os.path.exists(self.result_dir + '/' + self.dataset + '/' +
                              self.model_name):
            os.makedirs(self.result_dir + '/' + self.dataset + '/' +
                        self.model_name)

        tot_num_samples = min(self.sample_num, self.batch_size)
        image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))

        if fix:
            """ fixed noise """
            samples = self.G(self.sample_z_)
        else:
            """ random noise """
            if self.gpu_mode:
                sample_z_ = Variable(torch.rand(
                    (self.batch_size, self.z_dim)).cuda(),
                                     volatile=True)
            else:
                sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)),
                                     volatile=True)

            samples = self.G(sample_z_)

        copy_samples = samples

        if self.gpu_mode:
            samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
            presamples = copy_samples.cpu().data.numpy()
            presamples = 2 * (presamples -
                              np.max(presamples)) / -np.ptp(presamples) - 1
        else:
            samples = samples.data.numpy().transpose(0, 2, 3, 1)
            presamples = copy_samples.data.numpy()
            presamples = 2 * (presamples -
                              np.max(presamples)) / -np.ptp(presamples) - 1
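        # Note: the rescaling above maps generator outputs linearly onto
        # [-1, 1], with the sign flipped (max -> -1, min -> +1), before the
        # inception_score call below.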

        images = samples[:image_frame_dim * image_frame_dim, :, :, :]

        # Calculate inception score
        if self.calculate_inception:

            score = inception_score(presamples,
                                    cuda=True,
                                    resize=True,
                                    batch_size=self.batch_size)

            # display inception mean and std

            print("Inception score mean and std are", score)

        # save images and display if set
        utils.save_images(self.vis,
                          images, [image_frame_dim, image_frame_dim],
                          self.result_dir + '/' + self.dataset + '/' +
                          self.model_name + '/' + self.model_name +
                          '_epoch%03d' % epoch + '.png',
                          env=self.env_display,
                          visualize=self.visualize)
Example #39
def action_imgs(action_frame, out_path):
    utils.make_dir(out_path)
    actions = action_frame['Action']
    act_imgs = [(action.name, action.to_action_img()) for action in actions]
    utils.save_images(out_path, act_imgs)
    print(action_frame.head())
Example #40
def main():
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    parser = argparse.ArgumentParser(description='Test trained models')
    parser.add_argument(
        '--options-file',
        '-o',
        default='options-and-config.pickle',
        type=str,
        help='The file where the simulation options are stored.')
    parser.add_argument('--checkpoint-file',
                        '-c',
                        required=True,
                        type=str,
                        help='Model checkpoint file')
    parser.add_argument('--batch-size',
                        '-b',
                        default=12,
                        type=int,
                        help='The batch size.')
    parser.add_argument('--source-image',
                        '-s',
                        required=True,
                        type=str,
                        help='The image to watermark')
    # parser.add_argument('--times', '-t', default=10, type=int,
    #                     help='Number iterations (insert watermark->extract).')

    args = parser.parse_args()

    train_options, hidden_config, noise_config = utils.load_options(
        args.options_file)
    noiser = Noiser(noise_config)

    checkpoint = torch.load(args.checkpoint_file)
    hidden_net = Hidden(hidden_config, device, noiser, None)
    utils.model_from_checkpoint(hidden_net, checkpoint)

    image_pil = Image.open(args.source_image)
    image = randomCrop(np.array(image_pil), hidden_config.H, hidden_config.W)
    image_tensor = TF.to_tensor(image).to(device)
    image_tensor = image_tensor * 2 - 1  # transform from [0, 1] to [-1, 1]
    image_tensor.unsqueeze_(0)

    # for t in range(args.times):
    message = torch.Tensor(
        np.random.choice(
            [0, 1],
            (image_tensor.shape[0], hidden_config.message_length))).to(device)
    losses, (encoded_images, noised_images,
             decoded_messages) = hidden_net.validate_on_batch(
                 [image_tensor, message])
    decoded_rounded = decoded_messages.detach().cpu().numpy().round().clip(
        0, 1)
    message_detached = message.detach().cpu().numpy()
    print('original: {}'.format(message_detached))
    print('decoded : {}'.format(decoded_rounded))
    print('error : {:.3f}'.format(
        np.mean(np.abs(decoded_rounded - message_detached))))
    utils.save_images(image_tensor.cpu(),
                      encoded_images.cpu(),
                      'test',
                      '.',
                      resize_to=(256, 256))
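# Example invocation (script and file names here are hypothetical):
#     python test_model.py -o options-and-config.pickle -c checkpoint.pth -s source.jpg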
Example #41
import numpy as np
import tensorflow as tf

from config import cfg            # assumed companion module providing cfg
from utils import load_mnist, save_images
from capsNet import CapsNet


if __name__ == '__main__':
    capsNet = CapsNet(is_training=cfg.is_training)
    tf.logging.info('Graph loaded')

    teX, teY = load_mnist(cfg.dataset, cfg.is_training)
    with capsNet.graph.as_default():
        sv = tf.train.Supervisor(logdir=cfg.logdir)
        # with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        with sv.managed_session() as sess:
            sv.saver.restore(sess, tf.train.latest_checkpoint(cfg.logdir))
            tf.logging.info('Restored')

            reconstruction_err = []
            for i in range(10000 // cfg.batch_size):
                start = i * cfg.batch_size
                end = start + cfg.batch_size
                recon_imgs = sess.run(capsNet.decoded, {capsNet.X: teX[start:end]})
                orgin_imgs = np.reshape(teX[start:end], (cfg.batch_size, -1))
                squared = np.square(recon_imgs - orgin_imgs)
                reconstruction_err.append(np.mean(squared))

                if i % 5 == 0:
                    imgs = np.reshape(recon_imgs, (cfg.batch_size, 28, 28, 1))
                    size = 6
                    save_images(imgs[0:size * size, :], [size, size], 'results/test_%03d.png' % i)
            print('test acc:')
            print((1. - np.mean(reconstruction_err)) * 100)
Example #42
gpu_options = tf.GPUOptions(allow_growth=True)
sess_config = tf.ConfigProto(allow_soft_placement=True,
                             gpu_options=gpu_options)

# set up the TF session and init all ops and variables
with sv.prepare_or_wait_for_session(config=sess_config) as sess:
    cycles = data_test_prep.shape[0] // test_batch_size
    results = []
    for i in range(cycles):
        feed_dict = {nat_enc.input_test_ph: data_test_prep[i*test_batch_size:(i+1)*test_batch_size],
                     nat_enc.dropout_keep_prob:1.0}
        fetch_dict = {
                "reps": nat_enc.representation_test,
            }
        res_test = sess.run(fetch_dict,feed_dict=feed_dict)
        results.append(res_test['reps'])
    r = np.concatenate(results,axis=0)
    images = []
    for counter,sample in enumerate(samples):
        images.append(np.expand_dims(data_test[sample],axis=0))
        img_without = np.concatenate([data_test[:sample,:],data_test[sample+1:,:]],axis=0)
        r_without = np.concatenate([r[:sample,:],r[sample+1:,:]],axis=0)
        for i in range(number_of_neighbors):
            nearest_index = np.sum(np.square(r_without-r[sample]),axis=1).argmin()
            images.append(np.expand_dims(img_without[nearest_index],axis=0))
            img_without = np.concatenate([img_without[:nearest_index,:],img_without[nearest_index+1:,:]],axis=0)
            r_without = np.concatenate([r_without[:nearest_index,:],r_without[nearest_index+1:,:]],axis=0)

    printimages = np.concatenate(images,axis=0)
    utils.save_images(printimages,[len(samples),number_of_neighbors+1],params['out_path'])
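# A vectorized alternative sketch for the neighbor search above: rank all
# representations at once instead of re-concatenating arrays per neighbor
# (the function name is an assumption):
def k_nearest(r, sample_idx, k):
    d2 = np.sum(np.square(r - r[sample_idx]), axis=1)
    d2[sample_idx] = np.inf  # exclude the query itself
    return np.argsort(d2)[:k]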
Example #43
    def save_imgs(self, out_path):
        utils.make_dir(out_path)
        imgs = [cloud.to_img(self.dim) for cloud in self.point_clouds]
        imgs = [("act" + str(i), img) for i, img in enumerate(imgs)]
        utils.save_images(out_path, imgs)
Example #44
nn_model.register_checkpoint(checkpoint)

if not checkpoint.load(FLAGS.checkpoint_it_to_load):
    raise RuntimeError('Cannot load checkpoint')

now = datetime.datetime.now()
for i in range(FLAGS.n_samples):
    z = np.random.randn(FLAGS.sample_size, FLAGS.z_dim).astype(np.float32)
    z = torch.tensor(z, device=device)

    with torch.no_grad():
        if hasattr(nn_model, 'av_g_model'):
            nn_model.av_g_model.eval()
            gen_samples = nn_model.av_g_model(z)
        else:
            nn_model.g_model.eval()
            gen_samples = nn_model.g_model(z)
            nn_model.g_model.train()

        gen_samples = torch.clamp(gen_samples, -1., 1.)

    gen_samples = gen_samples.data.cpu().numpy()

    n = int(np.sqrt(FLAGS.sample_size))
    utils.save_images(
        gen_samples, [n, n],
        './{}/sample_{:02d}_{:02d}_{:02d}:{:02d}:{:02d}__{:d}.png'.format(
            FLAGS.sample_dir, now.month, now.day, now.hour, now.minute,
            now.second, i))

logger.info("Sample done")
Example #45
                    default=False,
                    help='generation network using residule block')
parser.add_argument('--use_lsgan',
                    dest='use_lsgan',
                    type=bool,
                    default=True,
                    help='gan loss defined in lsgan')
parser.add_argument(
    '--max_size',
    dest='max_size',
    type=int,
    default=50,
    help='max size of image pool, 0 means do not use image pool')
parser.add_argument('--segment_class',
                    dest='segment_class',
                    type=int,
                    default=8,
                    help='number of segmentation classes')
args = parser.parse_args()

pool = ImagePool(10)

sample_file = "test/real_00007.png"
sample_image = load_test_data(sample_file, args.img_width, args.img_height)

print(sample_image.shape)
imgplot = plt.imshow(sample_image)
plt.show()

save_images(sample_image, [1, 1], "test/holis.png")
Example #46
    def train(self):

        # Used for plot loss curve
        train_D_loss = []
        train_G_loss = []

        opti_D = tf.train.AdamOptimizer(learning_rate=self.learning_rate_dis,
                                        beta1=0.5).minimize(
                                            self.loss, var_list=self.d_vars)
        opti_G = tf.train.AdamOptimizer(learning_rate=self.learning_rate_gen,
                                        beta1=0.5).minimize(
                                            self.G_fake_loss,
                                            var_list=self.g_vars)
        # opti_G = tf.train.AdamOptimizer(learning_rate=self.learning_rate_gen, beta1=0.5).minimize(- self.loss, var_list=self.g_vars)

        init = tf.global_variables_initializer()

        with tf.Session() as sess:

            sess.run(init)

            summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(self.log_dir, sess.graph)

            #self.saver.restore(sess , self.model_path)

            batch_num = 0
            e = 0
            step = 0

            while e <= self.max_epoch:

                rand = np.random.randint(0, 100)
                rand = 0  # the random offset is immediately overridden, i.e. effectively disabled

                while batch_num < len(self.ds_train) / self.batch_size:

                    step = step + 1
                    realbatch_array, real_y = celebA.getNextBatch(
                        self.ds_train, self.label_y, rand, batch_num,
                        self.batch_size)

                    batch_z = np.random.normal(
                        0, 1, size=[self.batch_size, self.sample_size])

                    #for i in range(2):
                    # optimization G
                    _, summary_str = sess.run(
                        [opti_G, summary_op],
                        feed_dict={
                            self.images: realbatch_array,
                            self.z: batch_z,
                            self.y: real_y
                        })
                    summary_writer.add_summary(summary_str, step)

                    #for i in range(3):
                    # optimization D
                    _, summary_str = sess.run(
                        [opti_D, summary_op],
                        feed_dict={
                            self.images: realbatch_array,
                            self.z: batch_z,
                            self.y: real_y
                        })
                    summary_writer.add_summary(summary_str, step)

                    batch_num += 1

                    if step % 1 == 0:  # log every step

                        D_loss = sess.run(self.loss,
                                          feed_dict={
                                              self.images: realbatch_array,
                                              self.z: batch_z,
                                              self.y: real_y
                                          })
                        fake_loss = sess.run(self.G_fake_loss,
                                             feed_dict={
                                                 self.z: batch_z,
                                                 self.y: real_y
                                             })
                        print(
                            "EPOCH %d step %d: D: loss = %.7f G: loss=%.7f " %
                            (e, step, D_loss, fake_loss))

                    if np.mod(step, 50) == 1:

                        sample_images = sess.run(self.fake_images,
                                                 feed_dict={
                                                     self.z: batch_z,
                                                     self.y:
                                                     sample_label_celebA()
                                                 })

                        #save_images(sample_images[0:64] , [8, 8], './{}/train_{:02d}_{:04d}.png'.format(self.sample_path, e, step))
                        save_images(
                            sample_images[0:64], [8, 8],
                            './{}/train_{:02d}_{:04d}.png'.format(
                                self.sample_path, e, step))

                        #save_images_single(sample_images[0], './{}/train_{:02d}_{:04d}.png'.format(self.sample_path, e, step))
                        #Save the model
                        self.saver.save(sess, self.model_path)

                e += 1
                batch_num = 0

                # #list of D loss curve
                train_D_loss.append(D_loss)
                # #list of G loss curve
                train_G_loss.append(fake_loss)

            #plot D-loss and G-loss
            plt.plot(train_D_loss, label="D")
            plt.plot(train_G_loss, label="G")
            plt.legend()
            plt.xlabel("epoch")
            plt.ylabel("loss")
            plt.show()

            save_path = self.saver.save(sess, self.model_path)
            print("Model saved in file: %s" % save_path)
Example #47
    def train(self):
        self.train_hist = {}
        self.train_hist['D_loss'] = []
        self.train_hist['G_loss'] = []
        self.train_hist['per_epoch_time'] = []
        self.train_hist['total_time'] = []

        if self.gpu_mode:
            self.y_real_, self.y_fake_ = Variable(torch.ones(self.batch_size, 1).cuda()), \
                                         Variable(torch.zeros(self.batch_size, 1).cuda())
        else:
            self.y_real_, self.y_fake_ = Variable(torch.ones(self.batch_size, 1)), \
                                         Variable(torch.zeros(self.batch_size, 1))

        self.D.train()
        print('training start!!')
        start_time = time.time()
        for epoch in range(self.epoch):
            self.G.train()
            epoch_start_time = time.time()
            for iter, (x_, _) in enumerate(self.data_loader):
                if iter == self.data_loader.dataset.__len__(
                ) // self.batch_size:
                    break

                z_ = torch.rand((self.batch_size, self.z_dim))

                if self.gpu_mode:
                    x_, z_ = Variable(x_.cuda()), Variable(z_.cuda())
                else:
                    x_, z_ = Variable(x_), Variable(z_)

                # update D network
                self.D_optimizer.zero_grad()

                D_real = self.D(x_)
                D_real_loss = self.MSE_loss(D_real, self.y_real_)

                G_ = self.G(z_)
                D_fake = self.D(G_)
                D_fake_loss = self.MSE_loss(D_fake, self.y_fake_)

                D_loss = D_real_loss + D_fake_loss
                self.train_hist['D_loss'].append(D_loss.data[0])

                D_loss.backward()
                self.D_optimizer.step()

                # update G network
                self.G_optimizer.zero_grad()

                G_ = self.G(z_)
                D_fake = self.D(G_)
                G_loss = self.MSE_loss(D_fake, self.y_real_)
                self.train_hist['G_loss'].append(G_loss.data[0])

                G_loss.backward()
                self.G_optimizer.step()

                if ((iter + 1) % 100) == 0:
                    print(
                        "Epoch: [%2d] [%4d/%4d] time: %4.4f, D_loss: %.8f, G_loss: %.8f"
                        % ((epoch + 1),
                           (iter + 1), self.data_loader.dataset.__len__() //
                           self.batch_size, time.time() - start_time,
                           D_loss.data[0], G_loss.data[0]))
                if np.mod((iter + 1), 300) == 0:
                    samples = self.G(self.sample_z_)
                    if self.gpu_mode:
                        samples = samples.cpu().data.numpy().transpose(
                            0, 2, 3, 1)
                    else:
                        samples = samples.data.numpy().transpose(0, 2, 3, 1)
                    tot_num_samples = min(self.sample_num, self.batch_size)
                    manifold_h = int(np.floor(np.sqrt(tot_num_samples)))
                    manifold_w = int(np.floor(np.sqrt(tot_num_samples)))
                    utils.save_images(
                        samples[:manifold_h * manifold_w, :, :, :],
                        [manifold_h, manifold_w],
                        utils.check_folder(self.result_dir + '/' +
                                           self.model_dir) + '/' +
                        self.model_name +
                        '_train_{:02d}_{:04d}.png'.format(epoch, (iter + 1)))

            self.train_hist['per_epoch_time'].append(time.time() -
                                                     epoch_start_time)
            self.visualize_results((epoch + 1))

        self.train_hist['total_time'].append(time.time() - start_time)
        print("Avg one epoch time: %.2f, total %d epochs time: %.2f" %
              (np.mean(self.train_hist['per_epoch_time']), self.epoch,
               self.train_hist['total_time'][0]))
        print("Training finish!... save training results")
        self.save()
        utils.generate_animation(
            self.result_dir + '/' + self.model_dir + '/' + self.model_name,
            self.epoch)
        utils.loss_plot(self.train_hist,
                        os.path.join(self.save_dir, self.model_dir),
                        self.model_name)
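# The MSE_loss pairing above is the LSGAN objective in compact form:
#     D: mse(D(x), 1) + mse(D(G(z)), 0)
#     G: mse(D(G(z)), 1)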
Example #48
def _handler_video(ir_path,
                   vis_path,
                   model_path,
                   model_pre_path,
                   ssim_weight,
                   output_path=None):
    infrared = ir_path[0]
    img = get_train_images(infrared, flag=False)
    img = img.reshape([1, img.shape[0], img.shape[1], img.shape[2]])
    img = np.transpose(img, (0, 2, 1, 3))
    print('img shape final:', img.shape)
    num_imgs = len(ir_path)

    with tf.Graph().as_default(), tf.Session() as sess:
        # build the dataflow graph
        infrared_field = tf.placeholder(tf.float32,
                                        shape=img.shape,
                                        name='content')
        visible_field = tf.placeholder(tf.float32,
                                       shape=img.shape,
                                       name='style')

        dfn = DenseFuseNet(model_pre_path)

        output_image = dfn.transform_addition(infrared_field, visible_field)

        # restore the trained model and run the style transferring
        saver = tf.train.Saver()
        saver.restore(sess, model_path)

        ##################GET IMAGES###################################################################################
        start_time = datetime.now()
        for i in range(num_imgs):
            print('image number:', i)
            infrared = ir_path[i]
            visible = vis_path[i]

            ir_img = get_train_images(infrared, flag=False)
            vis_img = get_train_images(visible, flag=False)
            dimension = ir_img.shape

            ir_img = ir_img.reshape(
                [1, dimension[0], dimension[1], dimension[2]])
            vis_img = vis_img.reshape(
                [1, dimension[0], dimension[1], dimension[2]])

            ir_img = np.transpose(ir_img, (0, 2, 1, 3))
            vis_img = np.transpose(vis_img, (0, 2, 1, 3))

            ################FEED########################################
            output = sess.run(output_image,
                              feed_dict={
                                  infrared_field: ir_img,
                                  visible_field: vis_img
                              })
            save_images(infrared,
                        output,
                        output_path,
                        prefix='fused' + str(i),
                        suffix='_addition_' + str(ssim_weight))
            ######################################################################################################
        elapsed_time = datetime.now() - start_time
        print('Dense block video==> elapsed time: %s' % (elapsed_time))
Example #49
  def train(self):
    """Train DCGAN"""
    with tf.Graph().as_default(), tf.device('/cpu:0'):
      # Override the number of preprocessing threads to account for the increased
      # number of GPU towers.
      num_preprocess_threads = FLAGS.num_preprocess_threads
      images, labels = image_processing.distorted_inputs(self.dataset, num_preprocess_threads=num_preprocess_threads)
  
      with tf.device('/gpu:0'):
        # Set weight_decay for weights in Conv and FC layers.
        
        self.build_model(FLAGS.batch_size, images, labels, 12, True, False)
            
        d_opt = tf.train.AdamOptimizer(FLAGS.learning_rate, beta1=FLAGS.beta1) \
                      .minimize(self.d_loss, var_list=self.d_vars)
        g_opt = tf.train.AdamOptimizer(FLAGS.learning_rate, beta1=FLAGS.beta1) \
                      .minimize(self.g_loss, var_list=self.g_vars)
                      
        train_op = tf.group(d_opt, g_opt, g_opt)
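        # Note: passing g_opt twice to tf.group has no extra effect; tf.group
        # only groups control dependencies, so the duplicate op is redundant.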

        batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION)

      # Add a summaries for the input processing and global_step.
      summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)

      # Group all updates to into a single train op.
      batchnorm_updates_op = tf.group(*batchnorm_updates)
      train_op = tf.group(train_op, batchnorm_updates_op)
  
      # Create a saver.
      saver = tf.train.Saver(tf.all_variables())
  
      summary_op = tf.merge_summary(summaries)
  
      # Build an initialization operation to run below.
      init = tf.initialize_all_variables()
  
      # Start running operations on the Graph. allow_soft_placement must be set to
      # True to build towers on GPU, as some of the ops do not have GPU
      # implementations.
      sess = tf.Session(config=tf.ConfigProto(
          allow_soft_placement=True,
          log_device_placement=FLAGS.log_device_placement))
      sess.run(init)
  
  
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        variables_to_restore = tf.get_collection(
            slim.variables.VARIABLES_TO_RESTORE)
        restorer = tf.train.Saver(variables_to_restore)
        restorer.restore(sess, ckpt.model_checkpoint_path)
        print('%s: Pre-trained model restored from %s' %
              (datetime.now(), FLAGS.checkpoint_dir))
  
      # Start the queue runners.
      tf.train.start_queue_runners(sess=sess)
      summary_writer = tf.train.SummaryWriter(
          FLAGS.log_dir,
          graph=sess.graph)
  
      for step in xrange(FLAGS.max_steps):
        start_time = time.time()
        sess.run([train_op])
        duration = time.time() - start_time
  
        if step % 10 == 0:
          examples_per_sec = FLAGS.batch_size / float(duration)
          format_str = ('%s: step %d (%.1f examples/sec; %.3f '
                        'sec/batch)')
          print(format_str % (datetime.now(), step, examples_per_sec, duration))
  
        if step % 100 == 0:
          summary_str = sess.run(summary_op)
          summary_writer.add_summary(summary_str, step)
          samples = sess.run(self.G)
          save_images(samples, './%s/%d' % (FLAGS.sample_dir, step))
  
        # Save the model checkpoint periodically.
        if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
          checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')
          saver.save(sess, checkpoint_path, global_step=step)
Example #50
    def train(self):
        global_step = tf.Variable(0, trainable=False)
        add_global = global_step.assign_add(1)
        new_learning_rate = tf.train.exponential_decay(self.learn_rate_init,
                                                       global_step=global_step,
                                                       decay_steps=10000,
                                                       decay_rate=0.98)
        #for D
        trainer_D = tf.train.RMSPropOptimizer(learning_rate=new_learning_rate)
        gradients_D = trainer_D.compute_gradients(self.D_loss,
                                                  var_list=self.d_vars)
        opti_D = trainer_D.apply_gradients(gradients_D)

        #for G
        trainer_G = tf.train.RMSPropOptimizer(learning_rate=new_learning_rate)
        gradients_G = trainer_G.compute_gradients(self.G_loss,
                                                  var_list=self.g_vars)
        opti_G = trainer_G.apply_gradients(gradients_G)

        #for E
        trainer_E = tf.train.RMSPropOptimizer(learning_rate=new_learning_rate)
        gradients_E = trainer_E.compute_gradients(self.encode_loss,
                                                  var_list=self.e_vars)
        opti_E = trainer_E.apply_gradients(gradients_E)

        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:

            sess.run(init)
            # inception_score = 0
            # Initialzie the iterator
            sess.run(self.training_init_op)
            sess.run(self.val_init_op)
            summary_op = tf.summary.merge_all()
            # summary_op1 = tf.Summary(value=[tf.Summary.Value(tag="inc", simple_value=inception_score)])
            now = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
            summary_writer_train = tf.summary.FileWriter(
                '{}/{}/train'.format(self.log_dir, now), sess.graph)
            summary_writer_test = tf.summary.FileWriter(
                '{}/{}/test'.format(self.log_dir, now), sess.graph)
            step = 0

            if self.load_type != 'none' and os.path.exists(self.ckp_dir + '/' +
                                                           self.load_type):
                ckpt = tf.train.get_checkpoint_state(
                    self.ckp_dir, latest_filename=self.load_type)
                if ckpt and ckpt.model_checkpoint_path:
                    self.saver.restore(sess, ckpt.model_checkpoint_path)
                    g_step = int(
                        ckpt.model_checkpoint_path.split('/')[-1].split('-')
                        [-1])
                    sess.run(global_step.assign(g_step))
                    self.best_loss = np.load(self.ckp_dir + '/' +
                                             'best_loss.npy')
                    print('model restored')

            step = global_step.eval()

            test_images = self.dataset.get_next_val_batch(sess)
            measure_dict = {'recon_loss': [], 'psnr': [], 'ssim': []}

            # inf_net = inf_def.InferenceNetwork()

            while step <= self.max_iters:
                next_x_images = self.dataset.get_next_train_batch(sess)

                theta_val = self.mdevice.sample_theta(self.FLAGS,
                                                      self.batch_size)
                theta_val_rec = self.mdevice.sample_theta(
                    self.FLAGS, self.batch_size)
                theta_val_xp = self.mdevice.sample_theta(
                    self.FLAGS, self.batch_size)
                # next_x_images = sess.run(self.next_x)
                # next_x_images = np.reshape(next_x_images,[-1,28,28,1])
                fd = {
                    self.images: next_x_images,
                    self.theta_ph: theta_val,
                    self.theta_ph_rec: theta_val_rec,
                    self.theta_ph_xp: theta_val_xp
                }
                sess.run(opti_E, feed_dict=fd)
                # optimization G

                sess.run(opti_G, feed_dict=fd)
                # optimization D
                sess.run(opti_D, feed_dict=fd)
                # lossy_images , generated_image = sess.run([self.x_lossy,self.x_p], feed_dict=fd)

                if (step + 1) % self.print_every == 0:

                    fd_test = {
                        self.images: test_images,
                        self.theta_ph: theta_val,
                        self.theta_ph_rec: theta_val_rec,
                        self.theta_ph_xp: theta_val_xp
                    }
                    tags = [
                        'D_loss', 'G_loss', 'E_loss', 'PL_loss', 'L2_loss',
                        'kl_loss', 'recon_loss', 'Learning_rate'
                    ]
                    all_loss_train = sess.run([
                        self.D_loss, self.G_loss, self.encode_loss,
                        self.PL_loss, self.L2_loss, self.kl_loss,
                        self.recon_loss, new_learning_rate
                    ],
                                              feed_dict=fd)
                    all_loss_test = sess.run([
                        self.D_loss, self.G_loss, self.encode_loss,
                        self.PL_loss, self.L2_loss, self.kl_loss,
                        self.recon_loss, new_learning_rate
                    ],
                                             feed_dict=fd_test)
                    print(
                        "Step %d: D: loss = %.7f G: loss=%.7f E: loss=%.7f PL loss=%.7f L2 loss=%.7f KL=%.7f RC=%.7f, LR=%.7f"
                        % (step, all_loss_train[0], all_loss_train[1],
                           all_loss_train[2], all_loss_train[3],
                           all_loss_train[4], all_loss_train[5],
                           all_loss_train[6], all_loss_train[7]))
                    summary_str = tf.Summary()
                    for k, v in zip(tags, all_loss_train):
                        summary_str.value.add(tag=k, simple_value=v)
                    summary_writer_train.add_summary(summary_str, step)
                    summary_str = tf.Summary()
                    for k, v in zip(tags, all_loss_test):
                        summary_str.value.add(tag=k, simple_value=v)
                    summary_writer_test.add_summary(summary_str, step)

                    # summary_str = sess.run(summary_op, feed_dict=fd_test)
                    # summary_writer_test.add_summary(summary_str, step)
                    # save_images(next_x_images[0:self.batch_size], [self.batch_size/8, 8],
                    #             '{}/train_{:02d}_real.png'.format(self.sample_path, step))

                    rec_images, lossy_images, generated_image, rc = sess.run(
                        [
                            self.x_tilde, self.x_lossy, self.x_p,
                            self.recon_loss
                        ],
                        feed_dict=fd_test)
                    measure_dict['recon_loss'].append(rc)
                    # y_hat_val = inf_net.get_y_hat_val(rec_images)
                    # inception_score = get_inception_score(y_hat_val)
                    # score_list.append(inception_score)
                    # summary_str = sess.run(self.summ[0], feed_dict=fd_test)
                    # summary_str = sess.run(summary_op1)
                    lossy_images = np.clip(lossy_images, self.FLAGS.x_min,
                                           self.FLAGS.x_max)
                    # summary_writer_test.add_summary(summary_str, step)
                    sample_images = [
                        test_images[0:self.batch_size],
                        lossy_images[0:self.batch_size],
                        rec_images[0:self.batch_size],
                        generated_image[0:self.batch_size]
                    ]
                    # save_images(sample_images[0:self.batch_size] , [self.batch_size/8, 8], '{}/train_{:02d}_recon.png'.format(self.sample_path, step))
                    titles = ['orig', 'lossy', 'reconstructed', 'generated']
                    save_images(
                        sample_images, [self.batch_size // 8, 8],
                        '{}/train_{:02d}_images.png'.format(
                            self.log_dir, step), measure_dict, titles,
                        (self.FLAGS.x_min, self.FLAGS.x_max))
                if (step + 1) % self.save_every == 0:
                    self.saver.save(sess,
                                    self.ckp_dir + '/last.ckpt',
                                    global_step=global_step,
                                    latest_filename='last')
                    print("Model saved in file: %s" % self.ckp_dir)
                if (step + 1) % (self.save_every // 4) == 0:
                    if rc < self.best_loss:
                        self.best_loss = rc
                        np.save(self.ckp_dir + '/' + 'best_loss.npy',
                                self.best_loss)
                        self.saver_best.save(sess,
                                             self.ckp_dir + '/best.ckpt',
                                             global_step=global_step,
                                             latest_filename='best')
                        print("Best model saved in file: %s" % self.ckp_dir)

                step += 1
                new_learn_rate = sess.run(new_learning_rate)
                if new_learn_rate > 0.00005:
                    sess.run(add_global)