def shear(self, image, shear_val=5):
    row, col, _ = image.shape
    point1 = np.float32([[5, 5], [20, 5], [5, 20]])
    # Jitter two control points by up to +/- shear_val/2
    p1 = 5 + shear_val * np.random.uniform() - shear_val / 2
    p2 = 20 + shear_val * np.random.uniform() - shear_val / 2
    point2 = np.float32([[p1, 5], [p2, p1], [5, p2]])
    M = cv2.getAffineTransform(point1, point2)
    dst = cv2.warpAffine(image, M, (col, row))  # dsize is (width, height)
    return dst
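A minimal usage sketch for the shear above. The augmenter instance `aug` and the file name `input.jpg` are placeholders for illustration, not names from the original snippet:

import cv2
import numpy as np

img = cv2.imread('input.jpg')           # hypothetical input image (H x W x 3)
sheared = aug.shear(img, shear_val=5)   # 'aug': hypothetical instance of the class above
cv2.imwrite('sheared.jpg', sheared)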
Example 2
def _testOfreduce_sum(style_ids_x):
    print(style_ids_x)
    sum_op = tf.reduce_sum(style_ids_x)
    less_op = tf.less(sum_op, 0)

    style_embedding_np = np.random.uniform(-1, 1, (256, 100))
    embedding_op = tf.nn.embedding_lookup(style_embedding_np, style_ids_x)

    random_op = np.random.uniform(-1, 1, (4, 100)).astype(np.float32)

    with tf.Session() as sess:
        print(sess.run([sum_op]))
        print(sess.run([less_op]))
        true_res = random_op  # random_op is a NumPy array, not a graph op
        #true_res = np.array(true_res)
        #print(true_res.shape)

        false_res = sess.run([embedding_op])
        false_res = np.array(false_res)
        print(false_res.shape)

        char_z = tf.one_hot(style_ids_x, 56)

        z = tf.concat([true_res, char_z], axis=1)
        print(z.shape)
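A hedged example call for the test above, assuming the module imports `tensorflow as tf` (TF1 graph mode) and `numpy as np`; the ids are arbitrary but must stay below the one-hot depth of 56 and the 256 embedding rows:

import numpy as np

_testOfreduce_sum(np.array([3, 1, 4, 1]))
# Expected shapes: embedding lookup (4, 100), one_hot (4, 56), concat (4, 156)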
Example 3
    def conditional_test(self):
        checker, before_counter = self.load_model()
        if not checker:
            print_time_info("There isn't any ready model, quit.")
            sys.exit()
        if not self.is_conditional:
            print_time_info("Unconditional model doesn't support conditional test, quit.")
            sys.exit()

        sample_z = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))
        sample_y, offset = self.data_engine.conditional_test(self.batch_size)
        samples = self.sess.run(self.S, feed_dict={self.z: sample_z, self.y: sample_y})
        samples[offset:, :, :] = 0.0
        save_images(samples, 1, self.aggregate_size, self.channels, self.images_dir, False)
        print_time_info("Conditional testing end!")
Example 4
    def interpolation_test(self):
        checker, before_counter = self.load_model()
        if not checker:
            print_time_info("There isn't any ready model, quit.")
            sys.exit()
        if not self.is_conditional:
            print_time_info("Unconditional model doesn't support interpolation test, quit.")
            sys.exit()
        sample_z = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))
        labels = []
        with open(self.test_file, 'r') as file:
            for line in file:
                labels.append(line.strip())
        sample_y = self.data_engine.interpolation_test(labels, self.batch_size)
        samples = self.sess.run(self.S, feed_dict={self.z: sample_z, self.y: sample_y})
        save_images(samples, 1, self.aggregate_size, self.channels, self.images_dir, False)
        print_time_info("Interpolation testing end!")
Example 5
    def test(self):
        checker, before_counter = self.load_model()
        if not checker:
            print_time_info("There isn't any ready model, quit.")
            sys.exit()
        
        sample_z = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))
        if self.is_conditional:
            sample = self.data_engine.get_batch(self.batch_size, with_labels=True, is_random=True)
            sample_y = sample['labels']
            samples = self.sess.run(self.S, feed_dict={self.z: sample_z, self.y: sample_y})
        else:
            samples = self.sess.run(self.S, feed_dict={self.z: sample_z})

        save_images(samples, 0, self.aggregate_size, self.channels, self.images_dir, False)
        print_time_info("Testing end!")
Example 6
    def np_augment_input(inputs):
        xyz = inputs[0]
        features = inputs[1]
        theta = np.random.uniform(size=(1, ), low=0, high=2 * np.pi)
        # Rotation matrices
        c, s = np.cos(theta), np.sin(theta)
        cs0 = np.zeros_like(c)
        cs1 = np.ones_like(c)
        R = np.stack([c, -s, cs0, s, c, cs0, cs0, cs0, cs1], axis=1)
        stacked_rots = np.reshape(R, (3, 3))

        # Apply rotations
        transformed_xyz = np.reshape(np.matmul(xyz, stacked_rots), [-1, 3])
        # Choose random scales for each example
        min_s = cfg.augment_scale_min
        max_s = cfg.augment_scale_max

        # ES: minval, maxval -> low, high, / shape -> size
        if cfg.augment_scale_anisotropic:
            s = np.random.uniform(size=(1, 3), low=min_s, high=max_s)
        else:
            s = np.random.uniform(size=(1, 1), low=min_s, high=max_s)

        symmetries = []
        for i in range(3):
            if cfg.augment_symmetries[i]:
                symmetries.append(
                    np.round(np.random.uniform(size=(1, 1))) * 2 - 1)
            else:
                symmetries.append(np.ones([1, 1]))
        s *= np.concatenate(symmetries, 1)

        # Create N x 3 vector of scales to multiply with stacked_points
        stacked_scales = np.tile(s, [np.shape(transformed_xyz)[0], 1])

        # Apply scales
        transformed_xyz = transformed_xyz * stacked_scales

        # ES: stddev -> scale
        noise = np.random.normal(size=transformed_xyz.shape,
                                 scale=cfg.augment_noise)
        transformed_xyz = transformed_xyz + noise
        rgb = features[:, :3]
        stacked_features = np.concatenate([transformed_xyz, rgb], axis=-1)
        return stacked_features
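A self-contained sketch of how this augmentation might be driven. The `cfg` stub, its values, and the random point cloud are assumptions for illustration only, and the function is assumed to be reachable at module level:

import numpy as np

class cfg:  # hypothetical config stub with the fields the function reads
    augment_scale_min = 0.8
    augment_scale_max = 1.2
    augment_scale_anisotropic = True
    augment_symmetries = [True, False, False]
    augment_noise = 0.001

xyz = np.random.uniform(-1, 1, size=(1024, 3))       # dummy point cloud
features = np.random.uniform(0, 1, size=(1024, 6))   # rgb in the first 3 columns
out = np_augment_input([xyz, features])
print(out.shape)  # (1024, 6): rotated, scaled, jittered xyz + rgb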
Example 7
def stochastic_round(tensor, stochastic):
    value_floor = floor(tensor)
    value_ceil = ceil(tensor)
    if stochastic == 'naive':
        prob = 0.5
    elif stochastic == 'ulp':
        # tensor is scaled so ulp is 1
        prob = (tensor - value_floor)
    else:
        raise TypeError(
            'Stochastic argument {} not recognized.'.format(stochastic))
    if is_tensor(tensor):
        randoms = tf.random_uniform(shape=tensor.shape)
    else:
        randoms = np.random.uniform(size=tensor.shape)
    # Round up with probability `prob` so the result is unbiased: E[round] = tensor
    round_up = cast(randoms < prob, float)
    round_down = cast(randoms >= prob, float)
    return value_ceil * round_up + value_floor * round_down
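A quick NumPy-only exercise of the helper above. It assumes the generic `floor`/`ceil`/`cast`/`is_tensor` wrappers (not shown in this snippet) dispatch to their NumPy equivalents for array inputs:

import numpy as np

x = np.array([0.1, 0.5, 0.9, 2.25])
print(stochastic_round(x, 'ulp'))    # each entry becomes floor(x) or ceil(x), up with prob = frac
print(stochastic_round(x, 'naive'))  # 50/50 coin flip between floor and ceil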
Example 8
    def perturb(self, x_nat, y, sess):

        if args.rand:
            x = x_nat + np.random.uniform(-args.eps, args.eps, x_nat.shape)
        else:
            x = np.copy(x_nat)

        for i in range(args.num_steps):
            grad = sess.run(self.grad,
                            feed_dict={
                                self.x_input: x,
                                self.y_input: y
                            })

            x = np.add(x,
                       args.step_size * np.sign(grad),
                       out=x,
                       casting='unsafe')

            x = np.clip(x, x_nat - args.eps, x_nat + args.eps)
            x = np.clip(x, 0, 1)

        return x
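The loop above is the standard projected gradient descent (PGD) attack: a random start inside the epsilon ball, repeated signed-gradient steps, and a projection back into the ball. A standalone check of just the projection step, independent of the model, session, and `args` object:

import numpy as np

eps = 0.1
x_nat = np.random.uniform(0, 1, size=(4,))
x = x_nat + np.random.uniform(-0.3, 0.3, size=(4,))  # deliberately overshoot eps
x = np.clip(x, x_nat - eps, x_nat + eps)             # project into the L-inf ball
x = np.clip(x, 0, 1)                                 # keep a valid pixel range
assert np.all(np.abs(x - x_nat) <= eps + 1e-12)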
Example 9
def visualize(sess, dcgan, config, option):
    image_frame_dim = int(math.ceil(config.batch_size**.5))
    if option == 0:
        z_sample = np.random.uniform(-0.5, 0.5, size=(config.batch_size, dcgan.z_dim))
        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
        save_images(samples, [image_frame_dim, image_frame_dim],
                    './samples/test_{}.png'.format(strftime('%Y%m%d%H%M%S', gmtime())))
    elif option == 1:
        values = np.arange(0, 1, 1./config.batch_size)

        for idx in range(100):
            print(' [*] {}'.format(idx))
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])

            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]

            if config.dataset == 'mnist':
                y = np.random.choice(10, config.batch_size)
                y_one_hot = np.zeros((config.batch_size, 10))
                y_one_hot[np.arange(config.batch_size), y] = 1

                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
            else:
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})

            save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_arange_{}.png'.format(idx))
    elif option == 2:
        values = np.arange(0, 1, 1./config.batch_size)

        for idx in [random.randint(0, 90) for _ in range(100)]:
            print(' [*] {}'.format(idx))
            z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
            z_sample = np.tile(z, (config.batch_size, 1))

            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]

            if config.dataset == 'mnist':
                y = np.random.choice(10, config.batch_size)
                y_one_hot = np.zeros((config.batch_size, 10))
                y_one_hot[np.arange(config.batch_size), y] = 1

                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
            else:
                samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})

            try:
                make_gif(samples, './samples/test_gif_{}.gif'.format(idx))
            except:
                save_images(samples, [image_frame_dim, image_frame_dim],
                            './samples/test_{}.png'.format(strftime('%Y%m%d%H%M%S', gmtime())))
    elif option == 3:
        values = np.arange(0, 1, 1./config.batch_size)

        for idx in range(100):
            print(' [*] {}'.format(idx))
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])

            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            make_gif(samples, './samples/test_gif_{}.gif'.format(idx))
    elif option == 4:
        image_set = []
        values = np.arange(0, 1, 1./config.batch_size)

        for idx in range(100):
            print(' [*] {}'.format(idx))
            z_sample = np.zeros([config.batch_size, dcgan.z_dim])

            for kdx, z in enumerate(z_sample):
                z[idx] = values[kdx]

            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            make_gif(image_set[-1], './samples/test_gif_merged.gif', duration=8)
Example 10
    def train(self, config):
        d_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1)\
            .minimize(self.d_loss, var_list=self.d_vars)
        g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1)\
            .minimize(self.g_loss, var_list=self.g_vars)

        try:
            tf.global_variables_initializer().run()
        except:
            tf.initialize_all_variables().run()

        self.g_sum = merge_summary([
            self.z_sum, self.d__sum, self.G_sum, self.d_loss_fake_sum,
            self.g_loss_sum
        ])
        self.d_sum = merge_summary(
            [self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
        self.writer = SummaryWriter('./logs', self.sess.graph)

        sample_z = np.random.uniform(-1, 1, size=(self.sample_num, self.z_dim))

        if config.dataset == 'mnist':
            sample_inputs = self.data_X[0:self.sample_num]
            sample_labels = self.data_y[0:self.sample_num]
        else:
            sample_files = self.data[0:self.sample_num]
            sample = [
                get_image(sample_file,
                          input_height=self.input_height,
                          input_width=self.input_width,
                          resize_height=self.output_height,
                          resize_width=self.output_width,
                          crop=self.crop,
                          grayscale=self.grayscale)
                for sample_file in sample_files
            ]
            if self.grayscale:
                sample_inputs = np.array(sample).astype(np.float32)[:, :, :, None]
            else:
                sample_inputs = np.array(sample).astype(np.float32)

        counter = 1
        start_time = time.time()
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)

        if could_load:
            counter = checkpoint_counter
            print(' [*] Load SUCCESS')
        else:
            print(' [!] Load failed...')

        for epoch in range(config.epoch):
            if config.dataset == 'mnist':
                batch_idxs = min(len(self.data_X),
                                 config.train_size) // config.batch_size
            else:
                self.data = glob(
                    os.path.join('./data', config.dataset,
                                 self.input_fname_pattern))
                batch_idxs = min(len(self.data),
                                 config.train_size) // config.batch_size

            for idx in range(0, batch_idxs):
                if config.dataset == 'mnist':
                    batch_images = self.data_X[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]
                    batch_labels = self.data_y[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]
                else:
                    batch_files = self.data[idx * config.batch_size:(idx + 1) *
                                            config.batch_size]
                    batch = [
                        get_image(batch_file,
                                  input_height=self.input_height,
                                  input_width=self.input_width,
                                  resize_height=self.output_height,
                                  resize_width=self.output_width,
                                  crop=self.crop,
                                  grayscale=self.grayscale)
                        for batch_file in batch_files
                    ]
                    if self.grayscale:
                        batch_images = np.array(batch).astype(
                            np.float32)[:, :, :, None]
                    else:
                        batch_images = np.array(batch).astype(np.float32)

                batch_z = np.random.uniform(
                    -1, 1, [config.batch_size, self.z_dim]).astype(np.float32)

                if config.dataset == 'mnist':
                    # Update D network
                    _, summary_str = self.sess.run(
                        [d_optim, self.d_sum],
                        feed_dict={
                            self.inputs: batch_images,
                            self.z: batch_z,
                            self.y: batch_labels,
                        })
                    self.writer.add_summary(summary_str, counter)

                    # Update G network
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={
                                                       self.z: batch_z,
                                                       self.y: batch_labels,
                                                   })
                    self.writer.add_summary(summary_str, counter)

                    # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={
                                                       self.z: batch_z,
                                                       self.y: batch_labels
                                                   })
                    self.writer.add_summary(summary_str, counter)

                    errD_fake = self.d_loss_fake.eval({
                        self.z: batch_z,
                        self.y: batch_labels
                    })
                    errD_real = self.d_loss_real.eval({
                        self.inputs: batch_images,
                        self.y: batch_labels
                    })
                    errG = self.g_loss.eval({
                        self.z: batch_z,
                        self.y: batch_labels
                    })
                else:
                    # Update D network
                    _, summary_str = self.sess.run([d_optim, self.d_sum],
                                                   feed_dict={
                                                       self.inputs:
                                                       batch_images,
                                                       self.z: batch_z
                                                   })
                    self.writer.add_summary(summary_str, counter)

                    # Update G network
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={self.z: batch_z})
                    self.writer.add_summary(summary_str, counter)

                    # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={self.z: batch_z})
                    self.writer.add_summary(summary_str, counter)

                    errD_fake = self.d_loss_fake.eval({self.z: batch_z})
                    errD_real = self.d_loss_real.eval(
                        {self.inputs: batch_images})
                    errG = self.g_loss.eval({self.z: batch_z})

                counter += 1
                print('Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f' %
                      (epoch, idx, batch_idxs, time.time() - start_time,
                       errD_fake + errD_real, errG))

                if np.mod(counter, 100) == 1:
                    if config.dataset == 'mnist':
                        samples, d_loss, g_loss = self.sess.run(
                            [self.sampler, self.d_loss, self.g_loss],
                            feed_dict={
                                self.z: sample_z,
                                self.inputs: sample_inputs,
                                self.y: sample_labels
                            })
                        save_images(
                            samples, image_manifold_size(samples.shape[0]),
                            './{}/train_{:02d}_{:04d}.png'.format(
                                config.sample_dir, epoch, idx))
                        print(
                            '[Sample] d_loss: {:0.8f}, g_loss: {:0.8f}'.format(
                                d_loss, g_loss))
                    else:
                        try:
                            samples, d_loss, g_loss = self.sess.run(
                                [self.sampler, self.d_loss, self.g_loss],
                                feed_dict={
                                    self.z: sample_z,
                                    self.inputs: sample_inputs
                                })
                            save_images(
                                samples, image_manifold_size(samples.shape[0]),
                                './{}/train_{:02d}_{:04d}.png'.format(
                                    config.sample_dir, epoch, idx))
                            print('[Sample] d_loss: {:0.8f}, g_loss: {:0.8f}'.
                                  format(d_loss, g_loss))
                        except:
                            print('one pic error!...')

                if np.mod(counter, 500) == 2:
                    self.save(config.checkpoint_dir, counter)