Example No. 1
    def generate(self, source, destination=None, samples=20, grid_width=480, grid_height=240):
        # Load the input and output of the graph
        input  = self.graph.get_tensor_by_name("en_input:0"        )
        output = self.graph.get_tensor_by_name("decoder/main_out:0")

        # For generating a single image for a source image
        if os.path.isfile(source):
            image = loader.load_image(source, self.image_size, self.image_size)
            # Repeat the single image so it fills a full batch for the network.
            batch = np.asarray([image for _ in range(self.batch_size)])
            preds = self.session.run(output, feed_dict={input: batch})
            # Outline the input in red and place it beside its reconstruction.
            batch[0] = utils.add_border(batch[0], color=[1.0, 0.0, 0.0])
            grid = np.concatenate((batch[0], preds[0]), axis=1)
            grid = (grid * 255.0).astype(np.uint8)
        # For generating a grid of images from a directory of images
        elif os.path.isdir(source):
            data = loader.DataSet(images_dir = source,
                                  hard_load  = False,
                                  width      = self.image_size,
                                  height     = self.image_size)
            batch, _ = data.next_batch(self.batch_size)

            preds  = self.session.run(output, feed_dict={input: batch})
            # Outline the originals in red to separate them from the reconstructions.
            for i in range(samples):
                batch[i] = utils.add_border(batch[i], color=[1.0, 0.0, 0.0])
            grid = self.construct_image_grid(batch, preds, samples, grid_width, grid_height)
        else:
            print(source,"must be an image pathname or a directory of images")
            sys.exit()

        if destination:
            cv2.imwrite(destination, grid)
        else:
            cv2.imshow("images", grid)
            cv2.waitKey()
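A minimal usage sketch for the method above, assuming the enclosing class loads its graph and session in its constructor; the class name (Generator), its constructor arguments, and the file paths are hypothetical, not from the source:

    # Hypothetical usage; the class name, constructor arguments and paths are assumptions.
    gen = Generator(model_path="models/", model_name="cvae", image_size=64, batch_size=20)
    # A single image: write an (input | reconstruction) pair, with the input outlined in red.
    gen.generate("faces/portrait.png", destination="out/pair.png")
    # A directory of images: display a grid of 20 bordered inputs and their reconstructions.
    gen.generate("faces/", samples=20, grid_width=480, grid_height=240)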
Example No. 2
    def generate(self):
        self.data = loader.DataSet(train_path=self.train_path,
                                   width=self.image_size,
                                   height=self.image_size,
                                   validation_size=self.validation_size,
                                   hard_load=False)

        f, a = plt.subplots(4, 10, figsize=(10, 4))
        x_batch, y_batch = self.data.valid.next_batch(self.batch_size)

        # Look up the graph's input, condition-label, and output tensors, then
        # reconstruct the validation batch in a single forward pass.
        output = self.graph.get_tensor_by_name("decoder/main_out:0")
        input = self.graph.get_tensor_by_name("en_input:0")
        label = self.graph.get_tensor_by_name("labels:0")
        preds = self.session.run(output,
                                 feed_dict={
                                     input: x_batch,
                                     label: y_batch
                                 })

        for i in range(10):
            a[0][i].imshow(x_batch[i])
            a[1][i].imshow(preds[i])
            a[2][i].imshow(x_batch[10 + i])
            a[3][i].imshow(preds[10 + i])

        f.show()
        plt.draw()
        plt.waitforbuttonpress()
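A self-contained sketch of the same 4x10 plotting layout with stand-in data, to make the row arrangement explicit; the image shape and random values are placeholders, not from the source:

    import numpy as np
    import matplotlib.pyplot as plt

    x_batch = np.random.rand(20, 28, 28, 3)  # stand-in for validation images
    preds = np.random.rand(20, 28, 28, 3)    # stand-in for network reconstructions

    f, a = plt.subplots(4, 10, figsize=(10, 4))
    for i in range(10):
        a[0][i].imshow(x_batch[i])       # row 0: first ten inputs
        a[1][i].imshow(preds[i])         # row 1: their reconstructions
        a[2][i].imshow(x_batch[10 + i])  # row 2: next ten inputs
        a[3][i].imshow(preds[10 + i])    # row 3: their reconstructions
    plt.show()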
Example No. 3
    def train_initialization(self,
                             hard_load=True,
                             en_iterations=1,
                             de_iterations=1):
        # initialize training dataset
        self.data = loader.DataSet(images_dir=self.train_path,
                                   width=self.image_size,
                                   height=self.image_size,
                                   split_ratio=self.validation_size,
                                   hard_load=hard_load,
                                   verbose=self.verbose)

        self.en_iterations = max(1, en_iterations)
        self.de_iterations = max(1, de_iterations)

        # get number of batches for a single epoch
        self.num_batches = self.data.train.size // self.batch_size

        if self.verbose:
            print("Train Data size=", self.data.train.size)
            print("Test  Data size=", self.data.valid.size)
            print("Batch size     =", self.batch_size)
            print("Learning rate  =", self.lr_rate)

        self.noise_shape = [self.batch_size, self.latent_dim]
        self.input_shape = [
            self.batch_size, self.image_size, self.image_size, self.channels
        ]
        self.label_shape = [self.batch_size, self.cond_dim]
        self.en_input = tf.placeholder(tf.float32,
                                       shape=self.input_shape,
                                       name='en_input')  # Input
        self.labels = tf.placeholder(tf.float32,
                                     shape=self.label_shape,
                                     name='labels')  # Condition vector
        self.noise_input = tf.placeholder(tf.float32,
                                          shape=self.noise_shape,
                                          name='noise_input')

        # create the network's model and optimizer
        self.create_network()
        self.create_optimizer()

        # initialize all global variables
        global_variables = tf.global_variables_initializer()
        self.session.run(global_variables)
        self.saver = tf.train.Saver()

        if self.pretrained:
            if self.verbose:
                print("Loading pretrained model...", end='')
            meta_graph = self.model_path + self.model_name + '.meta'  # path to the saved graph definition
            checkpoint = tf.train.latest_checkpoint(self.model_path)
            self.saver.restore(self.session, checkpoint)  # Load the weights
            if self.verbose:
                print("done")