Example #1
def test():
    filename_queue = tf.train.string_input_producer([tfrecord_filename])
    data_array, target_array = reader.read_and_decode(filename_queue,
                                                      shuffle_batch=False)
    with tf.device('/gpu:0'):
        global_step = slim.create_global_step()
    with tf.device('/cpu:0'):
        lrn_rate = tf.train.exponential_decay(FLAGS.learning_rate,
                                              global_step,
                                              5000,
                                              0.917,
                                              staircase=True)
        tf.summary.scalar('learning_rate', lrn_rate)

    with tf.device('/gpu:0'):
        # tf.train.exponential_decay(
        #     FLAGS.learning_rate, global_step, 5000, 0.92, staircase=True)
        # visualizing learning
        # tf.summary.scalar('learning_rate', lrn_rate)
        optimizer = tf.train.AdamOptimizer(lrn_rate)
        # data = tf.placeholder(tf.float32, [FLAGS.batch_size, 32, 32, 32, 2])
        # target = tf.placeholder(tf.float32, [FLAGS.batch_size, 32, 32, 32, 1])
        ops = epn_test.model(input_data=data_array)
        loss, mask = losses.get_l1_loss(input_data=data_array,
                                        pre_data=ops,
                                        label=target_array)
        # tf.summary.scalar('loss_function', loss)
        # train_step = optimizer.minimize(loss=loss, global_step=global_step)
    with tf.device('/cpu:0'):
        tf.summary.scalar('loss_function', loss)
        saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, model_path)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        loss_value_total = 0.0
        for i in range(1):
            loss_value_total = loss_value_total + sess.run(loss)
            data_origin, pre_origin, target_origin = sess.run(
                [data_array, ops, target_array])
            # loss_value_total=tf.concat([loss_value_total,loss_value],axis=0)
            print(str(i) + ' step')
            # print(type(loss_value))
        print('loss_mean=' + str(loss_value_total))
        # input = tf.reshape(data_array[:, :, :, :, 0], [32, 32, 32])
        # pre = tf.subtract(tf.exp(pre), tf.constant(1, tf.float32))
        # pre = tf.reshape(pre, [32, 32, 32])
        # target = tf.subtract(tf.exp(target_array), tf.constant(1, tf.float32))
        # target = tf.reshape(target, [32, 32, 32])
        input = np.reshape(data_origin[:, :, :, :, 0], (32, 32, 32))
        pre = np.exp(pre_origin) - 1
        pre = np.reshape(pre, [32, 32, 32])
        target = np.exp(target_origin) - 1
        target = np.reshape(target, [32, 32, 32])
        outprefix = os.path.join(FLAGS.output_path, 'sample_')
        export_prediction_to_mesh(outprefix, input, pre, target)

        coord.request_stop()
        coord.join(threads)
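
Example #1 refers to a module-level FLAGS object plus tfrecord_filename and model_path that are defined outside the excerpt. A minimal sketch of the TF 1.x flag setup it appears to assume follows; the names come from the snippet, but every default value and path is a placeholder, not taken from the source.

import tensorflow as tf

# Hypothetical flag definitions; defaults are placeholders.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_float('learning_rate', 1e-4, 'Initial learning rate.')
tf.app.flags.DEFINE_integer('batch_size', 32, 'Examples per minibatch.')
tf.app.flags.DEFINE_integer('max_epoch', 10, 'Number of training epochs.')
tf.app.flags.DEFINE_string('output_path', './out', 'Where to write results.')

tfrecord_filename = 'data.tfrecords'  # placeholder path
model_path = 'model.ckpt'             # placeholder checkpoint path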
Example #2
def inputs(filename, batch_size, num_epochs, num_threads,
           imshape, num_examples_per_epoch=128):
  """Reads input tfrecord file num_epochs times. Use it for validation.

  Args:
    filename: The path to the .tfrecords file to be read
    batch_size: Number of examples per returned batch.
    num_epochs: Number of times to read the input data, or 0/None to
       train forever.
    num_threads: Number of reader workers to enqueue
    imshape: Shape of image in [height, width, n_channels] format
    num_examples_per_epoch: Number of images to use per epoch

  Returns:
    A tuple (images, labels), where:
    * images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS]
      in the range [-0.5, 0.5].
    * labels is an int32 tensor with shape [batch_size] with the true label,
      a number in the range [0, mnist.NUM_CLASSES).
    Note that a tf.train.QueueRunner is added to the graph, which
    must be run using e.g. tf.train.start_queue_runners().
  """


  if not num_epochs:
    num_epochs = None

  with tf.name_scope('input'):
    filename_queue = tf.train.string_input_producer(
      [filename], num_epochs=num_epochs, name='string_input_producer')

    # Even when reading in multiple threads, share the filename
    # queue.
    #image, label = reader.read_and_decode(filename_queue, imshape, normalize=True, flatten=False)
    image, label = reader.read_and_decode(filename_queue, imshape, normalize=True)

    # Convert from [0, 255] -> [-0.5, 0.5] floats. The normalize param in read_and_decode will do the same job.
    # image = tf.cast(image, tf.float32)
    # image = tf.cast(image, tf.float32) * (1. / 255) - 0.5

    # Shuffle the examples and collect them into batch_size batches.
    # (Internally uses a RandomShuffleQueue.)
    # We run this in two threads to avoid being a bottleneck.
    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(num_examples_per_epoch *
                             min_fraction_of_examples_in_queue)

    images, sparse_labels = tf.train.shuffle_batch(  
      [image, label], batch_size=batch_size, num_threads=num_threads,
      capacity=min_queue_examples + 3 * batch_size, enqueue_many=False,
      # Ensures a minimum amount of shuffling of examples.
      min_after_dequeue=min_queue_examples, name='batching_shuffling')
      
    return images, sparse_labels
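
The docstring above notes that a tf.train.QueueRunner is added to the graph and must be started explicitly. Below is a minimal sketch of how a caller might drive inputs(); the file name, shape, and batch size are placeholder assumptions.

images, labels = inputs('validation.tfrecords', batch_size=64, num_epochs=1,
                        num_threads=2, imshape=[28, 28, 1])
with tf.Session() as sess:
    # num_epochs creates a local epoch counter, so local variables
    # must be initialized as well.
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while not coord.should_stop():
            image_batch, label_batch = sess.run([images, labels])
    except tf.errors.OutOfRangeError:
        print('Done -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)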
Example #3
    def build_model(self):
        if F.use_tfrecords:
            # load images from tfrecords + queue thread runner for better GPU utilization
            tfrecords_filename = ['train_data/' + x for x in os.listdir('train_data/')]
            filename_queue = tf.train.string_input_producer(
                                tfrecords_filename, num_epochs=100)


            self.images, _ = reader.read_and_decode(filename_queue, F.batch_size)

            if F.output_size == 64:
                self.images = tf.image.resize_images(self.images, (64, 64))

            self.images = (self.images / 127.5) - 1

        else:    
            self.images = tf.placeholder(tf.float32,
                                       [F.batch_size, F.output_size, F.output_size,
                                        F.c_dim],
                                       name='real_images')
        
        self.z_gen = tf.placeholder(tf.float32, [None, F.z_dim], name='z')        

        self.G_mean = self.generator(self.z_gen)
        self.D, self.D_logits = self.discriminator(self.images, reuse=False)
        self.D_, self.D_logits_ = self.discriminator(self.G_mean, reuse=True)

        # calculations for getting hard predictions
        # +1 means G fooled the D network, while -1 means D has won
        self.d_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
                                                    labels=tf.ones_like(self.D)))
        self.d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                    labels=tf.zeros_like(self.D_)))
        self.d_loss = self.d_loss_real + self.d_loss_fake

        self.g_loss_actual = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                    labels=tf.ones_like(self.D_)))

        self.g_loss = tf.constant(0)        

        # NOTE: the original assigned `self.output = self.generator(self.images_i)`
        # with `self.images_i` undefined here and left `self.mse_loss =` blank;
        # a pixel-wise MSE on the generator output is assumed as a completion.
        self.output = self.G_mean
        self.mse_loss = tf.reduce_mean(tf.square(self.output - self.images))

        if F.error_conceal:
            self.mask = tf.placeholder(tf.float32, [F.batch_size] + self.image_shape, name='mask')
            self.contextual_loss = tf.reduce_sum(
              tf.contrib.layers.flatten(
              tf.abs(tf.multiply(self.mask, self.G_mean) - tf.multiply(self.mask, self.images))), 1)
            self.perceptual_loss = self.g_loss_actual
            self.complete_loss = self.contextual_loss + F.lam * self.perceptual_loss
            self.grad_complete_loss = tf.gradients(self.complete_loss, self.z_gen)

        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]

        self.saver = tf.train.Saver()
Example #4
def inputs(filename, batch_size, num_epochs, num_threads, imshape, num_examples_per_epoch=128):
    """Reads input tfrecord file num_epochs times. Use it for validation.

  Args:
    filename: The path to the .tfrecords file to be read
    batch_size: Number of examples per returned batch.
    num_epochs: Number of times to read the input data, or 0/None to
       train forever.
    num_threads: Number of reader workers to enqueue
    imshape: Shape of image in [height, width, n_channels] format
    num_examples_per_epoch: Number of images to use per epoch

  Returns:
    A tuple (images, labels), where:
    * images is a float tensor with shape [batch_size, mnist.IMAGE_PIXELS]
      in the range [-0.5, 0.5].
    * labels is an int32 tensor with shape [batch_size] with the true label,
      a number in the range [0, mnist.NUM_CLASSES).
    Note that a tf.train.QueueRunner is added to the graph, which
    must be run using e.g. tf.train.start_queue_runners().
  """
    if not num_epochs:
        num_epochs = None

    with tf.name_scope("input"):
        filename_queue = tf.train.string_input_producer([filename], num_epochs=num_epochs, name="string_input_producer")

        # Even when reading in multiple threads, share the filename
        # queue.
        image, label = reader.read_and_decode(filename_queue, imshape, normalize=True)

        # Convert from [0, 255] -> [-0.5, 0.5] floats. The normalize param in read_and_decode will do the same job.
        # image = tf.cast(image, tf.float32)
        # image = tf.cast(image, tf.float32) * (1. / 255) - 0.5

        # Shuffle the examples and collect them into batch_size batches.
        # (Internally uses a RandomShuffleQueue.)
        # We run this in two threads to avoid being a bottleneck.
        # Ensure that the random shuffling has good mixing properties.
        min_fraction_of_examples_in_queue = 0.4
        min_queue_examples = int(num_examples_per_epoch * min_fraction_of_examples_in_queue)

        images, sparse_labels = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_threads,
            capacity=min_queue_examples + 3 * batch_size,
            enqueue_many=False,
            # Ensures a minimum amount of shuffling of examples.
            min_after_dequeue=min_queue_examples,
            name="batching_shuffling",
        )

        return images, sparse_labels
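
Every example in this collection calls a reader.read_and_decode helper that is not shown. Here is a minimal sketch of the single-image variant used by Examples #2 and #4; the feature keys ('image_raw', 'label') and raw uint8 encoding are assumptions, since the original .tfrecords schema is not part of the excerpt.

def read_and_decode(filename_queue, imshape, normalize=False):
    # Hypothetical implementation; the feature keys are assumed.
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape([imshape[0] * imshape[1] * imshape[2]])
    if normalize:
        # Convert from [0, 255] -> [-0.5, 0.5] floats.
        image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)
    return image, label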
Example #5
def train():
    filename_queue = tf.train.string_input_producer([tfrecord_filename])
    data_array, target_array = reader.read_and_decode(filename_queue)
    with tf.device('/gpu:0'):
        global_step = slim.create_global_step()
    with tf.device('/cpu:0'):
        lrn_rate = tf.train.exponential_decay(FLAGS.learning_rate,
                                              global_step,
                                              5000,
                                              0.917,
                                              staircase=True)
        tf.summary.scalar('learning_rate', lrn_rate)

    with tf.device('/gpu:0'):
        optimizer = tf.train.AdamOptimizer(lrn_rate)
        # data = tf.placeholder(tf.float32, [FLAGS.batch_size, 32, 32, 32, 2])
        # target = tf.placeholder(tf.float32, [FLAGS.batch_size, 32, 32, 32, 1])
        ops = epn.model(input_data=data_array)
        loss, mask = losses.get_l1_loss(input_data=data_array,
                                        pre_data=ops,
                                        label=target_array)
        train_step = optimizer.minimize(loss=loss, global_step=global_step)
    with tf.device('/cpu:0'):
        tf.summary.scalar('loss_function', loss)
        #saver = tf.train.Saver()

    # print target_array[0][0][0][0][0]
    # sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))

    # print loss.get_shape().as_list()
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())
        merged_summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(
            '/home/zzxmllq/h5_shapenet_dim32_sdf/h5_shapenet_dim32_sdf',
            sess.graph)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for j in range(FLAGS.max_epoch):
            for i in range(2000):
                _, loss_value, mask_v = sess.run([train_step, loss, mask])
                print(str(j + 1) + ' epoch ' + str(i) + ' minibatch: ' +
                      str(loss_value))
                if i % 10 == 0:
                    summary_str = sess.run(merged_summary_op)
                    summary_writer.add_summary(summary_str, j * 2000 + i)
        #save_path=saver.save(sess,model_path)
        #print("Model saved in file:%s"% save_path)
        coord.request_stop()
        coord.join(threads)
Example #6
def train():
    filename_queue = tf.train.string_input_producer([tfrecord_filename])
    images, labels = reader.read_and_decode(filename_queue=filename_queue,
                                            batch_size=FLAGS.batch_size)
    with tf.device('/gpu:0'):
        global_step = slim.create_global_step()
    with tf.device('/cpu:0'):
        num_batches_per_epoch = FLAGS.data_size // FLAGS.batch_size
        decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
        lrn_rate = tf.train.exponential_decay(FLAGS.learning_rate,
                                              global_step,
                                              decay_steps,
                                              LEARNING_RATE_DECAY_FACTOR,
                                              staircase=True)
    with tf.device('/gpu:0'):
        fc8 = alexnet.model(input_data=images,
                            n_classes=FLAGS.n_classes,
                            keep_prob=FLAGS.keep_prob)
        total_loss = loss.get_loss(input_data=fc8, grdtruth=labels)
        loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
        losses = tf.get_collection('losses')
        loss_averages_op = loss_averages.apply(losses + [total_loss])
        with tf.control_dependencies([loss_averages_op]):
            optimizer = tf.train.AdamOptimizer(lrn_rate)
        train_step = optimizer.minimize(loss=total_loss,
                                        global_step=global_step)
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY, global_step)
        variable_averages_op = variable_averages.apply(
            tf.trainable_variables())
        with tf.control_dependencies([train_step, variable_averages_op]):
            train_step = tf.no_op(name='train')
        prediction = alexnet.classify(fc8)
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for j in range(FLAGS.max_epoch):
            for i in range(200):
                _, loss_value, pre, grd = sess.run(
                    [train_step, total_loss, prediction, labels])
                print(str(j + 1) + ' epoch ' + str(i) + ' minibatch: ' +
                      str(loss_value))
                print(str(j + 1) + ' epoch ' + str(i) + ' minibatch: ' +
                      str(pre) + ' ' + str(grd))
        coord.request_stop()
        coord.join(threads)
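
Because Example #6 maintains an ExponentialMovingAverage of the trainable variables, evaluation code would normally restore the shadow (averaged) weights rather than the raw ones. A short sketch of that pattern, assuming the same MOVING_AVERAGE_DECAY constant:

# At evaluation time, restore each variable from its moving-average shadow.
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# saver.restore(sess, checkpoint_path) then loads the averaged weights.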
Example #7
def train():
    filename_queue = tf.train.string_input_producer([tfrecord_filename])
    images, labels = reader.read_and_decode(filename_queue=filename_queue,
                                            batch_size=FLAGS.batch_size)
    with tf.device('/gpu:0'):
        global_step = slim.create_global_step()
    with tf.device('/cpu:0'):
        num_batches_per_epoch = FLAGS.data_size // FLAGS.batch_size
        decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
        lrn_rate = tf.train.exponential_decay(FLAGS.learning_rate,
                                              global_step,
                                              decay_steps,
                                              LEARNING_RATE_DECAY_FACTOR,
                                              staircase=True)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.6
    with tf.device('/gpu:0'):
        optimizer = tf.train.AdamOptimizer(lrn_rate)
        fc8 = vgg_m.model(input_data=images,
                          n_classes=FLAGS.n_classes,
                          keep_prob=FLAGS.keep_prob)
        losses = loss.get_loss(input_data=fc8, grdtruth=labels)
        train_step = optimizer.minimize(loss=losses, global_step=global_step)
        prediction = vgg_m.classify(fc8)
    with tf.device('/cpu:0'):
        saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for j in range(FLAGS.max_epoch):
            for i in range(200):
                _, loss_value, pre, grd = sess.run(
                    [train_step, losses, prediction, labels])
                print(str(j + 1) + ' epoch ' + str(i) + ' minibatch: ' +
                      str(loss_value))
                print(str(j + 1) + ' epoch ' + str(i) + ' minibatch: ' +
                      str(pre) + ' ' + str(grd))
        save_path = saver.save(sess, model_path)
        print("Model saved in file:%s" % save_path)
        coord.request_stop()
        coord.join(threads)
Example #8
def test():
    filename_queue = tf.train.string_input_producer([tfrecord_filename])
    images, labels = reader.read_and_decode(filename_queue=filename_queue,
                                            batch_size=FLAGS.batch_size,
                                            shuffle_batch=False)
    # with tf.device('/gpu:0'):
    #     global_step = slim.create_global_step()
    # with tf.device('/cpu:0'):
    #     num_batches_per_epoch = FLAGS.data_size / FLAGS.batch_size
    #     decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
    #     lrn_rate = tf.train.exponential_decay(
    #         FLAGS.learning_rate, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True)
    with tf.device('/gpu:0'):
        # optimizer = tf.train.AdamOptimizer(lrn_rate)
        fc8 = alexnet.model(input_data=images,
                            n_classes=FLAGS.n_classes,
                            keep_prob=FLAGS.keep_prob)
        losses = loss.get_loss(input_data=fc8, grdtruth=labels)
        # train_step = optimizer.minimize(loss=losses, global_step=global_step)
        prediction = alexnet.classify(fc8)
    with tf.device('/cpu:0'):
        saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, model_path)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        total = 0.0
        right = 0.0
        for i in range(80):
            loss_value, pre, grd = sess.run([losses, prediction, labels])
            print(str(i + 1) + ' image loss: ' + str(loss_value))
            print(str(i + 1) + ' result: ' + str(pre) + ' ' + str(grd))
            right += np.sum(np.equal(pre, grd))
            total += 10  # assumes 10 examples per evaluated batch
            print('accuracy: ' + str(right / total))

        coord.request_stop()
        coord.join(threads)
Example #9
def distorted_inputs(filename,
                     batch_size,
                     num_epochs,
                     num_threads,
                     imshape,
                     imsize,
                     num_examples_per_epoch=128):
    """Construct distorted input for training using the Reader ops.

  Raises:
    ValueError: if no data_dir

  Args:
    filename: The name of the file containing the images
    batch_size: The number of images per batch
    num_epochs: The number of epochs passed to string_input_producer
    num_threads: The number of threads passed to shuffle_batch
    imshape: Shape of image in [height, width, n_channels] format
    imsize: Height/width in pixels for the (currently disabled) random crop
    num_examples_per_epoch: Number of images to use per epoch

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """

    if not num_epochs:
        num_epochs = None

    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(
            [filename],
            num_epochs=num_epochs,
            name='string_DISTORTED_input_producer')

        # Even when reading in multiple threads, share the filename
        # queue.
        image, label = reader.read_and_decode(filename_queue, imshape)

        # Reshape to imshape as the distortion methods need this shape
        image = tf.reshape(image, imshape)

        height = imsize
        width = imsize

        # Image processing for training the network. Note the many random
        # distortions applied to the image.

        # Removed random_crop in new TensorFlow release.
        # Randomly crop a [height, width] section of the image.
        # distorted_image = tf.image.random_crop(image, [height, width])
        #
        # Randomly flip the image horizontally.
        distorted_image = tf.image.random_flip_left_right(image)
        #
        # Because these operations are not commutative, consider
        # randomizing the order of their operation.
        distorted_image = tf.image.random_brightness(distorted_image,
                                                     max_delta=63)
        distorted_image = tf.image.random_contrast(distorted_image,
                                                   lower=0.2,
                                                   upper=1.8)

        # Subtract off the mean and divide by the variance of the pixels.
        # per_image_whitening was renamed to per_image_standardization in TF 1.0.
        float_image = tf.image.per_image_standardization(distorted_image)

        num_elements = 1
        for i in imshape:
            num_elements = num_elements * i
        image = tf.reshape(float_image, [num_elements])

        # Ensure that the random shuffling has good mixing properties.
        min_fraction_of_examples_in_queue = 0.4
        min_queue_examples = int(num_examples_per_epoch *
                                 min_fraction_of_examples_in_queue)
        images, sparse_labels = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_threads,
            capacity=min_queue_examples + 3 * batch_size,
            enqueue_many=False,
            # Ensures a minimum amount of shuffling of examples.
            min_after_dequeue=min_queue_examples,
            name='batching_shuffling_distortion')

    return images, sparse_labels
Example #10
    def build_model(self):
        # main method for training the conditional GAN
        if F.use_tfrecords:
            # load images from tfrecords + queue thread runner for better GPU utilization
            tfrecords_filename = [
                'train_records/' + x for x in os.listdir('train_records/')
            ]
            filename_queue = tf.train.string_input_producer(tfrecords_filename,
                                                            num_epochs=100)

            self.images, self.keypoints = reader.read_and_decode(
                filename_queue, F.batch_size)

            if F.output_size == 64:
                self.images = tf.image.resize_images(self.images, (64, 64))
                self.keypoints = tf.image.resize_images(
                    self.keypoints, (64, 64))

            self.images = (self.images / 127.5) - 1
            self.keypoints = (self.keypoints / 127.5) - 1

        else:
            self.images = tf.placeholder(
                tf.float32,
                [F.batch_size, F.output_size, F.output_size, F.c_dim],
                name='real_images')
            self.keypoints = tf.placeholder(
                tf.float32,
                [F.batch_size, F.output_size, F.output_size, F.c_dim],
                name='keypts')

        self.is_training = tf.placeholder(tf.bool, name='is_training')
        self.z_gen = tf.placeholder(tf.float32, [F.batch_size, F.z_dim],
                                    name='z')

        self.G = self.generator(self.z_gen, self.keypoints)
        self.D, self.D_logits = self.discriminator(self.images,
                                                   self.keypoints,
                                                   reuse=False)
        self.D_, self.D_logits_ = self.discriminator(self.G,
                                                     self.keypoints,
                                                     reuse=True)

        self.d_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
                                                    labels=tf.ones_like(
                                                        self.D)))
        self.d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                    labels=tf.zeros_like(
                                                        self.D_)))
        self.d_loss = self.d_loss_real + self.d_loss_fake
        self.g_loss_actual = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                    labels=tf.ones_like(
                                                        self.D_)))

        if F.error_conceal:
            self.mask = tf.placeholder(tf.float32,
                                       [F.batch_size] + self.image_shape,
                                       name='mask')
            self.contextual_loss = tf.reduce_sum(
                tf.contrib.layers.flatten(
                    tf.abs(
                        tf.multiply(self.mask, self.G) -
                        tf.multiply(self.mask, self.images))), 1)
            self.perceptual_loss = self.g_loss_actual
            self.complete_loss = self.contextual_loss + F.lam * self.perceptual_loss
            self.grad_complete_loss = tf.gradients(self.complete_loss,
                                                   self.z_gen)

        # create summaries for TensorBoard visualization
        tf.summary.scalar('disc_loss', self.d_loss)
        tf.summary.scalar('disc_loss_real', self.d_loss_real)
        tf.summary.scalar('disc_loss_fake', self.d_loss_fake)
        tf.summary.scalar('gen_loss', self.g_loss_actual)

        self.g_loss = tf.constant(0)

        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'D/d_' in var.name]
        self.g_vars = [var for var in t_vars if 'G/g_' in var.name]

        self.saver = tf.train.Saver()
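
build_model in Example #10 only defines the losses and splits the variables into d_vars and g_vars; the training ops live elsewhere in the class. A minimal sketch of how those lists are typically consumed inside the class (the learning rate and beta1 are placeholder assumptions, not values from the source):

        # Hypothetical training ops; hyperparameters are placeholders.
        d_optim = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(
            self.d_loss, var_list=self.d_vars)
        g_optim = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(
            self.g_loss_actual, var_list=self.g_vars)
        # A training step then alternates sess.run(d_optim, ...) and
        # sess.run(g_optim, ...) with fresh z and keypoint batches.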
Example #11
def main():
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # read data from tfrecord files
    img, label = reader.read_and_decode(args.tfrecords,
                                        epochs=args.num_epochs,
                                        size=args.img_size)
    img_batch, label_batch = tf.train.shuffle_batch([img, label],
                                                    batch_size=args.batch_size,
                                                    capacity=2000,
                                                    min_after_dequeue=1000)

    global_step = tf.Variable(0, name="global_step", trainable=False)
    # construct network model
    model = AlexNet(img_batch, args.dropout_rate, args.num_bit, args.num_class,
                    skip_layers)

    D = model.softsign
    [d_loss, out] = hashing_loss(D, label_batch, args.alpha, args.belta,
                                 args.gama, args.num_bit)

    # List of trainable variables of the layers to finetune
    var_list1 = [
        v for v in tf.trainable_variables()
        if v.name.split('/')[0] not in skip_layers
    ]
    # List of trainable variables of the layers to train from scratch
    var_list2 = [
        v for v in tf.trainable_variables()
        if v.name.split('/')[0] in skip_layers
    ]

    # learning rate
    learning_rate = tf.train.exponential_decay(args.lr,
                                               global_step,
                                               args.decay_step,
                                               args.decay_rate,
                                               staircase=True)
    opt1 = tf.train.AdamOptimizer(learning_rate * 0.001)
    opt2 = tf.train.AdamOptimizer(learning_rate)

    # apply different grads for two type layers
    grads = tf.gradients(d_loss, var_list1 + var_list2)
    grads1 = grads[:len(var_list1)]
    grads2 = grads[len(var_list1):]
    train_op1 = opt1.apply_gradients(zip(grads1, var_list1))
    train_op2 = opt2.apply_gradients(zip(grads2, var_list2),
                                     global_step=global_step)
    train_op = tf.group(train_op1, train_op2)

    with tf.Session(config=config) as sess:
        saver = tf.train.Saver(tf.global_variables())
        sess.run([
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ])
        if args.checkpoint is not None:
            checkpoint = tf.train.latest_checkpoint(args.checkpoint)
            print('Restoring model from {}'.format(checkpoint))
            saver.restore(sess, checkpoint)
        else:
            # Load the pretrained weights into the non-trainable layer
            model.load_initial_weights(sess)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        start_time = time.time()
        try:
            while not coord.should_stop():
                _, loss_t, dt, step1 = sess.run(
                    [train_op, d_loss, out, global_step])
                elapsed_time = time.time() - start_time
                start_time = time.time()

                if step1 % 10 == 0:
                    print("iter: %4d, loss: %.8f, time: %.3f" %
                          (step1, loss_t, elapsed_time))
                if step1 % args.save_freq == 0:
                    saver.save(sess,
                               args.output_dir + '/model.ckpt',
                               global_step=step1)

        except tf.errors.OutOfRangeError:
            saver.save(sess, args.output_dir + '/model-done.ckpt')
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
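
Example #11 reads all hyperparameters from a module-level args object (and also uses config and skip_layers defined outside the excerpt). A sketch of the argparse setup it appears to assume; the option names are taken from the snippet, while every default is invented:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--tfrecords', type=str, required=True)
parser.add_argument('--output_dir', type=str, default='./output')
parser.add_argument('--num_epochs', type=int, default=64)
parser.add_argument('--img_size', type=int, default=227)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--dropout_rate', type=float, default=0.5)
parser.add_argument('--num_bit', type=int, default=48)
parser.add_argument('--num_class', type=int, default=10)
parser.add_argument('--alpha', type=float, default=0.5)
parser.add_argument('--belta', type=float, default=0.1)  # spelling as in the snippet
parser.add_argument('--gama', type=float, default=0.1)   # spelling as in the snippet
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--decay_step', type=int, default=10000)
parser.add_argument('--decay_rate', type=float, default=0.9)
parser.add_argument('--checkpoint', type=str, default=None)
parser.add_argument('--save_freq', type=int, default=1000)
args = parser.parse_args()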
Example #12
def distorted_inputs(filename, batch_size, num_epochs, num_threads, imshape, imsize, num_examples_per_epoch=128):
    """Construct distorted input for training using the Reader ops.

  Raises:
    ValueError: if no data_dir

  Args:
    filename: The name of the file containing the images
    batch_size: The number of images per batch
    num_epochs: The number of epochs passed to string_input_producer
    num_threads: The number of threads passed to shuffle_batch
    imshape: Shape of image in [height, width, n_channels] format
    imsize: Height/width in pixels for the (currently disabled) random crop
    num_examples_per_epoch: Number of images to use per epoch

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """

    if not num_epochs:
        num_epochs = None

    with tf.name_scope("input"):
        filename_queue = tf.train.string_input_producer(
            [filename], num_epochs=num_epochs, name="string_DISTORTED_input_producer"
        )

        # Even when reading in multiple threads, share the filename
        # queue.
        image, label = reader.read_and_decode(filename_queue, imshape)

        # Reshape to imshape as the distortion methods need this shape
        image = tf.reshape(image, imshape)

        height = imsize
        width = imsize

        # Image processing for training the network. Note the many random
        # distortions applied to the image.

        # Removed random_crop in new TensorFlow release.
        # Randomly crop a [height, width] section of the image.
        # distorted_image = tf.image.random_crop(image, [height, width])
        #
        # Randomly flip the image horizontally.
        distorted_image = tf.image.random_flip_left_right(image)
        #
        # Because these operations are not commutative, consider
        # randomizing the order of their operation.
        distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
        distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)

        # Subtract off the mean and divide by the variance of the pixels.
        # per_image_whitening was renamed to per_image_standardization in TF 1.0.
        float_image = tf.image.per_image_standardization(distorted_image)

        num_elements = 1
        for i in imshape:
            num_elements = num_elements * i
        image = tf.reshape(float_image, [num_elements])

        # Ensure that the random shuffling has good mixing properties.
        min_fraction_of_examples_in_queue = 0.4
        min_queue_examples = int(num_examples_per_epoch * min_fraction_of_examples_in_queue)
        images, sparse_labels = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_threads,
            capacity=min_queue_examples + 3 * batch_size,
            enqueue_many=False,
            # Ensures a minimum amount of shuffling of examples.
            min_after_dequeue=min_queue_examples,
            name="batching_shuffling_distortion",
        )

    return images, sparse_labels
Example #13
def distorted_inputs(filename, batch_size, num_epochs, num_threads,
                     imshape, num_examples_per_epoch=128, flatten=True):
  """Construct distorted input for training using the Reader ops.

  Raises:
    ValueError: if no data_dir

  Args:
    filename: The name of the file containing the images
    batch_size: The number of images per batch
    num_epochs: The number of epochs passed to string_input_producer
    num_threads: The number of threads passed to shuffle_batch
    imshape: Shape of image in [height, width, n_channels] format
    num_examples_per_epoch: Number of images to use per epoch
    flatten: Whether to flatten image after image transformations

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  
  if not num_epochs:
    num_epochs = None

  with tf.name_scope('input'):
    filename_queue = tf.train.string_input_producer(
      [filename], num_epochs=num_epochs, name='string_DISTORTED_input_producer')

    # Even when reading in multiple threads, share the filename
    # queue.
    image, label = reader.read_and_decode(filename_queue, imshape)

    # Reshape to imshape as distortion methods need this shape
    image = tf.reshape(image, imshape)

    # Image processing for training the network. Note the many random
    # distortions applied to the image.

    # Removed random_crop in new TensorFlow release.
    # Randomly crop a [height, width] section of the image.
    # distorted_image = tf.image.random_crop(image, [height, width])
    #
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(image)
    #
    # Randomly apply image transformations in random_functions list
    random_functions = [_random_brightness_helper, _random_contrast_helper]
    shuffle(random_functions)
    for fcn in random_functions:
      distorted_image = fcn(distorted_image)

    # # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_standardization(distorted_image)

    if flatten:
      num_elements = 1
      for i in imshape:
        num_elements = num_elements * i
      image = tf.reshape(float_image, [num_elements])
    else:
      image = float_image

    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(num_examples_per_epoch *
                             min_fraction_of_examples_in_queue)
    images, sparse_labels = tf.train.shuffle_batch([image, label],
                                                   batch_size=batch_size,
                                                   num_threads=num_threads,
                                                   capacity=min_queue_examples + 3 * batch_size,
                                                   enqueue_many=False,
                                                   # Ensures a minimum amount of shuffling of examples.
                                                   min_after_dequeue=min_queue_examples,
                                                   name='batching_shuffling_distortion')

  return images, sparse_labels
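
Example #13 shuffles two distortion helpers that are not defined in the excerpt. A plausible minimal sketch follows, borrowing the max_delta/lower/upper values from the inline versions in Examples #9 and #12 (an assumption, since the real helpers are not shown):

def _random_brightness_helper(image):
  # Values mirror the inline distortions in Examples #9 and #12.
  return tf.image.random_brightness(image, max_delta=63)

def _random_contrast_helper(image):
  return tf.image.random_contrast(image, lower=0.2, upper=1.8)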
Example #14
    def build_model(self):
        # main method for training the conditional GAN
        if F.use_tfrecords:
            # load images from tfrecords + queue thread runner for better GPU utilization
            tfrecords_filename = [
                'train_records/' + x for x in os.listdir('train_records/')
            ]
            filename_queue = tf.train.string_input_producer(tfrecords_filename,
                                                            num_epochs=100)

            self.images_i, self.images_o = reader.read_and_decode(
                filename_queue, F.batch_size)

            if F.output_size == 64:
                self.images_i = tf.image.resize_images(self.images_i, (64, 64))
                self.images_o = tf.image.resize_images(self.images_o, (64, 64))
            print(self.images_i.shape)
            print('*********')
            # augmentation
            #self.images_i = tf.image.random_brightness(self.images_i, max_delta=0.5)
            #self.images_i = tf.image.random_contrast(self.images_i, 0.2, 0.5)
            #self.images_i = tf.image.random_hue(self.images_i, 0.2)

            # self.images_i = gaussian_noise_layer(self.images_i, 0.2)

            self.images_i = (self.images_i / 127.5) - 1
            self.images_o = (self.images_o / 127.5) - 1

        else:
            self.images_i = tf.placeholder(
                tf.float32,
                [F.batch_size, F.output_size, F.output_size, F.c_dim],
                name='real_images_i')
            self.images_o = tf.placeholder(
                tf.float32,
                [F.batch_size, F.output_size, F.output_size, F.c_dim_o],
                name='real_images_o')

        # self.mask = tf.placeholder(tf.float32, [F.batch_size, F.output_size, F.output_size, 3], name='mask')
        # A mask-extraction function still needs to be added here: the mask
        # should be derived from the image and cover the subtitles in it.
        # For now, just to get the code running, the mask is the input image:
        # self.mask = self.images_i
        # for img in range(F.batch_size):
        #     mask = get_mask(self.images_i[img])
        #     self.images_i[img] = tf.multiply(mask, self.images_i[img])

        self.is_training = tf.placeholder(tf.bool, name='is_training')
        # self.get_z_init = tf.placeholder(tf.bool, name='get_z_init')

        # self.images_m = tf.multiply(self.mask, self.images_i)
        self.output = self.unet(self.images_i)
        # self.z_gen = tf.cond(self.get_z_init, lambda: self.generate_z(self.images_), lambda: tf.placeholder(tf.float32, [F.batch_size, 100], name='z_gen'))

        # self.G = self.generator(self.z_gen)
        self.loss = 0

        if F.vgg_loss:
            data_dict = loadWeightsData('./vgg16.npy')
            lambda_f = 1
            # content target feature
            vgg_c = custom_Vgg16(self.output, data_dict=data_dict)
            # feature_i = [vgg_c.conv1_2, vgg_c.conv2_2, vgg_c.conv3_3, vgg_c.conv4_3, vgg_c.conv5_3]
            feature_i = [vgg_c.conv3_3, vgg_c.conv4_3]
            # feature after transformation
            vgg = custom_Vgg16(self.images_o, data_dict=data_dict)
            # feature_o = [vgg.conv1_2, vgg.conv2_2, vgg.conv3_3, vgg.conv4_3, vgg.conv5_3]
            feature_o = [vgg.conv3_3, vgg.conv4_3]
            # compute feature loss
            # self.loss = tf.zeros(F.batch_size, tf.float32)
            print(tf.shape(self.loss))
            for f, f_ in zip(feature_i, feature_o):
                # self.loss += lambda_f * tf.reduce_mean(tf.subtract(f, f_) ** 2, [1, 2, 3])
                self.loss += lambda_f * tf.reduce_mean(tf.subtract(f_, f)**2)
            # self.loss += tf.reduce_mean(tf.square(vgg_net['relu3_3'][:F.batch_size] - vgg_net['relu3_3'][F.batch_size:]))# + \
            # tf.reduce_mean(tf.square(vgg_net[:F.batch_size] - vgg_net[F.batch_size:]))
        else:
            self.loss += (
                tf.reduce_sum(tf.square(self.output - self.images_o)) +
                (0.01 * tf.reduce_sum(tf.image.total_variation(self.output))))
            #self.loss = tf.reduce_sum(tf.image.total_variation(self.output))
            # self.loss += tf.reduce_sum(tf.square(self.output - self.images_o))

        tf.summary.scalar('loss', self.loss)

        # create summaries for TensorBoard visualization
        # self.g_loss = tf.constant(0)

        t_vars = tf.trainable_variables()
        # print t_vars
        self.z_vars = [var for var in t_vars if 'U/' in var.name]

        #print self.z_vars
        # self.g_vars = [var for var in t_vars if 'G/g_' in var.name]
        # self.d_vars = [var for var in t_vars if 'G/d_' in var.name]

        # self.saver_gen = tf.train.Saver(self.g_vars) # + self.d_vars)
        # try:
        self.saver = tf.train.Saver()
Example #15
    def build_model(self):
        if F.use_tfrecords:
            # load images from tfrecords + queue thread runner for better GPU utilization
            tfrecords_filename = [
                'train_records/' + x for x in os.listdir('train_records/')
            ]
            filename_queue = tf.train.string_input_producer(tfrecords_filename,
                                                            num_epochs=100)

            self.images_i, self.images_o = reader.read_and_decode(
                filename_queue, F.batch_size)

            print('image_loaded')
            if F.output_size == 64:
                self.images_i = tf.image.resize_images(self.images_i, (64, 64))
                self.images_o = tf.image.resize_images(self.images_o, (64, 64))
            print(self.images_i.shape)
            print('*********')
            # augmentation
            #self.images_i = tf.image.random_brightness(self.images_i, max_delta=0.5)
            #self.images_i = tf.image.random_contrast(self.images_i, 0.2, 0.5)
            #self.images_i = tf.image.random_hue(self.images_i, 0.2)

            # self.images_i = gaussian_noise_layer(self.images_i, 0.2)

            self.images_i = (self.images_i / 127.5) - 1
            self.images_o = (self.images_o / 127.5) - 1

        else:
            self.images_o = tf.placeholder(
                tf.float32,
                [F.batch_size, F.output_size, F.output_size, F.c_dim],
                name='real_images')

        # self.z_gen = tf.placeholder(tf.float32, [None, F.z_dim], name='z')

        self.is_training = tf.placeholder(tf.bool, name='is_training')

        self.G_mean = self.generator(self.images_i)
        self.D, self.D_logits = self.discriminator(self.images_o,
                                                   self.images_i,
                                                   reuse=False)
        self.D_, self.D_logits_ = self.discriminator(self.G_mean,
                                                     self.images_i,
                                                     reuse=True)

        # calculations for getting hard predictions
        # +1 means G fooled the D network, while -1 means D has won
        self.d_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
                                                    labels=tf.ones_like(
                                                        self.D)))
        self.d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                    labels=tf.zeros_like(
                                                        self.D_)))
        self.d_loss = self.d_loss_real + self.d_loss_fake

        self.u_loss = tf.reduce_mean(tf.square(self.G_mean - self.images_o))

        self.g_loss_actual = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                    labels=tf.ones_like(
                                                        self.D_)))

        tf.summary.scalar('loss', self.g_loss_actual)

        # self.g_loss = tf.constant(0)
        # print('loss calculated')
        # if F.error_conceal == True:
        #     self.mask = tf.placeholder(tf.float32, [F.batch_size] + self.image_shape, name='mask')
        #     self.contextual_loss = tf.reduce_sum(
        #       tf.contrib.layers.flatten(
        #       tf.abs(tf.multiply(self.mask, self.G_mean) - tf.multiply(self.mask, self.images_o))), 1)
        #     self.perceptual_loss = self.g_loss_actual
        #     self.complete_loss = self.contextual_loss + F.lam * self.perceptual_loss
        #     self.grad_complete_loss = tf.gradients(self.complete_loss, self.z_gen)

        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]

        self.saver = tf.train.Saver()