Example #1
    def next_batch_train(self, initial_step):
        """
        returns:
            a tuple(image, depths) where:
                image is a float tensor with shape [batch size] + input_size
                depth is a float tensor with shape [batch size] + depth_size
        """

        filenames = self.train_file

        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            self._parse_function)  # Parse the record into tensors.
        dataset = dataset.shuffle(buffer_size=3000)
        dataset = dataset.batch(self.config_dict["batch_size"])
        dataset = dataset.repeat(
            self.config_dict["num_epochs"])  # Repeat the input.
        dataset = dataset.skip(initial_step)

        iterator = dataset.make_one_shot_iterator()

        images, depths = iterator.get_next()
        #depths = tf.reshape(depths, [None] + self.output_size)
        #images = tf.reshape(images, [None] + self.input_size)
        images = simulator.applyTurbidity(images, depths, self.c, self.binf,
                                          self.range_array)
        tf.summary.image("depth", depths)
        tf.summary.image("image", images)
        return images, depths
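This example maps self._parse_function over the raw records, but the parser itself is not shown. A minimal sketch of what such a parser might look like for serialized image/depth pairs; the feature keys, dtypes, and the self.input_size / self.depth_size shapes are assumptions, not taken from the original code:

    def _parse_function(self, serialized_example):
        # Hypothetical parser: feature keys, dtypes and shapes are assumptions.
        features = tf.parse_single_example(
            serialized_example,
            features={
                "image": tf.FixedLenFeature([], tf.string),
                "depth": tf.FixedLenFeature([], tf.string),
            })
        image = tf.decode_raw(features["image"], tf.float32)
        depth = tf.decode_raw(features["depth"], tf.float32)
        image = tf.reshape(image, self.input_size)
        depth = tf.reshape(depth, self.depth_size)
        return image, depth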
Example #2
def input_fn(filenames, train=True, batch_size=16, buffer_size=512):
    """
    Args:
        filenames:   Filenames for the TFRecords files.
        train:       Boolean whether training (True) or testing (False).
        batch_size:  Return batches of this size.
        buffer_size: Read buffers of this size. The random shuffling
                     is done on the buffer, so it must be big enough.
    """

    # Create a TensorFlow Dataset-object which has functionality
    # for reading and shuffling data from TFRecords files.
    dataset = tf.data.TFRecordDataset(filenames=filenames)

    # Parse the serialized data in the TFRecords files.
    # This returns TensorFlow tensors for the image and labels.
    dataset = dataset.map(parse)

    if train:
        # If training then read a buffer of the given size and
        # randomly shuffle it.
        dataset = dataset.shuffle(buffer_size=buffer_size)

        # Allow infinite reading of the data.
        num_repeat = None
    else:
        # If testing then don't shuffle the data.
        num_repeat = 1

    # Repeat the dataset the given number of times.
    dataset = dataset.repeat(num_repeat)

    # Get a batch of data with the given size.
    dataset = dataset.batch(batch_size)

    # Create an iterator for the dataset and the above modifications.
    iterator = dataset.make_one_shot_iterator()

    # Get the next batch of images and labels. The dimensionality tensors
    # (xdim, ydim, channels) are unpacked here but not used yet.
    images_batch, labels_batch, \
        xdim_batch, ydim_batch, channels_batch = iterator.get_next()

    if train:
        images_batch = distort_batch(images_batch)

    # The input-function must return a dict wrapping the images.
    x = {'x': images_batch}
    y = labels_batch

    return x, y
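Because input_fn returns the ({'x': images}, labels) pair that the TF 1.x Estimator API expects, it can be wired into tf.estimator directly. A minimal usage sketch; model_fn, the model directory, the file names and the step count are assumptions, not part of the code above:

# Hypothetical wiring into the TF 1.x Estimator API.
estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir="./checkpoints")

# Train on the training TFRecords, then evaluate without shuffling.
estimator.train(
    input_fn=lambda: input_fn(["train.tfrecords"], train=True, batch_size=16),
    steps=10000)
estimator.evaluate(
    input_fn=lambda: input_fn(["test.tfrecords"], train=False))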
Example #3
    def next_batch_train(self, initial_step):
        """
        args:
            batch_size:
                number of examples per returned batch
            num_epochs:
                number of time to read the input data

        returns:
            a tuple(image, transmissions) where:
                image is a float tensor with shape [batch size] + patch_size
                transmissions is a float tensor with shape [batch size]
        """

        filenames = self.train_file

        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            self._parse_function)  # Parse the record into tensors.
        dataset = dataset.shuffle(buffer_size=3000)
        dataset = dataset.batch(self.config_dict["batch_size"])
        dataset = dataset.repeat(
            self.config_dict["num_epochs"])  # Repeat the input.
        dataset = dataset.skip(initial_step)

        iterator = dataset.make_one_shot_iterator()

        images = iterator.get_next()

        size_x = self.config_dict['patch_size'][0]
        size_y = self.config_dict['patch_size'][1]
        offset_x = random.randint(0, self.input_size[0] - size_x - 1)
        offset_y = random.randint(0, self.input_size[1] - size_y - 1)

        images = images[:, offset_x:offset_x + size_x,
                        offset_y:offset_y + size_y]
        transmissions = self.random_transmissions(self.batch_size)
        images = simulator.applyTurbidityTransmission(images, self.binf,
                                                      transmissions)
        tf.summary.image("image", images)
        return images, transmissions
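Note that random.randint runs once, when the graph is built, so every batch reuses the same crop offsets. If a fresh crop per example is wanted, the crop can instead be done inside the pipeline with tf.random_crop before batching. A minimal sketch under the assumption that each parsed record is a single 3-channel image; the channel count and the helper name are assumptions:

        # Hypothetical per-example crop inside the tf.data pipeline;
        # the 3-channel assumption and the helper name are ours.
        size_x = self.config_dict['patch_size'][0]
        size_y = self.config_dict['patch_size'][1]

        def _random_crop(image):
            return tf.random_crop(image, size=[size_x, size_y, 3])

        dataset = dataset.map(self._parse_function)
        dataset = dataset.map(_random_crop)  # crop each example independently
        dataset = dataset.batch(self.config_dict["batch_size"])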
Example #4
    def next_batch_train(self, initial_step, sess):
        """
        returns:
            a tuple(image, depths) where:
                image is a float tensor with shape [batch size] + input_size
                depth is a float tensor with shape [batch size] + depth_size
        """

        filenames = self.train_file

        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            self._parse_function)  # Parse the record into tensors.
        dataset = dataset.shuffle(buffer_size=3000)
        dataset = dataset.batch(self.config_dict["batch_size"])
        dataset = dataset.repeat(
            self.config_dict["num_epochs"])  # Repeat the input.
        dataset = dataset.skip(initial_step)

        iterator = dataset.make_one_shot_iterator()

        images_gt, depths = iterator.get_next()
        #print (depths.eval(session=sess))
        #depths = tf.reshape(depths, [None] + self.output_size)
        #images = tf.reshape(images, [None] + self.input_size)
        images = simulator.applyTurbidity(images_gt, depths, self.c, self.binf,
                                          self.range_array)
        tf.summary.image("image_gt", images_gt)
        tf.summary.image("depth", depths)
        tf.summary.image("image", images)
        # sess = tf.Session()
        # d, im = sess.run([depths[0], images_gt[0]])
        # print (im.dtype)
        # pic = Image.fromarray(img_as_ubyte(im))
        # print("depths[0]", np.min(d), np.max(d), np.mean(d))
        # pic.show()
        return images, images_gt
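The method only creates the image summaries; they still have to be merged and written somewhere. A sketch of how the surrounding training loop might do that; train_op, the log directory and num_steps are assumptions, not part of the method above:

# Hypothetical consumer of the summaries created in next_batch_train.
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs", sess.graph)
for step in range(num_steps):
    summary, _ = sess.run([merged, train_op])
    writer.add_summary(summary, step)
writer.close()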
Example #5
dense_ssd = Densenet_SSD(4, 12, training_flag)
anchors = dense_ssd.anchors

gclasses, glocations, gscores = dense_ssd.bboxes_encode(
    train_y, train_location, anchors)
predictions, locations = dense_ssd.densenet_ssd(train_x)
#predictions=tf.nn.softmax(predictions)
#tf.cast(predictions,tf.int32)
loss = dense_ssd.loss(predictions, locations, gclasses, glocations, gscores)
optimizer = tf.train.AdagradOptimizer(learning_rate=1e-4)
train = optimizer.minimize(loss)
dataset = dataset.get_dataset('./train.tfrecords')
dataset = dataset.shuffle(2)
dataset = dataset.batch(1)
dataset = dataset.repeat(2)
iterator = dataset.make_one_shot_iterator()
# Build the input tensors once; creating them inside the training loop below
# would add new ops to the graph on every iteration.
batch_x, batch_y, batch_location = iterator.get_next()
batch_x = tf.decode_raw(batch_x, tf.uint8)
batch_x = tf.reshape(batch_x, [-1, 4096, 2048, 3])
batch_y = tf.reshape(batch_y, [-1, 1])
batch_location = tf.reshape(batch_location, [-1, 4])
initializer = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(initializer)
    for i in range(iteration):
        #print(data_x)
        #dic={train_x:data_x,train_y:data_y,train_location:data_location}
        data_x, data_y, data_location = sess.run(
            [batch_x, batch_y, batch_location])
        sess.run(train,