Example #1
    def next_batch_train(self, initial_step):
        """
        returns:
            a tuple(image, depths) where:
                image is a float tensor with shape [batch size] + input_size
                depth is a float tensor with shape [batch size] + depth_size
        """

        filenames = self.train_file

        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            self._parse_function)  # Parse the record into tensors.
        dataset = dataset.shuffle(buffer_size=3000)
        dataset = dataset.batch(self.config_dict["batch_size"])
        dataset = dataset.repeat(
            self.config_dict["num_epochs"])  # Repeat the input.
        dataset = dataset.skip(initial_step)

        iterator = dataset.make_one_shot_iterator()

        images, depths = iterator.get_next()
        #depths = tf.reshape(depths, [None] + self.output_size)
        #images = tf.reshape(images, [None] + self.input_size)
        images = simulator.applyTurbidity(images, depths, self.c, self.binf,
                                          self.range_array)
        tf.summary.image("depth", depths)
        tf.summary.image("image", images)
        return images, depths
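For context, a minimal sketch of how a loader method like this is usually consumed in TF 1.x (the `loader` instance below is a hypothetical placeholder, not part of the code above): the batch tensors are built once, and each `sess.run` pulls a fresh batch from the one-shot iterator until the repeated dataset raises `OutOfRangeError`.

import tensorflow as tf

# `loader` is assumed to be an already-constructed instance of the class above
# (hypothetical name); nothing else here comes from the original source.
images_op, depths_op = loader.next_batch_train(initial_step=0)
merged_summaries = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter("./logs", sess.graph)
    step = 0
    while True:
        try:
            # Each run pulls the next batch from the one-shot iterator.
            batch_images, batch_depths, summary = sess.run(
                [images_op, depths_op, merged_summaries])
            writer.add_summary(summary, step)
            step += 1
        except tf.errors.OutOfRangeError:
            # Raised once batch_size * num_epochs batches are exhausted.
            break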
Example #2
def input_fn(filenames, train=True, batch_size=16, buffer_size=512):
    # Args:
    # filenames:   Filenames for the TFRecords files.
    # train:       Boolean whether training (True) or testing (False).
    # batch_size:  Return batches of this size.
    # buffer_size: Read buffers of this size. The random shuffling
    #              is done on the buffer, so it must be big enough.

    # Create a TensorFlow Dataset-object which has functionality
    # for reading and shuffling data from TFRecords files.
    dataset = tf.data.TFRecordDataset(filenames=filenames)

    # Parse the serialized data in the TFRecords files.
    # This returns TensorFlow tensors for the image and labels.
    dataset = dataset.map(parse)

    if train:
        # If training then read a buffer of the given size and
        # randomly shuffle it.
        dataset = dataset.shuffle(buffer_size=buffer_size)

        # Allow infinite reading of the data.
        num_repeat = None
    else:
        # If testing then don't shuffle the data.
        num_repeat = 1

    # Repeat the dataset the given number of times.
    dataset = dataset.repeat(num_repeat)

    # Get a batch of data with the given size.
    dataset = dataset.batch(batch_size)

    # Create an iterator for the dataset and the above modifications.
    iterator = dataset.make_one_shot_iterator()

    # Get the next batch of images and labels. The parser also returns the
    # image dimensions and channel count, which are unpacked here but unused.
    images_batch, labels_batch, \
        xdim_batch, ydim_batch, channels_batch = iterator.get_next()

    if train:
        images_batch = distort_batch(images_batch)

    # The input-function must return a dict wrapping the images.
    x = {'x': images_batch}
    y = labels_batch

    return x, y
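A hedged usage sketch for the input_fn above (the TFRecord paths and `my_model_fn` below are placeholder assumptions, not part of the original code): since the function returns a `{'x': images}` dict together with the labels, it can be handed straight to the tf.estimator API.

import tensorflow as tf

# Hypothetical model_fn and file paths; only input_fn comes from the code above.
train_input = lambda: input_fn(filenames=["train.tfrecords"], train=True)
test_input = lambda: input_fn(filenames=["test.tfrecords"], train=False)

model = tf.estimator.Estimator(model_fn=my_model_fn, model_dir="./checkpoints")
model.train(input_fn=train_input, steps=1000)
print(model.evaluate(input_fn=test_input))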
Example #3
    def next_batch_train(self, initial_step):
        """
        args:
            batch_size:
                number of examples per returned batch
            num_epochs:
                number of time to read the input data

        returns:
            a tuple(image, transmissions) where:
                image is a float tensor with shape [batch size] + patch_size
                transmissions is a float tensor with shape [batch size]
        """

        filenames = self.train_file

        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            self._parse_function)  # Parse the record into tensors.
        dataset = dataset.shuffle(buffer_size=3000)
        dataset = dataset.batch(self.config_dict["batch_size"])
        dataset = dataset.repeat(
            self.config_dict["num_epochs"])  # Repeat the input.
        dataset = dataset.skip(initial_step)

        iterator = dataset.make_one_shot_iterator()

        images = iterator.get_next()

        # Crop a random patch of patch_size out of each full-size image.
        size_x = self.config_dict['patch_size'][0]
        size_y = self.config_dict['patch_size'][1]
        offset_x = random.randint(0, self.input_size[0] - size_x - 1)
        offset_y = random.randint(0, self.input_size[1] - size_y - 1)

        images = images[:, offset_x:offset_x + size_x,
                        offset_y:offset_y + size_y]
        transmissions = self.random_transmissions(self.batch_size)
        images = simulator.applyTurbidityTransmission(images, self.binf,
                                                      transmissions)
        tf.summary.image("image", images)
        return images, transmissions
Example #4
    def next_batch_train(self, initial_step, sess):
        """
        returns:
            a tuple(image, depths) where:
                image is a float tensor with shape [batch size] + input_size
                depth is a float tensor with shape [batch size] + depth_size
        """

        filenames = self.train_file

        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            self._parse_function)  # Parse the record into tensors.
        dataset = dataset.shuffle(buffer_size=3000)
        dataset = dataset.batch(self.config_dict["batch_size"])
        dataset = dataset.repeat(
            self.config_dict["num_epochs"])  # Repeat the input.
        dataset = dataset.skip(initial_step)

        iterator = dataset.make_one_shot_iterator()

        images_gt, depths = iterator.get_next()
        #print (depths.eval(session=sess))
        #depths = tf.reshape(depths, [None] + self.output_size)
        #images = tf.reshape(images, [None] + self.input_size)
        images = simulator.applyTurbidity(images_gt, depths, self.c, self.binf,
                                          self.range_array)
        tf.summary.image("image_gt", images_gt)
        tf.summary.image("depth", depths)
        tf.summary.image("image", images)
        # sess = tf.Session()
        # d, im = sess.run([depths[0], images_gt[0]])
        # print (im.dtype)
        # pic = Image.fromarray(img_as_ubyte(im))
        # print("depths[0]", np.min(d), np.max(d), np.mean(d))
        # pic.show()
        return images, images_gt
Example #5
train_location = tf.placeholder(tf.float32, shape=(None, 4))
training_flag = tf.placeholder(tf.bool)

dense_ssd = Densenet_SSD(4, 12, training_flag)
anchors = dense_ssd.anchors

gclasses, glocations, gscores = dense_ssd.bboxes_encode(
    train_y, train_location, anchors)
predictions, locations = dense_ssd.densenet_ssd(train_x)
#predictions=tf.nn.softmax(predictions)
#tf.cast(predictions,tf.int32)
loss = dense_ssd.loss(predictions, locations, gclasses, glocations, gscores)
optimizer = tf.train.AdagradOptimizer(learning_rate=1e-4)
train = optimizer.minimize(loss)
dataset = dataset.get_dataset('./train.tfrecords')
dataset = dataset.shuffle(2)
dataset = dataset.batch(1)
dataset = dataset.repeat(2)
iterator = dataset.make_one_shot_iterator()

# Build the input ops once. Calling get_next()/decode_raw inside the training
# loop would add new nodes to the graph on every iteration.
data_x, data_y, data_location = iterator.get_next()
data_x = tf.decode_raw(data_x, tf.uint8)
data_x = tf.reshape(data_x, [-1, 4096, 2048, 3])
data_y = tf.reshape(data_y, [-1, 1])
data_location = tf.reshape(data_location, [-1, 4])
#print(data_x)

initializer = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(initializer)
    for i in range(iteration):
        #dic={train_x:data_x,train_y:data_y,train_location:data_location}
        batch_x, batch_y, batch_location = sess.run(
            [data_x, data_y, data_location])
Example #6
from __future__ import print_function

import tensorflow as tf

import dataset

import matplotlib.pyplot as plt
import numpy as np
import os
import time

import squeezenet

dataset = dataset.Data_set()
dataset.open('./face_photos',8)
dataset.shuffle()
#dataset.read(30)

#[None,224,224,3]

# x = tf.placeholder(tf.float32,shape=[None,224,224,3])
# y = tf.placeholder(tf.float32,shape=[None,5])
x, y = dataset.read()
# print(x)
x = tf.reshape(x,shape=[-1,224,224,3])
# x = tf.transpose(x,[0,3,1,2])
print(x)

class netInit(object):
    num_classes = 5
    weight_decay = 0.1
Example #7
def train():

    training_filename = [
        "/home/szaman5/Phytoplankton_Classifier/test_data/train.tfrecords"
    ]
    validation_filename = [
        "/home/szaman5/Phytoplankton_Classifier/test_data/validation.tfrecords"
    ]

    filename = tf.placeholder(tf.string, shape=[None])

    dataset = tf.data.TFRecordDataset(filename)

    dataset = dataset.map(_parser, num_parallel_calls=40)
    dataset = dataset.shuffle(buffer_size=int(sys.argv[1]))
    dataset = dataset.batch(int(sys.argv[2]))

    dataset = dataset.prefetch(buffer_size=int(sys.argv[2]) * 100)
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    counter = 0

    NUM_EPOCHS = 40

    val_acc = 0
    acc = 0
    count = 0
    for epoch in range(NUM_EPOCHS):
        session.run(iterator.initializer,
                    feed_dict={filename: training_filename})
        #x_batch, y_true_batch = data.train.next_batch(batch_size)

        while True:
            try:
                t = time()
                x_batch, y_true_batch = session.run(next_element)

                #print("Load time is",time()-t)
                feed_dict_tr = {
                    x: x_batch,
                    y_true: y_true_batch,
                    keep_prob: 0.3
                }

                _, acc = session.run([optimizer, accuracy],
                                     feed_dict=feed_dict_tr)
                #acc = session.run(accuracy,feed_dict= feed_dict_tr)
                #print(time()-t)
                count += 1
                #print("Batch %d complete",count)
            except tf.errors.OutOfRangeError as e:
                break
        #print("Training complete. Starting Test set")
        session.run(iterator.initializer,
                    feed_dict={filename: validation_filename})

        val_b = 0
        while True:
            try:
                x_valid_batch, y_valid_batch = session.run(next_element)
                feed_dict_val = {
                    x: x_valid_batch,
                    y_true: y_valid_batch,
                    keep_prob: 1
                }
                #val_loss = session.run(cost,feed_dict=feed_dict_val)

                val_loss, val_acc, val_acc3 = session.run(
                    [cost, accuracy2, accuracy3], feed_dict=feed_dict_val)
                #summary,val_acc = session.run([merged,accuracy],feed_dict=feed_dict_val)
                #print("Batch Accuracy: ",epoch,val_acc)
                print(val_acc, val_acc3)
                val_b += 1
                #writer.add_summary(summary,epoch)
                #print("Validation bath:",val_b)

            except tf.errors.OutOfRangeError as e:
                show_progress(epoch, acc, val_acc, val_loss)
                saver.save(
                    session,
                    "/home/szaman5/Phytoplankton_Classifier/trained_model/")
                break
Example #8
                        }))

    return (
        np.concatenate(rotated_xs, axis=0),
        np.concatenate(zoomed_xs, axis=0),
    )


with tf.Session() as session:
    training_file = f"{config.DATA_DIR}/train.p"
    with open(training_file, mode='rb') as f:
        train = pickle.load(f)
    X_train, y_train = train["features"], train["labels"]
    # Need to shuffle because some perturbations will be constant per
    # batch.
    X_train, y_train = dataset.shuffle(X_train, y_train)

    rotated_x, zoomed_x = perturb_x(session, X_train, y_train)
    print(rotated_x.shape)
    print(zoomed_x.shape)

    X_train_augmented = np.concatenate([X_train, rotated_x, zoomed_x], axis=0)
    y_train_augmented = np.concatenate([y_train, y_train, y_train], axis=0)

    augmented_training_file = f"{config.DATA_DIR}/train_augmented.p"
    with open(augmented_training_file, "wb") as f:
        pickle.dump(
            {
                "features": X_train_augmented,
                "labels": y_train_augmented,
            }, f)
Example #9
        "2019-03-24_18-07-29_res_128",
        "2019-03-24_19-05-22_res_128",
    ]

    train_sessions = sess_names[:50]
    valid_sessions = sess_names[50:]

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        prev_sess = None

        for epoch in range(5000):

            for data in shuffle(
                    iterate_sequences(iterate_dataset('data-good'),
                                      window_size=1)):

                x0_val = data[0]['density']
                x1_val = data[2]['density']
                y_val = data[1]['density']

                # NOTE: `sess_names` is the whole list, so this membership test
                # is always False; presumably the current session's name was
                # meant to be checked here.
                if sess_names in train_sessions:
                    _, summary = sess.run([optimizer, summaries],
                                          feed_dict={
                                              x0: x0_val,
                                              x1: x1_val,
                                              y: y_val,
                                          })

                    writer_train.add_summary(summary, epoch)