Code Example #1
    def next_batch_test(self):
        """
        returns:
            a tuple (images, images_gt, initializer) where:
                images is a float tensor with shape [batch_size] + input_size,
                    with simulated turbidity applied
                images_gt is the ground-truth image tensor of the same shape
                initializer is the initializer op of the dataset iterator
        """

        filenames = self.test_file

        dataset = tf.contrib.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            self._parse_function)  # Parse the record into tensors.
        dataset = dataset.batch(self.config_dict["batch_size"])
        iterator = dataset.make_initializable_iterator()
        initializer = iterator.initializer

        images_gt, depths = iterator.get_next()
        # depths = tf.reshape(depths, [self.batch_size] + self.output_size)
        # images = tf.reshape(images, [self.batch_size] + self.input_size)
        images = simulator.applyTurbidity(images_gt, depths, self.c, self.binf,
                                          self.range_array)
        tf.summary.image("image_gt", images_gt)
        tf.summary.image("image", images)
        print(images.shape, images_gt.shape)
        return images, images_gt, initializer
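
These pipelines all call a `_parse_function` that is not shown on this page. A minimal sketch of what such a parser might look like for Code Example #1, assuming (hypothetically) that each record stores raw image and depth bytes under the keys 'image' and 'depth' with fixed, known shapes:

import tensorflow as tf

def _parse_function(serialized_example):
    # Hypothetical feature keys, dtypes, and shapes; the real ones depend
    # on how the TFRecord files were written.
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image': tf.FixedLenFeature([], tf.string),
            'depth': tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(features['image'], tf.float32)
    depth = tf.decode_raw(features['depth'], tf.float32)
    image = tf.reshape(image, [224, 224, 3])  # assumed input_size
    depth = tf.reshape(depth, [224, 224, 1])  # assumed depth_size
    return image, depth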
Code Example #2
    def next_batch_test(self):
        """
        returns:
            a tuple (images, transmissions, initializer) where:
                images is a float tensor with shape [batch_size] + patch_size
                transmissions is a float tensor with shape [batch_size]
                initializer is the initializer op of the dataset iterator
        """

        filenames = self.test_file

        dataset = tf.contrib.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            self._parse_function)  # Parse the record into tensors.
        dataset = dataset.batch(self.config_dict["batch_size"])
        iterator = dataset.make_initializable_iterator()
        initializer = iterator.initializer

        images = iterator.get_next()

        size_x = self.config_dict['patch_size'][0]
        size_y = self.config_dict['patch_size'][1]
        # NOTE: these offsets come from Python's random module, so they are
        # drawn once when the graph is built; see the tf.random_crop sketch
        # below for a per-step alternative
        offset_x = random.randint(0, self.input_size[0] - size_x - 1)
        offset_y = random.randint(0, self.input_size[1] - size_y - 1)

        images = images[:, offset_x:offset_x + size_x,
                        offset_y:offset_y + size_y]
        # depths = tf.reshape(depths, [self.batch_size] + self.output_size)
        # images = tf.reshape(images, [self.batch_size] + self.input_size)

        transmissions = self.random_transmissions(self.batch_size)
        images = simulator.applyTurbidityTransmission(images, self.binf,
                                                      transmissions)
        tf.summary.image("image", images)
        return images, transmissions, initializer
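
Because the offsets in this example come from Python's `random` module, they are fixed once the graph is built: every batch in the run is cropped at the same location (Code Example #5 has the same pattern). If a fresh patch per step is wanted, a TensorFlow op such as `tf.random_crop` could be used instead; a minimal sketch, assuming 3-channel images:

import tensorflow as tf

def random_patch(images, patch_size, channels=3):
    # Crops a new random patch on every sess.run() call;
    # `channels` is an assumption about the image layout.
    batch = tf.shape(images)[0]
    size = tf.stack([batch, patch_size[0], patch_size[1], channels])
    return tf.random_crop(images, size)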
Code Example #3
    def next_batch_train(self, initial_step):
        """
        returns:
            a tuple (images, depths) where:
                images is a float tensor with shape [batch_size] + input_size
                depths is a float tensor with shape [batch_size] + depth_size
        """

        filenames = self.train_file

        dataset = tf.contrib.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            self._parse_function)  # Parse the record into tensors.
        dataset = dataset.shuffle(buffer_size=3000)
        dataset = dataset.batch(self.config_dict["batch_size"])
        dataset = dataset.repeat(
            self.config_dict["num_epochs"])  # Repeat the input.
        dataset = dataset.skip(initial_step)

        iterator = dataset.make_one_shot_iterator()

        images, depths = iterator.get_next()
        #depths = tf.reshape(depths, [None] + self.output_size)
        #images = tf.reshape(images, [None] + self.input_size)
        images = simulator.applyTurbidity(images, depths, self.c, self.binf,
                                          self.range_array)
        tf.summary.image("depth", depths)
        tf.summary.image("image", images)
        return images, depths
Code Example #4
def input_fn(filenames, train=True, batch_size=16, buffer_size=512):
    # Args:
    # filenames:   Filenames for the TFRecords files.
    # train:       Boolean whether training (True) or testing (False).
    # batch_size:  Return batches of this size.
    # buffer_size: Read buffers of this size. The random shuffling
    #              is done on the buffer, so it must be big enough.

    # Create a TensorFlow Dataset-object which has functionality
    # for reading and shuffling data from TFRecords files.
    dataset = tf.data.TFRecordDataset(filenames=filenames)

    # Parse the serialized data in the TFRecords files.
    # This returns TensorFlow tensors for the image and labels.
    dataset = dataset.map(parse)

    if train:
        # If training then read a buffer of the given size and
        # randomly shuffle it.
        dataset = dataset.shuffle(buffer_size=buffer_size)

        # Allow infinite reading of the data.
        num_repeat = None
    else:
        # If testing then don't shuffle the data.
        num_repeat = 1

    # Repeat the dataset the given number of times.
    dataset = dataset.repeat(num_repeat)

    # Get a batch of data with the given size.
    dataset = dataset.batch(batch_size)

    # Create an iterator for the dataset and the above modifications.
    iterator = dataset.make_one_shot_iterator()

    # Get the next batch of images and labels, together with the
    # dimensionality info (unused here, but available for later).
    images_batch, labels_batch, \
        xdim_batch, ydim_batch, channels_batch = iterator.get_next()

    if train:
        images_batch = distort_batch(images_batch)

    # The input-function must return a dict wrapping the images.
    x = {'x': images_batch}
    y = labels_batch

    return x, y
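
The dict-plus-labels return value is the signature tf.estimator expects from an input function. A minimal sketch of wiring input_fn into an Estimator, where `my_model_fn` and the file paths are assumptions, not part of the example:

# Hypothetical wiring; `my_model_fn` and the TFRecord paths are assumptions.
train_input_fn = lambda: input_fn(["train.tfrecords"], train=True)
test_input_fn = lambda: input_fn(["test.tfrecords"], train=False)

estimator = tf.estimator.Estimator(model_fn=my_model_fn, model_dir="./ckpt")
estimator.train(input_fn=train_input_fn, steps=1000)
print(estimator.evaluate(input_fn=test_input_fn))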
Code Example #5
    def next_batch_train(self, initial_step):
        """
        args:
            initial_step:
                number of already-consumed batches to skip, so that
                training can resume where it left off
            (batch_size and num_epochs are read from self.config_dict)

        returns:
            a tuple (images, transmissions) where:
                images is a float tensor with shape [batch_size] + patch_size
                transmissions is a float tensor with shape [batch_size]
        """

        filenames = self.train_file

        dataset = tf.contrib.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            self._parse_function)  # Parse the record into tensors.
        dataset = dataset.shuffle(buffer_size=3000)
        dataset = dataset.batch(self.config_dict["batch_size"])
        dataset = dataset.repeat(
            self.config_dict["num_epochs"])  # Repeat the input.
        dataset = dataset.skip(initial_step)

        iterator = dataset.make_one_shot_iterator()

        images = iterator.get_next()

        size_x = self.config_dict['patch_size'][0]
        size_y = self.config_dict['patch_size'][1]
        # offsets are fixed at graph-construction time (see the note and
        # tf.random_crop sketch after Code Example #2)
        offset_x = random.randint(0, self.input_size[0] - size_x - 1)
        offset_y = random.randint(0, self.input_size[1] - size_y - 1)

        images = images[:, offset_x:offset_x + size_x,
                        offset_y:offset_y + size_y]
        transmissions = self.random_transmissions(self.batch_size)
        images = simulator.applyTurbidityTransmission(images, self.binf,
                                                      transmissions)
        tf.summary.image("image", images)
        return images, transmissions
Code Example #6
File: train.py  Project: HuayueZhang/Fusion_siamese
def train_one_step(b, sess, ops, writer, is_training):
    # An iteration/step
    # Get train data
    batch_paths = DATA_PATHS[b * FLAGS.batch_size:(b * FLAGS.batch_size +
                                                   FLAGS.batch_size)]
    b_left_in, b_righ_in, b_label = dataset.batch(batch_paths)
    feed_dict = {
        ops['is_training_pl']: is_training,
        ops['left_in_pl']: b_left_in,
        ops['righ_in_pl']: b_righ_in,
        ops['label_pl']: b_label
    }

    # run the training step
    summary_str, _, loss_rslt, global_steps = sess.run(
        [ops['merged'], ops['train_op'], ops['loss'], ops['global_steps']],
        feed_dict=feed_dict)

    writer.add_summary(summary_str, global_steps)
    # This makes TensorBoard plot the summary against global_steps.
    # If writer.add_summary(summary, global_step) is called without the
    # global_step argument, the scalar summary is drawn as a straight line.
    return global_steps, loss_rslt
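
A sketch of the outer loop that would drive `train_one_step`, assuming the session, the `ops` dict, the summary writer, and an epoch-count flag are all set up elsewhere in the script:

# Hypothetical driver loop; every name here comes from the surrounding script.
n_batches = len(DATA_PATHS) // FLAGS.batch_size
for epoch in range(FLAGS.num_epochs):
    for b in range(n_batches):
        global_steps, loss_rslt = train_one_step(b, sess, ops, writer,
                                                 is_training=True)
        if global_steps % 100 == 0:
            print('step %d, train loss %.4f' % (global_steps, loss_rslt))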
Code Example #7
    def next_batch_train(self, initial_step, sess):
        """
        returns:
            a tuple (images, images_gt) where:
                images is a float tensor with shape [batch_size] + input_size,
                    with simulated turbidity applied
                images_gt is the ground-truth image tensor of the same shape
        """

        filenames = self.train_file

        dataset = tf.contrib.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            self._parse_function)  # Parse the record into tensors.
        dataset = dataset.shuffle(buffer_size=3000)
        dataset = dataset.batch(self.config_dict["batch_size"])
        dataset = dataset.repeat(
            self.config_dict["num_epochs"])  # Repeat the input.
        dataset = dataset.skip(initial_step)

        iterator = dataset.make_one_shot_iterator()

        images_gt, depths = iterator.get_next()
        #print (depths.eval(session=sess))
        #depths = tf.reshape(depths, [None] + self.output_size)
        #images = tf.reshape(images, [None] + self.input_size)
        images = simulator.applyTurbidity(images_gt, depths, self.c, self.binf,
                                          self.range_array)
        tf.summary.image("image_gt", images_gt)
        tf.summary.image("depth", depths)
        tf.summary.image("image", images)
        # sess = tf.Session()
        # d, im = sess.run([depths[0], images_gt[0]])
        # print (im.dtype)
        # pic = Image.fromarray(img_as_ubyte(im))
        # print("depths[0]", np.min(d), np.max(d), np.mean(d))
        # pic.show()
        return images, images_gt
Code Example #8
File: train.py  Project: HuayueZhang/Fusion_siamese
def eval_one_step(sess, ops, writer, is_training):
    loss = 0
    for eb in range(eval_n_batches):
        # An iteration/step
        # Get eval data
        batch_paths = EVAL_DATA_PATHS[eb *
                                      FLAGS.batch_size:(eb * FLAGS.batch_size +
                                                        FLAGS.batch_size)]
        b_left_in, b_righ_in, b_label = dataset.batch(batch_paths)
        feed_dict = {
            ops['is_training_pl']: is_training,
            ops['left_in_pl']: b_left_in,
            ops['righ_in_pl']: b_righ_in,
            ops['label_pl']: b_label
        }

        # Pass through the network without running the training step
        summary_str, loss_rslt, global_steps = sess.run(
            [ops['merged'], ops['loss'], ops['global_steps']],
            feed_dict=feed_dict)
        writer.add_summary(summary_str, global_steps)
        loss += loss_rslt
    loss = loss / eval_n_batches
    return loss
Code Example #9
training_flag = tf.placeholder(tf.bool)

dense_ssd = Densenet_SSD(4, 12, training_flag)
anchors = dense_ssd.anchors

gclasses, glocations, gscores = dense_ssd.bboxes_encode(
    train_y, train_location, anchors)
predictions, locations = dense_ssd.densenet_ssd(train_x)
#predictions=tf.nn.softmax(predictions)
#tf.cast(predictions,tf.int32)
loss = dense_ssd.loss(predictions, locations, gclasses, glocations, gscores)
optimizer = tf.train.AdagradOptimizer(learning_rate=1e-4)
train = optimizer.minimize(loss)
dataset = dataset.get_dataset('./train.tfrecords')
dataset = dataset.shuffle(2)
dataset = dataset.batch(1)
dataset = dataset.repeat(2)
iterator = dataset.make_one_shot_iterator()
# Build the decode/reshape ops once, outside the run loop; calling
# iterator.get_next() or tf.decode_raw inside the loop would keep adding
# new ops to the graph on every iteration.
data_x, data_y, data_location = iterator.get_next()
data_x = tf.decode_raw(data_x, tf.uint8)
data_x = tf.reshape(data_x, [-1, 4096, 2048, 3])
data_y = tf.reshape(data_y, [-1, 1])
data_location = tf.reshape(data_location, [-1, 4])
initializer = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(initializer)
    for i in range(iteration):  # `iteration` is defined elsewhere
        #dic={train_x:batch_x,train_y:batch_y,train_location:batch_location}
        batch_x, batch_y, batch_location = sess.run(
            [data_x, data_y, data_location])
Code Example #10
def train():
    # NOTE: `session`, the model placeholders and ops (x, y_true, keep_prob,
    # optimizer, accuracy, accuracy2, accuracy3, cost) and `saver` are
    # created elsewhere in the original script; this function drives the
    # input pipeline and the train/validation loops.

    training_filename = [
        "/home/szaman5/Phytoplankton_Classifier/test_data/train.tfrecords"
    ]
    validation_filename = [
        "/home/szaman5/Phytoplankton_Classifier/test_data/validation.tfrecords"
    ]

    filename = tf.placeholder(tf.string, shape=[None])

    dataset = tf.data.TFRecordDataset(filename)

    dataset = dataset.map(_parser, num_parallel_calls=40)
    dataset = dataset.shuffle(buffer_size=int(sys.argv[1]))
    dataset = dataset.batch(int(sys.argv[2]))

    dataset = dataset.prefetch(buffer_size=int(sys.argv[2]) * 100)
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    counter = 0

    NUM_EPOCHS = 40

    val_acc = 0
    acc = 0
    count = 0
    for epoch in range(NUM_EPOCHS):
        session.run(iterator.initializer,
                    feed_dict={filename: training_filename})
        #x_batch, y_true_batch = data.train.next_batch(batch_size)

        while True:
            try:
                t = time()
                x_batch, y_true_batch = session.run(next_element)

                #print("Load time is",time()-t)
                feed_dict_tr = {
                    x: x_batch,
                    y_true: y_true_batch,
                    keep_prob: 0.3
                }

                _, acc = session.run([optimizer, accuracy],
                                     feed_dict=feed_dict_tr)
                #acc = session.run(accuracy,feed_dict= feed_dict_tr)
                #print(time()-t)
                count += 1
                #print("Batch %d complete",count)
            except tf.errors.OutOfRangeError as e:
                break
        #print("Training complete. Starting Test set")
        session.run(iterator.initializer,
                    feed_dict={filename: validation_filename})

        val_b = 0
        while True:
            try:
                x_valid_batch, y_valid_batch = session.run(next_element)
                feed_dict_val = {
                    x: x_valid_batch,
                    y_true: y_valid_batch,
                    keep_prob: 1
                }
                #val_loss = session.run(cost,feed_dict=feed_dict_val)

                val_loss, val_acc, val_acc3 = session.run(
                    [cost, accuracy2, accuracy3], feed_dict=feed_dict_val)
                #summary,val_acc = session.run([merged,accuracy],feed_dict=feed_dict_val)
                #print("Batch Accuracy: ",epoch,val_acc)
                print(val_acc, val_acc3)
                val_b += 1
                #writer.add_summary(summary,epoch)
                #print("Validation bath:",val_b)

            except tf.errors.OutOfRangeError as e:
                show_progress(epoch, acc, val_acc, val_loss)
                saver.save(
                    session,
                    "/home/szaman5/Phytoplankton_Classifier/trained_model/")
                break
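
Stripped of the training specifics, Code Example #10 is built on the re-initialization pattern: a single initializable iterator whose source files are chosen each time its initializer runs. A minimal sketch of just that pattern, with hypothetical file names and the example's `_parser`:

import tensorflow as tf

filename = tf.placeholder(tf.string, shape=[None])
dataset = tf.data.TFRecordDataset(filename).map(_parser).batch(32)
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()

with tf.Session() as sess:
    for files in (["train.tfrecords"], ["validation.tfrecords"]):
        # Point the same iterator at a different file list...
        sess.run(iterator.initializer, feed_dict={filename: files})
        while True:
            try:
                batch = sess.run(next_element)  # ...and drain one pass over it
            except tf.errors.OutOfRangeError:
                break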
Code Example #11
def main(_):
    tf_record_base = '/home/westwell/Desktop/dolores_storage/humpback_whale_identification/' \
                     'data/all/tfrecord_single_image/'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
    if FLAGS.cfg_file is None:
        raise ValueError('You must supply the cfg file !')

    cfg = _cfg_from_file(FLAGS.cfg_file)
    train_cfg = cfg['train']

    # print all configs
    print('############################ cfg ############################')
    for k in cfg:
        print('%s: %s' % (k, cfg[k]))

    tf.logging.set_verbosity(tf.logging.INFO)
    #######################################################################
    ##############             single GPU version            ##############
    #######################################################################

    #### get features ####
    input_image = tf.placeholder(tf.uint8,
                                 shape=[None, None, 3],
                                 name='input_image')
    image = resize_to_range(input_image, cfg['min_resize_value'],
                            cfg['max_resize_value'])
    image = corp_image(image, cfg['corp_size'], random_crop=False)
    image = tf.expand_dims(image, axis=0)
    feature_for_dst, _ = feature_extractor.extract_features(
        images=image,
        num_classes=None,
        output_stride=cfg['output_stride'],
        global_pool=True,
        model_variant=cfg['model_variant'],
        weight_decay=0.0,
        dropout_keep_prob=1.0,
        regularize_depthwise=False,
        reuse=tf.AUTO_REUSE,
        is_training=False,
        fine_tune_batch_norm=False,
        cfg=cfg)
    if len(feature_for_dst.shape) == 4:
        feature_for_dst = tf.squeeze(feature_for_dst,
                                     axis=[1, 2],
                                     name='features_for_dst')
    elif len(feature_for_dst.shape) == 2:
        feature_for_dst = tf.identity(feature_for_dst, name='features_for_dst')
    else:
        raise Exception('feature_for_dst shape not right, got %s' %
                        (feature_for_dst.shape))

    #### get similarity probs of two features ####
    ref_features = tf.placeholder(tf.float32,
                                  shape=[None, feature_for_dst.shape[-1]],
                                  name='ref_features')
    dut_feature = tf.placeholder(tf.float32,
                                 shape=[1, feature_for_dst.shape[-1]],
                                 name='dut_features')
    prob_same_ids = similarity_prob_for_one_query(
        ref_features=ref_features,
        dut_feature=dut_feature,
        d_cfg=cfg['distance_config'],
        scope='similarity_prob_for_one_query')

    #### set up session config ####
    # session config:
    sess_cfg = tf.ConfigProto(allow_soft_placement=True,
                              log_device_placement=False)
    sess_cfg.gpu_options.allow_growth = True

    #### do test the model ####
    with tf.Session(config=sess_cfg) as sess:
        # init
        #sess.run(tf.global_variables_initializer())
        #sess.run(tf.local_variables_initializer())

        # restore vars from pretrained ckpt:
        vars_to_restore = _var_to_restore(None)
        for v in vars_to_restore:
            print(v.op.name)
        restor_saver = tf.train.Saver(var_list=vars_to_restore)
        restor_saver.restore(sess,
                             tf.train.latest_checkpoint(FLAGS.output_dir))

        # forward all ref images
        filenames = _get_tfrecord_names(tf_record_base, FLAGS.ref_images_set)
        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            lambda record: _parser_humpback_whale(record, 'eval'))
        # NOTE: the result of .batch() is not assigned, so batching is a
        # no-op here and the iterator yields single, unbatched examples
        # (which is what the [None, None, 3] `input_image` placeholder
        # above expects)
        dataset.batch(batch_size=1)
        iterator = dataset.make_one_shot_iterator()
        ref_image, _, ref_image_name, ref_class_name, _, _ = iterator.get_next()

        all_ref_features = None
        all_ref_cls_name = []
        all_ref_images_name = []
        i = 0
        while True:
            try:
                one_ref_image, one_ref_image_name, one_ref_class_name = sess.run(
                    [ref_image, ref_image_name, ref_class_name])
                if i % 100 == 0:
                    print(i, one_ref_class_name)
                all_ref_cls_name.append(one_ref_class_name)
                all_ref_images_name.append(one_ref_image_name)
                one_ref_feature = sess.run(
                    tf.get_default_graph().get_tensor_by_name(
                        'features_for_dst:0'),
                    feed_dict={'input_image:0': one_ref_image})
                if all_ref_features is None:
                    all_ref_features = one_ref_feature
                else:
                    all_ref_features = np.concatenate(
                        (all_ref_features, one_ref_feature), axis=0)
                i += 1
            except tf.errors.OutOfRangeError:
                tf.logging.info('End of forward ref images')
                break
        if FLAGS.save_features:
            ref_concated = np.concatenate(
                (all_ref_features, np.array(all_ref_images_name).reshape(
                    (all_ref_features.shape[0], 1)),
                 np.array(all_ref_cls_name).reshape(
                     (all_ref_features.shape[0], 1))),
                axis=1)
            np.save(
                os.path.join(FLAGS.output_dir, '..',
                             'ref_concated_%s.npy' % (FLAGS.ref_images_set)),
                ref_concated)
        all_ref_cls_name.append('new_whale'.encode(encoding='utf-8'))

        # forward all test images
        filenames = _get_tfrecord_names(tf_record_base, FLAGS.dut_images_set)
        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(
            lambda record: _parser_humpback_whale(record, 'eval'))
        # NOTE: again not assigned, so this .batch() is also a no-op
        dataset.batch(batch_size=1)
        iterator = dataset.make_one_shot_iterator()
        dut_image, _, dut_image_name, dut_class_name, _, _ = iterator.get_next()

        all_dut_featurs = None
        all_dut_cls_name = []
        all_dut_image_names = []
        i = 0
        while True:
            try:
                one_dut_image, one_dut_image_name, one_dut_class_name = sess.run(
                    [dut_image, dut_image_name, dut_class_name])
                if i % 100 == 0:
                    print(i, one_dut_image_name)
                all_dut_cls_name.append(one_dut_class_name)
                all_dut_image_names.append(one_dut_image_name)
                one_dut_feature = sess.run(
                    tf.get_default_graph().get_tensor_by_name(
                        'features_for_dst:0'),
                    feed_dict={'input_image:0': one_dut_image})
                if all_dut_featurs is None:
                    all_dut_featurs = one_dut_feature
                else:
                    all_dut_featurs = np.concatenate(
                        (all_dut_featurs, one_dut_feature), axis=0)
                i += 1
            except tf.errors.OutOfRangeError:
                tf.logging.info('End of forward dut images')
                break
        if FLAGS.save_features:
            dut_concated = np.concatenate(
                (all_dut_featurs, np.array(all_dut_image_names).reshape(
                    (all_dut_featurs.shape[0], 1)),
                 np.array(all_dut_cls_name).reshape(
                     (all_dut_featurs.shape[0], 1))),
                axis=1)
            np.save(
                os.path.join(FLAGS.output_dir, '..',
                             'dut_concated_%s.npy' % (FLAGS.dut_images_set)),
                dut_concated)

        # got prob_same_id for every test image and write result
        # submission file
        for nw_prob in FLAGS.new_whale_prob:
            output_file_path = os.path.join(
                FLAGS.output_dir, '..',
                'submission_%s_%s.csv' % (nw_prob, time.time()))
            if os.path.isfile(output_file_path):
                raise Exception("submission file exists!! : %s" %
                                (output_file_path))
            with open(output_file_path, 'w') as f:
                f.write('Image,Id\n')

            for i in range(len(all_dut_image_names)):
                one_prob_same_ids = sess.run(
                    tf.get_default_graph().get_tensor_by_name(
                        'similarity_prob_for_one_query/prob_same_ids:0'),
                    feed_dict={
                        'ref_features:0':
                        all_ref_features,
                        'dut_features:0':
                        np.expand_dims(all_dut_featurs[i], axis=0)
                    })
                one_prob_same_ids = np.concatenate(
                    (np.squeeze(one_prob_same_ids), [nw_prob]), axis=0)
                if i % 100 == 0:
                    print('compare with: %f' % (nw_prob), i,
                          all_dut_image_names[i], one_prob_same_ids.min(),
                          one_prob_same_ids.max())
                one_order = np.argsort(one_prob_same_ids)[::-1]  # prob index
                one_order = one_order.tolist()

                one_predictions = []
                for idx in one_order:
                    tmp_prediction = all_ref_cls_name[idx]
                    if tmp_prediction not in one_predictions:
                        one_predictions.append(tmp_prediction)
                    if len(one_predictions) == 5:  # write one result
                        with open(output_file_path, 'a') as f:
                            content = os.path.basename(
                                all_dut_image_names[i].decode()) + ','
                            for j in range(len(one_predictions)):
                                if j == 0:
                                    content = content + one_predictions[
                                        j].decode()
                                else:
                                    content = content + ' ' + one_predictions[
                                        j].decode()
                            content = content + '\n'
                            f.write(content)
                        break  # finished with one dut image
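
The loop above walks candidates from highest to lowest probability, keeping the first occurrence of each class name until five distinct predictions remain. The same selection logic in compact form, as a hypothetical helper:

import numpy as np

def top5_predictions(prob_same_ids, ref_cls_names):
    # Deduplicate class names in descending-probability order.
    preds = []
    for idx in np.argsort(prob_same_ids)[::-1]:
        name = ref_cls_names[idx]
        if name not in preds:
            preds.append(name)
        if len(preds) == 5:
            break
    return preds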