Example #1
0
def main(_):
    """Build a tf.slim classification training graph and run training.

    Reads examples from a TFRecord dataset, border-expands and resizes
    images to 368x368, batches them, builds the model's loss/accuracy
    ops, and trains with momentum SGD while periodically writing
    summaries and checkpoints to FLAGS.logdir.
    """
    # Pin training to a single GPU; device id '1' is hard-coded here.
    os.environ["CUDA_VISIBLE_DEVICES"] = '1'

    num_samples = FLAGS.num_samples
    dataset = get_record_dataset(FLAGS.record_path,
                                 num_samples=num_samples,
                                 num_classes=FLAGS.num_classes)
    data_provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
    image, label = data_provider.get(['image', 'label'])

    # Border expand and resize to the fixed network input size.
    image = preprocessing.border_expand(image,
                                        resize=True,
                                        output_height=368,
                                        output_width=368)

    # allow_smaller_final_batch lets the last batch of an epoch be
    # smaller than batch_size instead of being dropped.
    inputs, labels = tf.train.batch(
        [image, label],
        batch_size=FLAGS.batch_size,
        allow_smaller_final_batch=True)

    # Forward pass: preprocess -> predict -> loss / postprocess -> accuracy.
    cls_model = model.Model(is_training=True, num_classes=FLAGS.num_classes)
    preprocessed_inputs = cls_model.preprocess(inputs)
    prediction_dict = cls_model.predict(preprocessed_inputs)
    loss_dict = cls_model.loss(prediction_dict, labels)
    loss = loss_dict['loss']
    postprocessed_dict = cls_model.postprocess(prediction_dict)
    acc = cls_model.accuracy(postprocessed_dict, labels)
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('accuracy', acc)

    # Momentum SGD with a schedule driven by the global step.
    global_step = slim.create_global_step()
    learning_rate = configure_learning_rate(num_samples, global_step)
    optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                           momentum=0.9)
    train_op = slim.learning.create_train_op(loss,
                                             optimizer,
                                             summarize_gradients=True)
    tf.summary.scalar('learning_rate', learning_rate)

    # init_fn restores pretrained weights (if any) before training starts.
    init_fn = get_init_fn()

    slim.learning.train(train_op=train_op,
                        logdir=FLAGS.logdir,
                        init_fn=init_fn,
                        number_of_steps=FLAGS.num_steps,
                        save_summaries_secs=20,
                        save_interval_secs=600)
Example #2
0
def load_batch(dataset, batch_size, height, width, num_classes):
    """Return a batch of (images, one-hot labels) read from `dataset`.

    Images are border-expanded to `height` x `width` before batching;
    the final batch of an epoch may be smaller than `batch_size`.
    """
    provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
    raw_image, raw_label = provider.get(['image', 'label'])

    # Border-expand the decoded image to the requested output size.
    expanded = preprocessing.border_expand(raw_image,
                                           output_height=height,
                                           output_width=width)

    batched_images, batched_labels = tf.train.batch(
        [expanded, raw_label],
        batch_size=batch_size,
        allow_smaller_final_batch=True)
    one_hot_labels = slim.one_hot_encoding(batched_labels, num_classes)
    return batched_images, one_hot_labels