Example #1
def get_variables_to_restore(scope_to_include, suffix_to_exclude):
    """to parse which var to include and which
    var to exclude"""

    vars_to_include = []
    for scope in scope_to_include:
        vars_to_include += slim.get_variables(scope)

    vars_to_exclude = set()
    for suffix in suffix_to_exclude:
        vars_to_exclude |= set(slim.get_variables_by_suffix(suffix))

    return [v for v in vars_to_include if v not in vars_to_exclude]
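
# A hypothetical usage sketch (the scope and suffix names are illustrative,
# assuming the usual imports, i.e. import tensorflow as tf): restore everything
# under "resnet_v1" except Adam and Momentum optimizer slot variables.
vars_to_restore = get_variables_to_restore(["resnet_v1"], ["Adam", "Momentum"])
restorer = tf.train.Saver(vars_to_restore)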
Example #2
def modelVariable():
    weight1 = slim.model_variable(
        name="weight1",
        shape=[2, 3],
        initializer=tf.truncated_normal_initializer(stddev=0.1),
        regularizer=slim.l2_regularizer(scale=0.05))
    weight2 = slim.model_variable(
        name="weight2",
        shape=[2, 3],
        initializer=tf.truncated_normal_initializer(stddev=0.1),
        regularizer=slim.l2_regularizer(scale=0.05))
    # Collect every model variable created so far.
    model_variables = slim.get_model_variables()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        print(sess.run(weight1))
        print("-----------------")
        print(sess.run(weight2))
        print("----------------")
        print(sess.run(slim.get_variables_by_suffix("weight1")))
Example #3
'''
    https://www.cnblogs.com/bmsl/p/dongbin_bmsl_01.html
    http://blog.csdn.net/guvcolie/article/details/77686555
    https://www.2cto.com/kf/201706/649266.html
    
    "Learning TensorFlow: A Guide to Building Deep Learning Systems" by Tom Hope; Chapter 7 covers slim very well.
'''
'''
Advantages of TF-Slim: slim is a lightweight TensorFlow library that makes building, training, and testing models much simpler.
'''
# 1. Usage:
import tensorflow as tf
import tensorflow.contrib.slim as slim
# 2. Components:
# arg_scope: lets callers provide default argument values for ops created inside the same arg_scope (see the sketch below)
# data, evaluation, layers, learning, losses, metrics, nets, queues, regularizers, variables
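# A minimal arg_scope sketch (the layer arguments are illustrative, not from the original):
with slim.arg_scope([slim.conv2d],
                    padding='SAME',
                    weights_regularizer=slim.l2_regularizer(0.0005)):
    net = slim.conv2d(tf.zeros([1, 32, 32, 3]), 16, [3, 3], scope='conv1')
    net = slim.conv2d(net, 16, [3, 3], scope='conv2')  # both convs inherit the defaults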
# 3. Defining models
# In slim, models can be defined concisely by combining variables, layers, and scopes.
''' variables '''
# (1) variables: defined in variables.py. To create a weight variable, initialize it
# with a truncated normal, apply L2 regularization, and place it on the CPU, the
# following code is all that is needed:
weights = slim.variable(
    'weights',
    shape=[10, 10, 3, 3],
    initializer=tf.truncated_normal_initializer(stddev=0.1),
    regularizer=slim.l2_regularizer(0.05),
    device='/CPU:0')
# Native TensorFlow has two kinds of variables: regular variables and local variables.
# Most variables are regular variables: once created, they can be saved to disk using a saver.
# Local variables exist only within a session and are not saved.
# slim further distinguishes variable types by defining model variables: variables that
# represent the parameters of a model, are trained or fine-tuned during learning, and are
# loaded from a checkpoint during evaluation or inference.
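# A minimal sketch of the distinction (variable names here are illustrative):
w = slim.model_variable('w', shape=[10, 10, 3, 3],
                        initializer=tf.truncated_normal_initializer(stddev=0.1))
step = slim.variable('step', shape=[], initializer=tf.zeros_initializer())
model_vars = slim.get_model_variables()  # contains w but not step
all_vars = slim.get_variables()          # contains both w and step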
Example #4
def train(resume, visualize):
    np.random.seed(cfg.random_seed)
    dataset, train_imdb = get_dataset()
    do_val = len(cfg.train.val_imdb) > 0

    class_weights = class_equal_weights(train_imdb)
    (preloaded_batch, enqueue_op, enqueue_placeholders,
     q_size) = setup_preloading(
            Gnet.get_batch_spec(train_imdb['num_classes']))
    reg = tf.contrib.layers.l2_regularizer(cfg.train.weight_decay)
    net = Gnet(num_classes=train_imdb['num_classes'], batch=preloaded_batch,
               weight_reg=reg, class_weights=class_weights)
    lr_gen = LearningRate()
    # reg_ops = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    # reg_op = tf.reduce_mean(reg_ops)
    # optimized_loss = net.loss + reg_op
    optimized_loss = tf.contrib.losses.get_total_loss()
    learning_rate, train_op = get_optimizer(
        optimized_loss, net.trainable_variables)

    val_net = val_imdb = None
    if do_val:
        val_imdb = imdb.get_imdb(cfg.train.val_imdb, is_training=False)
        val_net = Gnet(num_classes=val_imdb['num_classes'], reuse=True)

    with tf.name_scope('summaries'):
        tf.summary.scalar('loss', optimized_loss)
        tf.summary.scalar('data_loss', net.loss)
        tf.summary.scalar('data_loss_normed', net.loss_normed)
        tf.summary.scalar('data_loss_unnormed', net.loss_unnormed)
        tf.summary.scalar('lr', learning_rate)
        tf.summary.scalar('q_size', q_size)
        if cfg.train.histograms:
            tf.summary.histogram('roi_feats', net.roifeats)
            tf.summary.histogram('det_imfeats', net.det_imfeats)
            tf.summary.histogram('pw_feats', net.pw_feats)
            for i, blockout in enumerate(net.block_feats):
                tf.summary.histogram('block{:02d}'.format(i + 1),
                                     blockout)
        merge_summaries_op = tf.summary.merge_all()

    with tf.name_scope('averaging'):
        ema = tf.train.ExponentialMovingAverage(decay=0.7)
        maintain_averages_op = ema.apply(
            [net.loss_normed, net.loss_unnormed, optimized_loss])
        # update moving averages after every loss evaluation
        with tf.control_dependencies([train_op]):
            train_op = tf.group(maintain_averages_op)
        smoothed_loss_normed = ema.average(net.loss_normed)
        smoothed_loss_unnormed = ema.average(net.loss_unnormed)
        smoothed_optimized_loss = ema.average(optimized_loss)

    restorer = ckpt = None
    if resume:
        ckpt = tf.train.get_checkpoint_state('./')
        restorer = tf.train.Saver()
    elif cfg.gnet.imfeats:
        variables_to_restore = slim.get_variables_to_restore(
            include=["resnet_v1"])
        variables_to_exclude = \
            slim.get_variables_by_suffix('Adam_1', scope='resnet_v1') + \
            slim.get_variables_by_suffix('Adam', scope='resnet_v1') + \
            slim.get_variables_by_suffix('Momentum', scope='resnet_v1')
        restorer = tf.train.Saver(
            list(set(variables_to_restore) - set(variables_to_exclude)))

    saver = tf.train.Saver(max_to_keep=None)
    model_manager = ModelManager()
    config = tf.ConfigProto()
    with tf.Session(config=config) as sess:
        train_writer = tf.summary.FileWriter(cfg.log_dir, sess.graph)
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        coord = start_preloading(
            sess, enqueue_op, dataset, enqueue_placeholders)

        start_iter = 1
        if resume:
            restorer.restore(sess, ckpt.model_checkpoint_path)
            tensor = tf.get_default_graph().get_tensor_by_name("global_step:0")
            start_iter = sess.run(tensor + 1)
        elif cfg.gnet.imfeats:
            restorer.restore(sess, cfg.train.pretrained_model)

        for it in range(start_iter, cfg.train.num_iter + 1):
            if coord.should_stop():
                break

            if visualize:
                # don't do actual training, just visualize data
                visualize_detections(sess, it, learning_rate, lr_gen, net,
                                     train_op)
                continue

            (_, val_total_loss, val_loss_normed, val_loss_unnormed,
             summary) = sess.run(
                [train_op, smoothed_optimized_loss, smoothed_loss_normed,
                 smoothed_loss_unnormed, merge_summaries_op],
                feed_dict={learning_rate: lr_gen.get_lr(it)})
            train_writer.add_summary(summary, it)

            if it % cfg.train.display_iter == 0:
                print(('{}  iter {:6d}   lr {:8g}   opt loss {:8g}     '
                       'data loss normalized {:8g}   '
                       'unnormalized {:8g}').format(
                    datetime.now(), it, lr_gen.get_lr(it), val_total_loss,
                    val_loss_normed, val_loss_unnormed))

            if do_val and it % cfg.train.val_iter == 0:
                print('{}  starting validation'.format(datetime.now()))
                val_map, mc_ap, pc_ap = val_run(sess, val_net, val_imdb)
                print(('{}  iter {:6d}   validation pass:   mAP {:5.1f}   '
                       'multiclass AP {:5.1f}').format(
                      datetime.now(), it, val_map, mc_ap))

                #save_path = saver.save(sess, net.name, global_step=it)
                #print('wrote model to {}'.format(save_path))
                # dump_debug_info(sess, net, it)
                #model_manager.add(it, val_map, save_path)
                #model_manager.print_summary()
                #model_manager.write_link_to_best('./gnet_best')

            #elif it % cfg.train.save_iter == 0 or it == cfg.train.num_iter:
            #    save_path = saver.save(sess, net.name, global_step=it)
            #    print('wrote model to {}'.format(save_path))
            #    # dump_debug_info(sess, net, it)

        coord.request_stop()
        coord.join()
    print('training finished')
    if do_val:
        print('summary of validation performance')
        model_manager.print_summary()
Example #5
import tensorflow.contrib.slim as slim
import tensorflow as tf

weight1 = slim.model_variable('weight1',
                              shape=[2, 3],
                              initializer=tf.truncated_normal_initializer(stddev=0.1),  # truncated-normal random values
                              regularizer=slim.l2_regularizer(0.05))  # L2 regularization

weight2 = slim.model_variable('weight2',
                              shape=[2, 3],
                              initializer=tf.truncated_normal_initializer(stddev=0.1),
                              regularizer=slim.l2_regularizer(0.05))

model_variables = slim.get_model_variables()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    print(sess.run(weight1))
    print("--------------------")
    print(sess.run(model_variables))
    print("--------------------")
    print(sess.run(slim.get_variables_by_suffix("weight1")))
Example #6
# Gather the total loss (the data loss plus all registered regularization losses).
total_loss = slim.losses.get_total_loss()
optimizer = tf.train.GradientDescentOptimizer(0.1)
train_op = slim.learning.create_train_op(total_loss, optimizer)
logdir = ""  # directory where checkpoints and summaries will be written

slim.learning.train(train_op,
                    logdir,
                    number_of_steps=1000,
                    save_summaries_secs=300,
                    save_interval_secs=600)

variables_to_restore = slim.get_variables_by_name("v2")
# or
variables_to_restore = slim.get_variables_by_suffix("2")
# or
variables_to_restore = slim.get_variables(scope="nested")
# or
variables_to_restore = slim.get_variables_to_restore(include=["nested"])
# or
variables_to_restore = slim.get_variables_to_restore(exclude=["v1"])

restorer = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
    restorer.restore(sess, "/tmp/model.ckpt")

# Load the data
images, labels = None, None

# Define the network
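# A purely illustrative sketch of what might follow (the original snippet ends at the
# stub above): a small slim network over a placeholder standing in for the real data.
images = tf.placeholder(tf.float32, [None, 28, 28, 1])
net = slim.conv2d(images, 32, [3, 3], scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.flatten(net)
logits = slim.fully_connected(net, 10, activation_fn=None, scope='fc1')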
Example #7
def create_model(images, labels):
    """
    This method initializes the Inception v1 model with weights generated
    by training on the ImageNet dataset for all layers except the last.
    The last layer is adjusted to output only 9 classes (instead of the
    1000 required for ImageNet). Note also that the method sets up the model
    for fine-tuning, meaning that during training only the last layer's
    weights can change.

    :param images: A tensor containing the images.
    :param labels: A tensor representing the correct labels for the images.

    :return restore_op: The operation used to restore the weights of the model.
    :return feed_dict: The feed_dict used for restoring the model.
    :return train_op: The train_op used to train the model.
    :return metrics_to_values: The metrics collected when training.
    :return metrics_to_updates: The metrics update op used when training.
    """
    with slim.arg_scope(inception.inception_v1_arg_scope()):
        #  Load the deep learning model.
        logits, end_points = inception.inception_v1(images,
                                                    num_classes=NUM_CLASSES,
                                                    is_training=False)

        # We are going to train only the last layer of the model.
        trainable_layer = 'InceptionV1/Logits/Conv2d_0c_1x1'

        variables_to_restore = slim.get_variables_to_restore(
            exclude=[trainable_layer])
        variables_to_train = slim.get_variables_by_suffix('', trainable_layer)

        # Transform the labels into one hot encoding.
        one_hot_labels = tf.one_hot(
            labels,
            NUM_CLASSES,
        )

        # Define the loss function.
        loss = tf.losses.softmax_cross_entropy(
            one_hot_labels,
            end_points['Logits'],
        )

        # Select the optimizer.
        optimizer = tf.train.AdamOptimizer(1e-4)

        # Create a train op.
        train_op = tf.contrib.training.create_train_op(
            loss,
            optimizer,
            variables_to_train=variables_to_train,
        )

        predictions = tf.argmax(end_points['Predictions'],
                                1,
                                name="predictions")
        metrics_to_values, metrics_to_updates = \
            slim.metrics.aggregate_metric_map({
                'accuracy': tf.metrics.accuracy(labels, predictions),
                'mean_loss': tf.metrics.mean(loss),
            })

        # Define load predefined model operation.
        restore_op, feed_dict = slim.assign_from_checkpoint(
            'inception_v1.ckpt', variables_to_restore)

        return (
            restore_op,
            feed_dict,
            train_op,
            metrics_to_values,
            metrics_to_updates,
        )