Example #1
def evaluate(path, recordfile, resultfile):
    inputdata, arraylength = GetDate(path, recordfile)
    inputdata = tf.convert_to_tensor(inputdata)
    inputdata = tf.reshape(inputdata, (arraylength, 140, 400, 1))
    inputdata = tf.cast(inputdata, dtype=tf.float32)
    is_training = False
    labels_nums = 2
    keep_prob = 1
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        out, end_points = resnet_v1.resnet_v1_101(inputs=inputdata,
                                                  num_classes=labels_nums,
                                                  is_training=is_training,
                                                  global_pool=True)
    #outputprobability = tf.nn.softmax(out)
    outputprobability = out
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.get_variable_scope().reuse_variables()
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            probabilitylist = sess.run(outputprobability)
            probabilityresult = probabilitylist
            print(type(probabilitylist))
            print(probabilityresult)
            np.savetxt(resultfile, probabilityresult)
            #print (accuracy_score_test)
            #print (type(accuracy_score_test))
            #print ('After %s training step(s),validation''test_accury = %g'%(global_step,accuracy_score_test))
            #print ('After %smtraining step(s),validation''train_accury = %g'%(global_step,accuracy_score_train))
        else:
            print('No checkpoint file found')
            return
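Note that in this example the softmax is commented out, so the raw logits in `out` are written to resultfile. A minimal, hedged sketch of saving normalized class probabilities instead (same graph and checkpoint loading as above; MODEL_SAVE_PATH, labels_nums and resultfile are the names already used in the snippet):

outputprobability = tf.nn.softmax(out)  # per-class probabilities instead of raw logits
saver = tf.train.Saver()
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)  # load the trained weights
        probs = sess.run(outputprobability)
        np.savetxt(resultfile, probs.reshape(-1, labels_nums))  # one row of probabilities per input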
Example #2
def predict(models_path, image_dir, labels_filename, labels_nums, data_format):
    [batch_size, resize_height, resize_width, depths] = data_format

    #labels = np.loadtxt(labels_filename, str, delimiter='\t')
    input_images = tf.placeholder(dtype=tf.float32, shape=[None, resize_height, resize_width, depths], name='input')

    # To run prediction with a different model, modify this block
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        out, end_points = resnet_v1.resnet_v1_101(inputs=input_images, num_classes=labels_nums,is_training=False)

    # Apply softmax to the network output, then take the class with the highest probability
    score = tf.nn.softmax(out, name='pre')
    class_id = tf.argmax(score, 1)

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess, models_path)
    images_list = glob.glob(os.path.join(image_dir, '*.jpg'))
    for image_path in images_list:
        im = read_image(image_path, resize_height, resize_width, normalization=True)
        im = im[np.newaxis, :]
        #pred = sess.run(f_cls, feed_dict={x: im, keep_prob: 1.0})
        pre_score, pre_label = sess.run([score, class_id], feed_dict={input_images: im})
        max_score = pre_score[0, pre_label]
        label_name = list(labels_filename.keys())[list(labels_filename.values()).index(pre_label)]
        print("{} is: pre labels:{}, name:{}, score: {}".format(image_path, pre_label, label_name, max_score))
    sess.close()
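A hedged usage sketch for this variant of predict: here labels_filename is used as a dict mapping class name to class id (unlike Example #3 below, where it is the path to a label file). All paths and values below are hypothetical:

labels = {'cat': 0, 'dog': 1}                    # hypothetical name -> id mapping
data_format = [1, 224, 224, 3]                   # [batch_size, resize_height, resize_width, depths]
predict(models_path='models/best_models.ckpt',   # hypothetical checkpoint path
        image_dir='data/test',                   # directory containing *.jpg images
        labels_filename=labels,
        labels_nums=len(labels),
        data_format=data_format)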
Example #3
def predict(models_path, image_dir, labels_filename, labels_nums, data_format):
    [batch_size, resize_height, resize_width, depths] = data_format

    labels = np.loadtxt(labels_filename, str, delimiter='\t')
    input_images = tf.placeholder(
        dtype=tf.float32,
        shape=[None, resize_height, resize_width, depths],
        name='input')

    # model
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        out, end_points = resnet_v1.resnet_v1_50(inputs=input_images,
                                                 num_classes=labels_nums,
                                                 is_training=False)

    # out = tf.squeeze(out, )
    # out = tf.squeeze(out, [1, 2])
    # Apply softmax to the output, then take the class with the highest probability
    score = tf.nn.softmax(out, name='pre')
    class_id = tf.argmax(score, 1)

    sess = tf.InteractiveSession()

    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess, models_path)
    images_list = glob.glob(os.path.join(image_dir, '*.jpg'))

    for image_path in images_list:
        im = read_image(image_path,
                        resize_height,
                        resize_width,
                        normalization=True)
        im = im[np.newaxis, :]

        pre_score, out_, pre_label = sess.run([score, out, class_id],
                                              feed_dict={input_images: im})
        print("out_  =============> \n", out_)

        # pre_score, pre_label = sess.run([score, class_id], feed_dict={input_images: im})

        max_score = pre_score[0, pre_label]

        print(
            "_______________________________________________________________________\n\n"
        )
        print("{} is: pre labels:{}, score: {}".format(image_path, pre_label,
                                                       max_score))
        # print("{} is: pre labels:{},name:{} score: {}".format(image_path, pre_label, labels[pre_label], max_score))
    sess.close()
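The commented-out tf.squeeze lines above point at a version-dependent detail: with some slim releases, resnet_v1_50 returns logits of shape [batch, 1, 1, num_classes] instead of [batch, num_classes]. A hedged sketch of guarding against this before the softmax (same `out` tensor as in the snippet):

if out.shape.ndims == 4:           # older slim versions keep the 1x1 spatial dimensions
    out = tf.squeeze(out, [1, 2])  # -> [batch, num_classes]
score = tf.nn.softmax(out, name='pre')
class_id = tf.argmax(score, 1)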
Example #4
    def extract_features(self, preprocessed_inputs):
        """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]

    Raises:
      ValueError: depth multiplier is not supported.
    """
        if self._depth_multiplier != 1.0:
            raise ValueError('Depth multiplier not supported.')

        preprocessed_inputs = shape_utils.check_min_image_dim(
            129, preprocessed_inputs)

        with tf.variable_scope(self._resnet_scope_name,
                               reuse=self._reuse_weights) as scope:
            with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                with (slim.arg_scope(self._conv_hyperparams_fn())
                      if self._override_base_feature_extractor_hyperparams else
                      context_manager.IdentityContextManager()):
                    with slim.arg_scope([resnet_v1.bottleneck],
                                        use_bounded_activations=self.
                                        _use_bounded_activations):
                        _, activations = self._resnet_base_fn(
                            inputs=ops.pad_to_multiple(preprocessed_inputs,
                                                       self._pad_to_multiple),
                            num_classes=None,
                            is_training=None,
                            global_pool=False,
                            output_stride=None,
                            store_non_strided_activations=True,
                            scope=scope)

            with slim.arg_scope(self._conv_hyperparams_fn()):
                feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
                    base_feature_map_depth=self._base_feature_map_depth,
                    num_layers=self._num_layers,
                    image_features={
                        'image_features':
                        self._filter_features(activations)['block3']
                    })
        return feature_maps.values()
Example #5
def train(val_record_file, labels_nums, data_shape, snapshot_prefix):
    '''
    :param val_record_file: tfrecord file used for validation
    :param labels_nums: number of label classes
    :param data_shape: input data shape
    :param snapshot_prefix: prefix for saved model files
    :return:
    '''
    #[base_lr,max_steps]=train_param
    [batch_size, resize_height, resize_width, depths] = data_shape

    val_images_batch, val_labels_batch = get_batch_images(val_record_file)
    # Define the model:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        out, end_points = resnet_v1.resnet_v1_101(inputs=input_images,
                                                  num_classes=labels_nums,
                                                  is_training=is_training,
                                                  global_pool=True)

    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(input_labels, 1)),
                tf.float32))
    saver = tf.train.Saver()
    while True:
        with tf.Session() as sess:
            tf.get_variable_scope().reuse_variables()
            ckpt = tf.train.get_checkpoint_state(snapshot_prefix)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                print(global_step)
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)
                #batch_input_images, batch_input_labels = sess.run([train_images_batch, train_labels_batch])
                mean_acc = net_evaluation(sess, accuracy, val_images_batch,
                                          val_labels_batch)
                print("%s: val accuracy :  %g" % (datetime.now(), mean_acc))
                coord.request_stop()
                coord.join(threads)
Example #6
def extract_feature(imgList, args):
    tf.reset_default_graph()

    queue = tf.train.string_input_producer(imgList,
                                           num_epochs=None,
                                           shuffle=False)
    reader = tf.WholeFileReader()

    img_path, img_data = reader.read(queue)
    img = vgg_preprocessing.preprocess_image(
        tf.image.decode_jpeg(contents=img_data, channels=3), args.imgSize,
        args.imgSize)
    img = tf.expand_dims(img, 0)
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        net, end_points = resnet_v1.resnet_v1_152(inputs=img,
                                                  is_training=False)
    feat1 = end_points['resnet_v1_152/block4']
    feat2 = end_points['pool5']

    saver = tf.train.Saver()
    init_op = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init_op)
        saver.restore(sess, args.cnnModel)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        feats1 = []
        feats2 = []
        for i in range(len(imgList)):
            f1, f2 = sess.run([feat1, feat2])  # f1: (1, 7, 7, 2048)   f2: (1, 1, 1, 2048)
            feats1.append(f1[0])
            feats2.append(f2[0][0][0])
        coord.request_stop()
        coord.join(threads)
    return feats1, feats2
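A hedged usage sketch for extract_feature; the argparse-style args object (with the imgSize and cnnModel attributes used above) and the frame directory are hypothetical:

import argparse
import glob

args = argparse.Namespace(imgSize=224,                           # input size expected by the preprocessing
                          cnnModel='models/resnet_v1_152.ckpt')  # hypothetical checkpoint path
imgList = sorted(glob.glob('frames/*.jpg'))                      # hypothetical frame directory
block4_feats, pool5_feats = extract_feature(imgList, args)
print(len(block4_feats), block4_feats[0].shape)                  # e.g. (7, 7, 2048) per image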
Example #7
def train(train_record_file, train_log_step, train_param, val_record_file,
          val_log_step, labels_nums, data_shape, snapshot, snapshot_prefix):
    '''
    :param train_record_file: tfrecord file used for training
    :param train_log_step: interval (in steps) for logging training info
    :param train_param: training parameters [base_lr, max_steps]
    :param val_record_file: tfrecord file used for validation
    :param val_log_step: interval (in steps) for logging validation info
    :param labels_nums: number of label classes
    :param data_shape: input data shape
    :param snapshot: interval (in steps) for saving the model
    :param snapshot_prefix: prefix for saved model files
    :return:
    '''
    [base_lr, max_steps] = train_param
    [batch_size, resize_height, resize_width, depths] = data_shape

    # Number of training and validation samples
    #train_nums=get_example_nums(train_record_file)
    #val_nums=get_example_nums(val_record_file)
    print('train nums:%d,val nums:%d' % (train_nums, val_nums))

    # Read images and labels from the tfrecord files
    # Training data: the training set should normally be shuffled (shuffle=True)
    #train_images, train_labels = read_records(train_record_file, resize_height, resize_width, type='normalization')
    #train_images_batch, train_labels_batch = get_batch_images(train_images, train_labels,
    #                                                          batch_size=batch_size, labels_nums=labels_nums,
    #                                                          one_hot=True, shuffle=True)
    # Validation data: shuffling is not required
    #val_images, val_labels = read_records(val_record_file, resize_height, resize_width, type='normalization')
    #val_images_batch, val_labels_batch = get_batch_images(val_images, val_labels,
    #                                                      batch_size=batch_size, labels_nums=labels_nums,
    #                                                      one_hot=True, shuffle=False)
    train_images_batch, train_labels_batch = get_batch_images(
        train_record_file)
    val_images_batch, val_labels_batch = get_batch_images(val_record_file,
                                                          is_train=False)
    # Define the model:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        out, end_points = resnet_v1.resnet_v1_101(inputs=input_images,
                                                  num_classes=labels_nums,
                                                  is_training=is_training,
                                                  global_pool=True)
    # with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope()):
    #     out, end_points = mobilenet_v1.mobilenet_v1(inputs=input_images, num_classes=labels_nums,
    #                                                 dropout_keep_prob=keep_prob, is_training=is_training,
    #                                                 global_pool=True)

    # Specify the loss function: losses defined via tf.losses are added to the total
    # loss automatically, so no explicit add_loss() is needed
    tf.losses.softmax_cross_entropy(onehot_labels=input_labels,
                                    logits=out)  # add the cross-entropy loss (loss ≈ 1.6)
    # slim.losses.add_loss(my_loss)
    loss = tf.losses.get_total_loss(
        add_regularization_losses=True)  # add the regularization losses (loss ≈ 2.2)
    #global_step = tf.Variable(0,trainable = False)
    # Specify the optimization scheme:
    #variable_averages=tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
    #variable_averages_op=variable_averages.apply(tf.trainable_variables())
    #learning_rate=tf.train.exponential_decay(
    #base_lr,
    #global_step,
    #train_nums/batch_size,
    #LEARNING_RATE_DECAY)
    #train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
    #with tf.control_dependencies([train_step,variable_averages_op]):train_op=tf.np_op(name='train')
    # When `batch_norm` layers are used, each layer's moving `average` and `variance`
    # must be updated. These updates are not part of the normal training step, so they
    # have to be run manually as below.
    # Collect all ops that need updating via `tf.get_collection`
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Use TensorFlow control flow: run the update ops first, then the training op
    with tf.control_dependencies(update_ops):
        print("update_ops:{}".format(update_ops))
        # create_train_op that ensures that when we evaluate it to get the loss,
        # the update_ops are done and the gradient updates are computed.

        # train_op = tf.train.MomentumOptimizer(learning_rate=base_lr, momentum=0.9).minimize(loss)
        train_op = tf.train.AdadeltaOptimizer(
            learning_rate=base_lr).minimize(loss)
        #train_op=tf.np_op(name='train')

    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(input_labels, 1)),
                tf.float32))
    # Training loop
    step_train(train_op=train_op,
               loss=loss,
               accuracy=accuracy,
               train_images_batch=train_images_batch,
               train_labels_batch=train_labels_batch,
               train_nums=train_nums,
               train_log_step=train_log_step,
               val_images_batch=val_images_batch,
               val_labels_batch=val_labels_batch,
               val_nums=val_nums,
               val_log_step=val_log_step,
               snapshot_prefix=snapshot_prefix,
               snapshot=snapshot)
Example #8
  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]

    Raises:
      ValueError: depth multiplier is not supported.
    """
    if self._depth_multiplier != 1.0:
      raise ValueError('Depth multiplier not supported.')

    preprocessed_inputs = shape_utils.check_min_image_dim(
        129, preprocessed_inputs)

    with tf.variable_scope(
        self._resnet_scope_name, reuse=self._reuse_weights) as scope:
      with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams else
              context_manager.IdentityContextManager()):
          _, image_features = self._resnet_base_fn(
              inputs=ops.pad_to_multiple(preprocessed_inputs,
                                         self._pad_to_multiple),
              num_classes=None,
              is_training=None,
              global_pool=False,
              output_stride=None,
              store_non_strided_activations=True,
              scope=scope)
          image_features = self._filter_features(image_features)
      with slim.arg_scope(self._conv_hyperparams_fn()):
        with tf.variable_scope(self._fpn_scope_name,
                               reuse=self._reuse_weights):
          base_fpn_max_level = min(self._fpn_max_level, 5)
          feature_block_list = []
          for level in range(self._fpn_min_level, base_fpn_max_level + 1):
            feature_block_list.append('block{}'.format(level - 1))
          fpn_features = feature_map_generators.fpn_top_down_feature_maps(
              [(key, image_features[key]) for key in feature_block_list],
              depth=256)
          feature_maps = []
          for level in range(self._fpn_min_level, base_fpn_max_level + 1):
            feature_maps.append(
                fpn_features['top_down_block{}'.format(level - 1)])
          last_feature_map = fpn_features['top_down_block{}'.format(
              base_fpn_max_level - 1)]
          # Construct coarse features
          for i in range(base_fpn_max_level, self._fpn_max_level):
            last_feature_map = slim.conv2d(
                last_feature_map,
                num_outputs=256,
                kernel_size=[3, 3],
                stride=2,
                padding='SAME',
                scope='bottom_up_block{}'.format(i))
            feature_maps.append(last_feature_map)
    return feature_maps
Example #9
import os

from PIL import Image

import tensorflow as tf

from slim.nets import resnet_v1
from slim.preprocessing import vgg_preprocessing
slim = tf.contrib.slim

FRAME_HOME = '../data/ucf101-frames'
FEATURE_HOME = 'ucf101-block4-7-7-2048-features'

img = tf.placeholder(dtype=tf.float32)
pre_img = vgg_preprocessing.preprocess_image(img, 224, 224, is_training=False)
pre_img = tf.expand_dims(pre_img, 0)

with slim.arg_scope(resnet_v1.resnet_arg_scope()):
    _, end_points = resnet_v1.resnet_v1_152(inputs=pre_img, is_training=False)
feature = tf.squeeze(end_points['resnet_v1_152/block4'])

if not os.path.exists('resnet_v1_152.ckpt'):
    os.system(
        'wget http://download.tensorflow.org/models/resnet_v1_152_2016_08_28.tar.gz'
    )
    os.system('tar -xvzf resnet_v1_152_2016_08_28.tar.gz')
    os.system('rm resnet_v1_152_2016_08_28.tar.gz')

if not os.path.isdir(FEATURE_HOME):
    os.mkdir(FEATURE_HOME)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
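    # The example is truncated at this point. A hedged sketch of the steps that would
    # typically follow inside this session: restoring the downloaded weights and running
    # `feature` on a frame (the frame path below is hypothetical).
    saver = tf.train.Saver()
    saver.restore(sess, 'resnet_v1_152.ckpt')            # load the pre-trained weights
    frame = Image.open(os.path.join(FRAME_HOME, 'v_0001/frame_0001.jpg'))  # hypothetical frame
    block4 = sess.run(feature, feed_dict={img: frame})   # block4 activations, e.g. shape (7, 7, 2048)
    print(block4.shape)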
Example #10
def train(train_record_file, train_log_step, train_param, val_record_file,
          val_log_step, labels_nums, data_shape, snapshot, snapshot_prefix):
    '''
    :param train_record_file: tfrecord file used for training
    :param train_log_step: interval (in steps) for logging training info
    :param train_param: training parameters [base_lr, max_steps]
    :param val_record_file: tfrecord file used for validation
    :param val_log_step: interval (in steps) for logging validation info
    :param labels_nums: number of label classes
    :param data_shape: input data shape
    :param snapshot: interval (in steps) for saving the model
    :param snapshot_prefix: prefix for saved model files
    :return:
    '''
    [base_lr, max_steps] = train_param
    [batch_size, resize_height, resize_width, depths] = data_shape

    # Number of training and validation samples
    train_nums = get_example_nums(train_record_file)
    val_nums = get_example_nums(val_record_file)
    print('train nums:%d,val nums:%d' % (train_nums, val_nums))

    # Read images and labels from the tfrecord files
    # Training data: the training set should normally be shuffled (shuffle=True)
    train_images, train_labels = read_records(train_record_file,
                                              resize_height,
                                              resize_width,
                                              type='normalization')
    train_images_batch, train_labels_batch = get_batch_images(
        train_images,
        train_labels,
        batch_size=batch_size,
        labels_nums=labels_nums,
        one_hot=True,
        shuffle=True)
    # Validation data: shuffling is not required
    val_images, val_labels = read_records(val_record_file,
                                          resize_height,
                                          resize_width,
                                          type='normalization')
    val_images_batch, val_labels_batch = get_batch_images(
        val_images,
        val_labels,
        batch_size=batch_size,
        labels_nums=labels_nums,
        one_hot=True,
        shuffle=False)

    # Define the model:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        out, end_points = resnet_v1.resnet_v1_50(inputs=input_images,
                                                 num_classes=labels_nums,
                                                 is_training=is_training)

    # out = tf.squeeze(out, [1, 2])

    # Specify the loss function: losses defined via tf.losses are added to the total
    # loss automatically, so no explicit add_loss() is needed
    tf.losses.softmax_cross_entropy(onehot_labels=input_labels,
                                    logits=out)  # add the cross-entropy loss (loss ≈ 1.6)
    # slim.losses.add_loss(my_loss)
    loss = tf.losses.get_total_loss(
        add_regularization_losses=True)  # add the regularization losses (loss ≈ 2.2)

    # Specify the optimization scheme:
    # global_step = tf.Variable(0, trainable=False)
    # learning_rate = tf.train.exponential_decay(base_lr, global_step, 2000, 0.90)

    # When `batch_norm` layers are used, each layer's moving `average` and `variance`
    # must be updated. These updates are not part of the normal training step, so they
    # have to be run manually as below.
    # Collect all ops that need updating via `tf.get_collection`
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Use TensorFlow control flow: run the update ops first, then the training op
    with tf.control_dependencies(update_ops):
        print("update_ops:{}".format(update_ops))
        # create_train_op that ensures that when we evaluate it to get the loss,
        # the update_ops are done and the gradient updates are computed.
        train_op = tf.train.GradientDescentOptimizer(
            learning_rate=0.0001).minimize(loss)

    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(input_labels, 1)),
                tf.float32))
    # Training loop
    global_step = tf.Variable(0, trainable=False)
    saver = tf.train.Saver(max_to_keep=5)
    max_acc = 0.0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(max_steps + 1):
            if i < 10000:
                val_log_step = 500
            else:
                val_log_step = 10

            if i == 3000:
                saver.save(sess, snapshot_prefix, global_step=i)

                learning_rate = tf.train.exponential_decay(
                    0.01, global_step, 1000, 0.95)

                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                with tf.control_dependencies(update_ops):
                    # print("update_ops:{}".format(update_ops))
                    # create_train_op that ensures that when we evaluate it to get the loss,
                    # the update_ops are done and the gradient updates are computed.
                    train_op = tf.train.GradientDescentOptimizer(
                        learning_rate).minimize(loss)

                sess.run(tf.global_variables_initializer())
                saver.restore(sess, snapshot_prefix + "-%s" % i)

            batch_input_images, batch_input_labels = sess.run(
                [train_images_batch, train_labels_batch])
            _, train_loss = sess.run(
                [train_op, loss],
                feed_dict={
                    input_images: batch_input_images,
                    input_labels: batch_input_labels,
                    is_training: True
                })
            # Evaluate on the training set (only the current training batch)
            if i % train_log_step == 0:
                train_acc = sess.run(accuracy,
                                     feed_dict={
                                         input_images: batch_input_images,
                                         input_labels: batch_input_labels,
                                         is_training: False
                                     })
                print(
                    "%s: Step [%d]  train Loss : %f, training accuracy :  %g" %
                    (datetime.now(), i, train_loss, train_acc))

            # Evaluate on the full validation set
            if i % val_log_step == 0:
                mean_loss, mean_acc = net_evaluation(sess, loss, accuracy,
                                                     val_images_batch,
                                                     val_labels_batch,
                                                     val_nums)
                print("%s: Step [%d]  val Loss : %f, val accuracy :  %g" %
                      (datetime.now(), i, mean_loss, mean_acc))

            # Save the model every `snapshot` iterations, or at the final step
            # if (i % snapshot == 0 and i > 0) or i == max_steps:
            #     print('-----save:{}-{}'.format(snapshot_prefix, i))
            #     saver.save(sess, snapshot_prefix, global_step=i)

            # Save the model with the best validation accuracy
            if mean_acc > max_acc and mean_acc > 0.7:
                max_acc = mean_acc
                path = os.path.dirname(snapshot_prefix)
                best_models = os.path.join(path, 'best_models.ckpt')
                print('------save:{}'.format(best_models))
                saver.save(sess, best_models)

        coord.request_stop()
        coord.join(threads)
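A hedged usage sketch for this tfrecord-based train function. As written, the snippet also relies on module-level placeholders (input_images, input_labels, is_training) and helpers (read_records, get_batch_images, net_evaluation) defined elsewhere; the paths and hyper-parameters below are hypothetical:

train(train_record_file='dataset/record/train.tfrecords',  # hypothetical tfrecord paths
      train_log_step=100,
      train_param=[0.0001, 100000],                         # [base_lr, max_steps]
      val_record_file='dataset/record/val.tfrecords',
      val_log_step=200,
      labels_nums=5,
      data_shape=[32, 224, 224, 3],                         # [batch_size, height, width, depths]
      snapshot=2000,
      snapshot_prefix='models/model.ckpt')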
Example #11
File: model.py  Project: iriscxy/VMSMO
    def _add_seq2seq(self):
        """Add the whole sequence-to-sequence model to the graph."""
        hps = self._hps
        vsize = self._vocab.size()  # size of the vocabulary
        # with tf.variable_scope('image_encoder'):
        self.reshaped_pix = tf.reshape(self._side_batch, [-1, 32, 64, 3])
        with slim.arg_scope(resnet_arg_scope()):
            net, end_points = resnet_v1_152(self.reshaped_pix,
                                            is_training=FLAGS.mode == 'train')
            # feat1 = end_points['resnet_v1_152/block4']
        pic_encoded = end_points['global_pool']
        # self.end_points = end_points
        # self.net = net

        with tf.variable_scope('seq2seq'):
            # Some initializers
            self.rand_unif_init = tf.random_uniform_initializer(
                -hps.rand_unif_init_mag, hps.rand_unif_init_mag, seed=123)
            self.trunc_norm_init = tf.truncated_normal_initializer(
                stddev=hps.trunc_norm_init_std)

            # Add embedding matrix (shared by the encoder and decoder inputs)
            with tf.variable_scope('embedding'):
                embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],
                                            dtype=tf.float32,
                                            initializer=self.trunc_norm_init)
                emb_enc_inputs = tf.nn.embedding_lookup(
                    embedding, self._enc_batch
                )  # tensor with shape (batch_size, max_enc_steps, emb_size)
                emb_dec_inputs = [
                    tf.nn.embedding_lookup(embedding, x)
                    for x in tf.unstack(self._dec_batch, axis=1)
                ]  # list length max_dec_steps containing shape (batch_size, emb_size)
            pic_encoded = tf.reshape(
                tf.squeeze(pic_encoded),
                [FLAGS.batch_size, FLAGS.max_side_steps, -1])
            emb_side_inputs = tf.layers.dense(pic_encoded, FLAGS.emb_dim * 2)
            # Add the encoder.
            enc_outputs, fw_st, bw_st = self._add_encoder(
                emb_enc_inputs, self._enc_lens)
            # batch_size * pic_num * emb_dim
            new_emb_side_inputs = tf.reshape(emb_side_inputs, [
                FLAGS.batch_size * int(FLAGS.max_side_steps / 5), 5,
                FLAGS.hidden_dim * 2
            ])
            # (batch_size*pic_num/5) * 5 * emb_dim

            side_states = self._add_side_rnn_encoder(
                new_emb_side_inputs, 5 * tf.ones(
                    (new_emb_side_inputs.get_shape()[0]), dtype=tf.int32))
            self._side_inputs = tf.reshape(
                side_states, [FLAGS.batch_size, -1, FLAGS.hidden_dim * 2])
            self._enc_states = enc_outputs

            # Our encoder is bidirectional and our decoder is unidirectional so we need to reduce the final encoder hidden state to the right size to be the initial decoder hidden state
            self._dec_in_state = self._reduce_states(fw_st, bw_st)
            self._last_state = tf.concat(self._dec_in_state, -1)

            with tf.variable_scope('interaction'):
                change_side_states = tf.transpose(self._side_inputs, [0, 2, 1])
                self._change_side_states = change_side_states
                attn_matrix = tf.matmul(self._enc_states, change_side_states)
                # batch_size * enc_len * side_len
                self._video_aware_enc_states = tf.matmul(
                    attn_matrix, self._side_inputs)
                self._news_aware_side_states = tf.matmul(
                    tf.transpose(attn_matrix, [0, 2, 1]), self._enc_states)
                gate = tf.layers.dense(self._last_state,
                                       1,
                                       activation=tf.nn.sigmoid)
                gate = tf.expand_dims(tf.tile(gate, [1, FLAGS.hidden_dim * 2]),
                                      1)
                ones = np.ones([FLAGS.batch_size, 1, FLAGS.hidden_dim * 2])
                self._enc_states = gate * self._enc_states + (
                    ones - gate) * self._video_aware_enc_states

            # Add the decoder.
            with tf.variable_scope('decoder'):
                decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage = self._add_decoder(
                    emb_dec_inputs)
                # attn_seg, attn_side = self.pic_attention(emb_side_inputs)
                # self._attn_side = attn_side

            # Add the output projection to obtain the vocabulary distribution
            with tf.variable_scope('output_projection'):
                w = tf.get_variable('w', [hps.hidden_dim, vsize],
                                    dtype=tf.float32,
                                    initializer=self.trunc_norm_init)
                w_t = tf.transpose(w)
                v = tf.get_variable('v', [vsize],
                                    dtype=tf.float32,
                                    initializer=self.trunc_norm_init)
                vocab_scores = [
                ]  # vocab_scores is the vocabulary distribution before applying softmax. Each entry on the list corresponds to one decoder step
                for i, output in enumerate(decoder_outputs):
                    if i > 0:
                        tf.get_variable_scope().reuse_variables()
                    vocab_scores.append(tf.nn.xw_plus_b(
                        output, w, v))  # apply the linear layer

                vocab_dists = [
                    tf.nn.softmax(s) for s in vocab_scores
                ]  # The vocabulary distributions. List length max_dec_steps of (batch_size, vsize) arrays. The words are in the order they appear in the vocabulary file.

            # For pointer-generator model, calc final distribution from copy distribution and vocabulary distribution
            if FLAGS.pointer_gen:
                final_dists = self._calc_final_dist(vocab_dists,
                                                    self.attn_dists)
            else:  # final distribution is just vocabulary distribution
                final_dists = vocab_dists

            if hps.mode in ['train', 'eval']:
                # Calculate the loss
                with tf.variable_scope('loss'):
                    if FLAGS.pointer_gen:
                        # Calculate the loss per step
                        # This is fiddly; we use tf.gather_nd to pick out the probabilities of the gold target words
                        loss_per_step = [
                        ]  # will be list length max_dec_steps containing shape (batch_size)
                        batch_nums = tf.range(
                            0, limit=hps.batch_size)  # shape (batch_size)
                        for dec_step, dist in enumerate(final_dists):
                            targets = self._target_batch[:,
                                                         dec_step]  # The indices of the target words. shape (batch_size)
                            indices = tf.stack((batch_nums, targets),
                                               axis=1)  # shape (batch_size, 2)
                            gold_probs = tf.gather_nd(
                                dist, indices
                            )  # shape (batch_size). prob of correct words on this step
                            losses = -tf.log(gold_probs + 1e-10)
                            loss_per_step.append(losses)

                        # Apply dec_padding_mask and get loss
                        self._loss = _mask_and_avg(loss_per_step,
                                                   self._dec_padding_mask)

                    else:  # baseline model
                        self._loss = tf.contrib.seq2seq.sequence_loss(
                            tf.stack(vocab_scores, axis=1), self._target_batch,
                            self._dec_padding_mask
                        )  # this applies softmax internally

                    tf.summary.scalar('loss', self._loss)

                    # Calculate coverage loss from the attention distributions
                    if hps.coverage:
                        with tf.variable_scope('coverage_loss'):
                            self._coverage_loss = _coverage_loss(
                                self.attn_dists, self._dec_padding_mask)
                            tf.summary.scalar('coverage_loss',
                                              self._coverage_loss)
                        self._total_loss = self._loss + hps.cov_loss_wt * self._coverage_loss
                        tf.summary.scalar('total_loss', self._total_loss)

                # with tf.variable_scope('pic_loss'):
                #     self._loss_pic = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=attn_side,
                #                                                                        labels=self._dec_pic_target))
                #     # self._loss_unified = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=attn_side,
                #     #                                                                    labels=attn_seg))
                # self._all_loss = self._loss_pic
                # self._all_loss = self._loss

        with tf.variable_scope('side'):
            emb_side_inputs = tf.nn.l2_normalize(emb_side_inputs, dim=-1)

            # self-attention
            side_outputs, sfw_st, sbw_st = self._add_side_encoder(
                self._side_inputs, self._side_lens)
            conditional_vec = tf.expand_dims(self._last_state, 1)
            conditional_weight = tf.layers.dense(
                tf.multiply(conditional_vec, side_outputs), 1)
            self._cond_side_states = tf.multiply(side_outputs,
                                                 conditional_weight)

            s_gate = tf.layers.dense(self._last_state,
                                     1,
                                     activation=tf.nn.sigmoid)
            s_gate = tf.expand_dims(s_gate, 1)
            s_ones = np.ones_like(s_gate)
            self._side_states = s_gate * self._news_aware_side_states + (
                s_ones - s_gate) * self._cond_side_states

            fusion_gate = tf.layers.dense(self._last_state,
                                          1,
                                          activation=tf.nn.sigmoid)
            fusion_gate = tf.expand_dims(
                tf.tile(fusion_gate, [1, FLAGS.hidden_dim * 2]), 1)
            fusion_ones = tf.ones_like(fusion_gate)
            side_states = tf.nn.l2_normalize(tf.reshape(
                tf.tile(tf.expand_dims(self._side_states, 1), [1, 5, 1, 1]),
                [FLAGS.batch_size, -1, FLAGS.hidden_dim * 2]),
                                             dim=-1)
            fusion_side = fusion_gate * emb_side_inputs + (
                fusion_ones - fusion_gate) * side_states

            attn_side = tf.squeeze(
                tf.layers.dense(
                    fusion_side,
                    1,
                    kernel_initializer=tf.contrib.layers.xavier_initializer()))
            attn_side = nn_ops.softmax(attn_side)
            self.attn_side = attn_side

            # last_state = tf.nn.l2_normalize(tf.tile(tf.expand_dims(self._last_state, 1), [1, 10, 1]), dim=-1)
            # emb_side_inputs = tf.nn.l2_normalize(emb_side_inputs, dim=-1)
            # attn_side = tf.squeeze(tf.layers.dense(tf.concat([last_state, emb_side_inputs], -1), 1, activation=tf.nn.sigmoid, kernel_initializer=tf.contrib.layers.xavier_initializer()))
            # self.attn_side = attn_side

            with tf.variable_scope('pic_loss'):
                # self._loss_pic = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=attn_side,
                #                                                                         labels=self._dec_pic_target))
                # self._loss_pic = pairwise_hinge_loss(logits=attn_side, labels=self._dec_pic_target)
                self._loss_pic = pairwise_hinge_loss(logits=attn_side,
                                                     labels=tf.one_hot(
                                                         self._dec_pic_target,
                                                         FLAGS.max_side_steps))
        if hps.mode in ['train', 'eval']:
            self._all_loss = self._loss + self._loss_pic

        if hps.mode == "decode" or hps.mode == 'auto_decode':
            # We run decode beam search mode one decoder step at a time
            assert len(
                final_dists
            ) == 1  # final_dists is a singleton list containing shape (batch_size, extended_vsize)
            final_dists = final_dists[0]
            topk_probs, self._topk_ids = tf.nn.top_k(
                final_dists, hps.batch_size * 2
            )  # take the k largest probs. note batch_size=beam_size in decode mode
            self._topk_log_probs = tf.log(topk_probs)
Example #12
def train(data_csv_path_train, train_log_step, train_param, data_csv_path_val,
          val_log_step, labels_nums, data_shape, snapshot, snapshot_prefix):
    '''
    :param data_csv_path_train: csv file used for training
    :param train_log_step: interval (in steps) for logging training info
    :param train_param: training parameters [base_lr, max_steps]
    :param data_csv_path_val: csv file used for validation
    :param val_log_step: interval (in steps) for logging validation info
    :param labels_nums: number of label classes
    :param data_shape: input data shape
    :param snapshot: interval (in steps) for saving the model
    :param snapshot_prefix: prefix for saved model files
    :return:
    '''
    [base_lr, max_steps] = train_param
    [batch_size, resize_height, resize_width, depths] = data_shape

    # Number of training and validation samples
    with open(data_csv_path_train, 'r') as f:
        train_nums = len(f.readlines())
    with open(data_csv_path_val, 'r') as v:
        val_nums = len(v.readlines())
    print('train nums:%d,val nums:%d' % (train_nums, val_nums))

    train_batch = data_loader.load_data(data_csv_path_train,
                                        image_type=args.image_type,
                                        image_size_before_crop=resize_height,
                                        labels_nums=labels_nums)
    train_images_batch = train_batch['image']
    train_labels_batch = train_batch['label']
    #     print('......................................................')
    #     print(train_images_batch)
    #     print(train_labels_batch)

    # Validation data: shuffling is not required
    val_batch = data_loader.load_data(data_csv_path_val,
                                      image_type=args.image_type,
                                      image_size_before_crop=resize_height,
                                      labels_nums=labels_nums,
                                      do_shuffle=False)
    val_images_batch = val_batch['image']
    val_labels_batch = val_batch['label']

    # Define the model:
    with slim.arg_scope(resnet_v1.resnet_arg_scope()):
        out, end_points = resnet_v1.resnet_v1_101(inputs=input_images,
                                                  num_classes=labels_nums,
                                                  is_training=is_training)

    # Specify the loss function: losses defined via tf.losses are added to the total
    # loss automatically, so no explicit add_loss() is needed
    tf.losses.softmax_cross_entropy(onehot_labels=input_labels,
                                    logits=out)  # add the cross-entropy loss (loss ≈ 1.6)
    # slim.losses.add_loss(my_loss)
    loss = tf.losses.get_total_loss(
        add_regularization_losses=False)  # regularization losses are not included here
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(input_labels, 1)),
                tf.float32))

    # Specify the optimization scheme:
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=base_lr)

    # global_step = tf.Variable(0, trainable=False)
    # learning_rate = tf.train.exponential_decay(0.05, global_step, 150, 0.9)
    # optimizer = tf.train.MomentumOptimizer(learning_rate=base_lr,momentum= 0.9)
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=base_lr)
    # # train_tensor = optimizer.minimize(loss, global_step)
    # train_op = slim.learning.create_train_op(loss, optimizer,global_step=global_step)

    # When `batch_norm` layers are used, each layer's moving `average` and `variance`
    # must be updated. These updates are not part of the normal training step, so they
    # have to be run manually as below.
    # Collect all ops that need updating via `tf.get_collection`
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Use TensorFlow control flow: run the update ops first, then the training op
    with tf.control_dependencies(update_ops):
        # create_train_op that ensures that when we evaluate it to get the loss,
        # the update_ops are done and the gradient updates are computed.
        # train_op = slim.learning.create_train_op(total_loss=loss,optimizer=optimizer)
        train_op = tf.train.AdadeltaOptimizer(
            learning_rate=base_lr).minimize(loss)

    # Training loop
    step_train(train_op, loss, accuracy, train_images_batch,
               train_labels_batch, train_nums, train_log_step,
               val_images_batch, val_labels_batch, val_nums, val_log_step,
               snapshot_prefix, snapshot)