Example #1
def predict(models_path, image_dir, labels_filename, labels_nums, data_format):
    [batch_size, resize_height, resize_width, depths] = data_format

    # Load the class names (one per line) so predictions can be reported by name.
    labels = np.loadtxt(labels_filename, str, delimiter='\t')
    input_images = tf.placeholder(dtype=tf.float32, shape=[None, resize_height, resize_width, depths], name='input')

    # To run inference with a different model, modify this block.
    with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope()):
        out, end_points = mobilenet_v1.mobilenet_v1(inputs=input_images, num_classes=labels_nums,
                                                    dropout_keep_prob=1.0, is_training=False,
                                                    global_pool=True)

    # Turn the logits into a softmax distribution, then take the most probable class.
    score = tf.nn.softmax(out, name='pre')
    class_id = tf.argmax(score, 1)

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess, models_path)
    images_list = glob.glob(os.path.join(image_dir, '*.jpg'))
    for image_path in images_list:
        im = read_image(image_path, resize_height, resize_width, normalization=True)
        im = im[np.newaxis, :]
        pre_score, pre_label = sess.run([score, class_id], feed_dict={input_images: im})
        pre_label = pre_label[0]
        max_score = pre_score[0, pre_label]
        print("{} is: pre label:{}, name:{}, score: {}".format(
            image_path, pre_label, labels[pre_label], max_score))
    sess.close()
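
A hypothetical invocation, for illustration only; the checkpoint path, image directory, label file, class count, and input size below are assumptions, not values from the original:

if __name__ == '__main__':
    data_format = [1, 224, 224, 3]  # [batch_size, resize_height, resize_width, depths]
    predict(models_path='models/mobilenet_v1.ckpt',  # hypothetical checkpoint
            image_dir='test_images',                 # hypothetical image folder
            labels_filename='label.txt',             # hypothetical label list, one name per line
            labels_nums=5,
            data_format=data_format)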
Example #2
def build_model():
    """Build the mobilenet_v1 model for evaluation.

    Returns:
      g: graph with rewrites after insertion of quantization ops and batch norm
        folding.
      eval_ops: eval ops for inference.
    """
    g = tf.Graph()
    with g.as_default():
        inputs, labels = imagenet_input(is_training=False)

        scope = mobilenet_v1.mobilenet_v1_arg_scope(is_training=False,
                                                    weight_decay=0.0)
        with slim.arg_scope(scope):
            logits, _ = mobilenet_v1.mobilenet_v1(
                inputs,
                is_training=False,
                depth_multiplier=FLAGS.depth_multiplier,
                num_classes=FLAGS.num_classes)

        if FLAGS.quantize:
            tf.contrib.quantize.create_eval_graph()

        eval_ops = metrics(logits, labels)

    return g, eval_ops
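
One way to drive this eval graph, sketched in the slim idiom; FLAGS.checkpoint_dir, FLAGS.eval_dir, FLAGS.num_examples, and the exact shape of eval_ops are assumptions, since metrics() and the original driver are not shown:

import math

def eval_model():
    # Sketch: restore the latest checkpoint and run the eval ops once.
    g, eval_ops = build_model()
    with g.as_default():
        num_batches = int(math.ceil(FLAGS.num_examples / float(FLAGS.batch_size)))
        slim.evaluation.evaluate_once(
            FLAGS.master,
            tf.train.latest_checkpoint(FLAGS.checkpoint_dir),  # assumed flag
            logdir=FLAGS.eval_dir,                             # assumed flag
            num_evals=num_batches,
            eval_op=eval_ops)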
Example #3
def build_model():
    """Builds graph for model to train with rewrites for quantization.

    Returns:
      g: Graph with fake quantization ops and batch norm folding suitable for
        training quantized weights.
      train_tensor: Train op for execution during training.
    """
    g = tf.Graph()
    with g.as_default(), tf.device(
            tf.train.replica_device_setter(FLAGS.ps_tasks)):
        inputs, labels = imagenet_input(is_training=True)
        with slim.arg_scope(
                mobilenet_v1.mobilenet_v1_arg_scope(is_training=True)):
            logits, _ = mobilenet_v1.mobilenet_v1(
                inputs,
                is_training=True,
                depth_multiplier=FLAGS.depth_multiplier,
                num_classes=FLAGS.num_classes)

        tf.losses.softmax_cross_entropy(labels, logits)

        # Rewrite the graph with fake-quant ops and folded batch norms.
        # quant_delay postpones quantization until quant_delay steps have run,
        # which lets the float model stabilize first for better accuracy.
        if FLAGS.quantize:
            tf.contrib.quantize.create_training_graph(
                quant_delay=get_quant_delay())

        total_loss = tf.losses.get_total_loss(name='total_loss')
        # Configure the learning rate using an exponential decay.
        num_epochs_per_decay = 2.5
        imagenet_size = 1271167
        decay_steps = int(imagenet_size / FLAGS.batch_size *
                          num_epochs_per_decay)

        learning_rate = tf.train.exponential_decay(
            get_learning_rate(),
            tf.train.get_or_create_global_step(),
            decay_steps,
            _LEARNING_RATE_DECAY_FACTOR,
            staircase=True)
        opt = tf.train.GradientDescentOptimizer(learning_rate)

        train_tensor = slim.learning.create_train_op(total_loss, optimizer=opt)

        slim.summaries.add_scalar_summary(total_loss, 'total_loss', 'losses')
        slim.summaries.add_scalar_summary(learning_rate, 'learning_rate',
                                          'training')
    return g, train_tensor
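
A matching launch sketch, again assuming flags (FLAGS.checkpoint_dir, FLAGS.number_of_steps, FLAGS.task, FLAGS.master) that this snippet does not define:

def train_model():
    # Sketch: hand the rewritten graph's train op to slim's training loop.
    g, train_tensor = build_model()
    with g.as_default():
        slim.learning.train(
            train_tensor,
            FLAGS.checkpoint_dir,                 # assumed flag: log/checkpoint dir
            is_chief=(FLAGS.task == 0),           # assumed flag: worker task id
            master=FLAGS.master,
            number_of_steps=FLAGS.number_of_steps)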
Example #4
  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)

    feature_map_layout = {
        'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
                       '', ''][:self._num_layers],
        'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers],
        'use_explicit_padding': self._use_explicit_padding,
        'use_depthwise': self._use_depthwise,
    }

    with tf.variable_scope('MobilenetV1',
                           reuse=self._reuse_weights) as scope:
      with slim.arg_scope(
          mobilenet_v1.mobilenet_v1_arg_scope(
              is_training=None, regularize_depthwise=True)):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams
              else context_manager.IdentityContextManager()):
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              final_endpoint='Conv2d_13_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              use_explicit_padding=self._use_explicit_padding,
              scope=scope)
      with slim.arg_scope(self._conv_hyperparams_fn()):
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=self._depth_multiplier,
            min_depth=self._min_depth,
            insert_1x1_conv=True,
            image_features=image_features)

    return list(feature_maps.values())  # list() so the return matches the documented type on Python 3
Example #5
    def _extract_proposal_features(self, preprocessed_inputs, scope):
        """Extracts first stage RPN features.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float32 tensor
        representing a batch of images.
      scope: A scope name.

    Returns:
      rpn_feature_map: A tensor with shape [batch, height, width, depth]
      activations: A dictionary mapping feature extractor tensor names to
        tensors

    Raises:
      InvalidArgumentError: If the spatial size of `preprocessed_inputs`
        (height or width) is less than 33.
      ValueError: If the created network is missing the required activation.
    """

        preprocessed_inputs.get_shape().assert_has_rank(4)
        preprocessed_inputs = shape_utils.check_min_image_dim(
            min_dim=33, image_tensor=preprocessed_inputs)

        with slim.arg_scope(
                mobilenet_v1.mobilenet_v1_arg_scope(
                    is_training=self._train_batch_norm,
                    weight_decay=self._weight_decay)):
            with tf.variable_scope('MobilenetV1',
                                   reuse=self._reuse_weights) as scope:
                params = {}
                if self._skip_last_stride:
                    params['conv_defs'] = _get_mobilenet_conv_no_last_stride_defs(
                        conv_depth_ratio_in_percentage=self._conv_depth_ratio_in_percentage)
                _, activations = mobilenet_v1.mobilenet_v1_base(
                    preprocessed_inputs,
                    final_endpoint='Conv2d_11_pointwise',
                    min_depth=self._min_depth,
                    depth_multiplier=self._depth_multiplier,
                    scope=scope,
                    **params)
        return activations['Conv2d_11_pointwise'], activations
Example #6
    def _extract_box_classifier_features(self, proposal_feature_maps, scope):
        """Extracts second stage box classifier features.

    Args:
      proposal_feature_maps: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, crop_height, crop_width, depth]
        representing the feature map cropped to each proposal.
      scope: A scope name (unused).

    Returns:
      proposal_classifier_features: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, height, width, depth]
        representing box classifier features for each proposal.
    """
        net = proposal_feature_maps

        conv_depth = 1024
        if self._skip_last_stride:
            conv_depth_ratio = float(
                self._conv_depth_ratio_in_percentage) / 100.0
            conv_depth = int(float(conv_depth) * conv_depth_ratio)

        depth = lambda d: max(int(d * 1.0), 16)
        with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights):
            with slim.arg_scope(
                    mobilenet_v1.mobilenet_v1_arg_scope(
                        is_training=self._train_batch_norm,
                        weight_decay=self._weight_decay)):
                with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                                    padding='SAME'):
                    net = slim.separable_conv2d(net,
                                                depth(conv_depth), [3, 3],
                                                depth_multiplier=1,
                                                stride=2,
                                                scope='Conv2d_12_pointwise')
                    return slim.separable_conv2d(net,
                                                 depth(conv_depth), [3, 3],
                                                 depth_multiplier=1,
                                                 stride=1,
                                                 scope='Conv2d_13_pointwise')
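
For intuition: if each cropped proposal feature map is 14×14 (a typical crop size, assumed here) and conv_depth keeps its default of 1024, the stride-2 separable convolution yields a 7×7×1024 map and the stride-1 one preserves that shape, so the box classifier receives a [batch_size * max_num_proposals, 7, 7, 1024] tensor.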
Example #7
def main(_):
  batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
  num_classes = 1001

  print("tsdeepak testing")
  print(FLAGS.input_dir)
  print(FLAGS.output_file)
  print(FLAGS.checkpoint_path)
  import os
  print("directory")
  print(os.path.dirname(os.path.realpath(__file__)))
  print(os.getcwd())

  tf.logging.set_verbosity(tf.logging.INFO)

  with tf.Graph().as_default():
    # Prepare graph
    x_input = tf.placeholder(tf.float32, shape=batch_shape)

    with slim.arg_scope(mobilenet.mobilenet_v1_arg_scope()):
      _, end_points = mobilenet.mobilenet_v1(
          x_input, num_classes=num_classes, is_training=False, spatial_squeeze=False)

    predicted_labels = tf.argmax(end_points['Predictions'], 1)

    # Run computation
    saver = tf.train.Saver(slim.get_model_variables())
    session_creator = tf.train.ChiefSessionCreator(
        scaffold=tf.train.Scaffold(saver=saver),
        checkpoint_filename_with_path=FLAGS.checkpoint_path,
        master=FLAGS.master)

    with tf.train.MonitoredSession(session_creator=session_creator) as sess:
      with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
        for filenames, images in load_images(FLAGS.input_dir, batch_shape):
          labels = sess.run(predicted_labels, feed_dict={x_input: images})
          for filename, label in zip(filenames, labels):
            out_file.write('{0},{1}\n'.format(filename, label))
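
main() relies on a load_images helper that is not shown. A minimal sketch of a compatible generator, assuming PNG inputs and the [-1, 1] pixel range MobileNet checkpoints expect:

import glob
import os
import numpy as np
from PIL import Image

def load_images(input_dir, batch_shape):
    """Yields (filenames, images) batches shaped like batch_shape (a sketch)."""
    images = np.zeros(batch_shape, dtype=np.float32)
    filenames = []
    idx = 0
    for filepath in sorted(glob.glob(os.path.join(input_dir, '*.png'))):  # assumed extension
        img = Image.open(filepath).convert('RGB')
        img = img.resize((batch_shape[2], batch_shape[1]))  # PIL takes (width, height)
        # Scale pixels from [0, 255] to [-1, 1].
        images[idx] = np.asarray(img, dtype=np.float32) / 255.0 * 2.0 - 1.0
        filenames.append(os.path.basename(filepath))
        idx += 1
        if idx == batch_shape[0]:
            yield filenames, images
            filenames = []
            images = np.zeros(batch_shape, dtype=np.float32)
            idx = 0
    if idx > 0:
        yield filenames, images  # final partial batch, zero-padded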
Example #8
  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)

    with tf.variable_scope('MobilenetV1',
                           reuse=self._reuse_weights) as scope:
      with slim.arg_scope(
          mobilenet_v1.mobilenet_v1_arg_scope(
              is_training=None, regularize_depthwise=True)):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams
              else context_manager.IdentityContextManager()):
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              final_endpoint='Conv2d_13_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              use_explicit_padding=self._use_explicit_padding,
              scope=scope)
      with slim.arg_scope(self._conv_hyperparams_fn()):
        feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
            base_feature_map_depth=0,
            num_layers=6,
            image_features={
                'image_features': image_features['Conv2d_11_pointwise']
            })
    return list(feature_maps.values())  # list() so the return matches the documented type on Python 3
Example #9
def train(train_record_file, train_log_step, train_param, val_record_file,
          val_log_step, labels_nums, data_shape, snapshot, snapshot_prefix):
    '''
    :param train_record_file: tfrecord file with the training data
    :param train_log_step: interval (in steps) for logging training progress
    :param train_param: training parameters [base_lr, max_steps]
    :param val_record_file: tfrecord file with the validation data
    :param val_log_step: interval (in steps) for logging validation progress
    :param labels_nums: number of classes
    :param data_shape: input data shape [batch_size, height, width, depths]
    :param snapshot: interval (in steps) for saving model checkpoints
    :param snapshot_prefix: filename prefix for saved checkpoints
    :return:
    '''
    [base_lr, max_steps] = train_param
    [batch_size, resize_height, resize_width, depths] = data_shape

    # Get the number of training and validation examples.
    train_nums = get_example_nums(train_record_file)
    val_nums = get_example_nums(val_record_file)
    print('train nums:%d,val nums:%d' % (train_nums, val_nums))

    # Read images and labels from the tfrecord files.
    # Training data should generally be shuffled (shuffle=True).
    train_images, train_labels = read_records(train_record_file,
                                              resize_height,
                                              resize_width,
                                              type='normalization')
    train_images_batch, train_labels_batch = get_batch_images(
        train_images,
        train_labels,
        batch_size=batch_size,
        labels_nums=labels_nums,
        one_hot=True,
        shuffle=True)
    # Validation data does not need to be shuffled.
    val_images, val_labels = read_records(val_record_file,
                                          resize_height,
                                          resize_width,
                                          type='normalization')
    val_images_batch, val_labels_batch = get_batch_images(
        val_images,
        val_labels,
        batch_size=batch_size,
        labels_nums=labels_nums,
        one_hot=True,
        shuffle=False)

    # Define the model:
    with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope()):
        # with slim.arg_scope([slim.conv2d, slim.separable_conv2d],normalizer_fn=slim.batch_norm):
        out, end_points = mobilenet_v1.mobilenet_v1(
            inputs=input_images,
            num_classes=labels_nums,
            dropout_keep_prob=keep_prob,
            is_training=is_training)

    # Specify the loss function: losses defined via tf.losses are added to the
    # loss collection automatically, so no explicit add_loss() call is needed.
    tf.losses.softmax_cross_entropy(onehot_labels=input_labels,
                                    logits=out)  # cross-entropy loss (~1.6)
    # slim.losses.add_loss(my_loss)
    loss = tf.losses.get_total_loss(
        add_regularization_losses=True)  # total loss including regularization (~2.2)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(input_labels, 1)),
                tf.float32))

    # Specify the optimization scheme:
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=base_lr)

    # global_step = tf.Variable(0, trainable=False)
    # learning_rate = tf.train.exponential_decay(0.05, global_step, 150, 0.9)
    #
    optimizer = tf.train.MomentumOptimizer(learning_rate=base_lr, momentum=0.9)
    # # train_tensor = optimizer.minimize(loss, global_step)
    # train_op = slim.learning.create_train_op(loss, optimizer,global_step=global_step)

    # When `batch_norm` layers are used, each layer's moving `average` and
    # `variance` must be updated explicitly; those updates are not part of the
    # regular training step, so collect all pending update ops via
    # `tf.get_collection`.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Use TensorFlow control flow: run the update ops first, then the train op.
    with tf.control_dependencies(update_ops):
        # create_train_op that ensures that when we evaluate it to get the loss,
        # the update_ops are done and the gradient updates are computed.
        # train_op = slim.learning.create_train_op(total_loss=loss,optimizer=optimizer)
        train_op = slim.learning.create_train_op(total_loss=loss,
                                                 optimizer=optimizer)

    # train_op=optimizer.minimize(loss)
    # Run the training loop.
    step_train(train_op=train_op,
               loss=loss,
               accuracy=accuracy,
               train_images_batch=train_images_batch,
               train_labels_batch=train_labels_batch,
               train_nums=train_nums,
               train_log_step=train_log_step,
               val_images_batch=val_images_batch,
               val_labels_batch=val_labels_batch,
               val_nums=val_nums,
               val_log_step=val_log_step,
               snapshot_prefix=snapshot_prefix,
               snapshot=snapshot)
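
The train() above and the two variants that follow (Examples #10 and #11) reference input_images, input_labels, keep_prob, and is_training without defining them; they are presumably module-level placeholders in the original script. A minimal sketch, with shapes assumed from data_shape:

input_images = tf.placeholder(dtype=tf.float32,
                              shape=[None, resize_height, resize_width, depths],
                              name='input')
input_labels = tf.placeholder(dtype=tf.int32,
                              shape=[None, labels_nums],
                              name='label')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')    # dropout keep probability
is_training = tf.placeholder(tf.bool, name='is_training')   # batch-norm train/eval switch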
Example #10
def train(data_csv_path_train, train_log_step, train_param, data_csv_path_val,
          val_log_step, labels_nums, data_shape, snapshot, snapshot_prefix):
    '''
    :param data_csv_path_train: csv file with the training data
    :param train_log_step: interval (in steps) for logging training progress
    :param train_param: training parameters [base_lr, max_steps]
    :param data_csv_path_val: csv file with the validation data
    :param val_log_step: interval (in steps) for logging validation progress
    :param labels_nums: number of classes
    :param data_shape: input data shape [batch_size, height, width, depths]
    :param snapshot: interval (in steps) for saving model checkpoints
    :param snapshot_prefix: filename prefix for saved checkpoints
    :return:
    '''
    [base_lr, max_steps] = train_param
    [batch_size, resize_height, resize_width, depths] = data_shape

    # Get the number of training and validation examples.
    with open(data_csv_path_train, 'r') as f:
        train_nums = len(f.readlines())
    with open(data_csv_path_val, 'r') as v:
        val_nums = len(v.readlines())
    print('train nums:%d,val nums:%d' % (train_nums, val_nums))

    train_batch = data_loader.load_data(data_csv_path_train,
                                        image_type=args.image_type,
                                        image_size_before_crop=resize_height,
                                        labels_nums=labels_nums)
    train_images_batch = train_batch['image']
    train_labels_batch = train_batch['label']
    #     print('......................................................')
    #     print(train_images_batch)
    #     print(train_labels_batch)

    # Validation data does not need to be shuffled.
    val_batch = data_loader.load_data(data_csv_path_val,
                                      image_type=args.image_type,
                                      image_size_before_crop=resize_height,
                                      labels_nums=labels_nums,
                                      do_shuffle=False)
    val_images_batch = val_batch['image']
    val_labels_batch = val_batch['label']

    # Define the model:
    with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope()):
        out, end_points = mobilenet_v1.mobilenet_v1(
            inputs=input_images,
            num_classes=labels_nums,
            dropout_keep_prob=keep_prob,
            is_training=is_training,
            global_pool=True)

    # Specify the loss function: losses defined via tf.losses are added to the
    # loss collection automatically, so no explicit add_loss() call is needed.
    tf.losses.softmax_cross_entropy(onehot_labels=input_labels,
                                    logits=out)  # cross-entropy loss (~1.6)
    # slim.losses.add_loss(my_loss)
    loss = tf.losses.get_total_loss(
        add_regularization_losses=False)  # regularization losses excluded here
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(input_labels, 1)),
                tf.float32))

    # Specify the optimization scheme:
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=base_lr)

    # global_step = tf.Variable(0, trainable=False)
    # learning_rate = tf.train.exponential_decay(0.05, global_step, 150, 0.9)
    # optimizer = tf.train.MomentumOptimizer(learning_rate=base_lr,momentum= 0.9)
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=base_lr)
    # # train_tensor = optimizer.minimize(loss, global_step)
    # train_op = slim.learning.create_train_op(loss, optimizer,global_step=global_step)

    # When `batch_norm` layers are used, each layer's moving `average` and
    # `variance` must be updated explicitly; those updates are not part of the
    # regular training step, so collect all pending update ops via
    # `tf.get_collection`.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Use TensorFlow control flow: run the update ops first, then the train op.
    with tf.control_dependencies(update_ops):
        # create_train_op that ensures that when we evaluate it to get the loss,
        # the update_ops are done and the gradient updates are computed.
        # train_op = slim.learning.create_train_op(total_loss=loss,optimizer=optimizer)
        # train_op = slim.learning.create_train_op(total_loss=loss, optimizer=optimizer)
        train_op = tf.train.AdadeltaOptimizer(
            learning_rate=base_lr).minimize(loss)

    # Run the training loop.
    step_train(train_op, loss, accuracy, train_images_batch,
               train_labels_batch, train_nums, train_log_step,
               val_images_batch, val_labels_batch, val_nums, val_log_step,
               snapshot_prefix, snapshot)
Example #11
def train(train_record_file,
          train_log_step,
          train_param,
          val_record_file,
          val_log_step,
          labels_nums,
          data_shape,
          snapshot,
          snapshot_prefix):
    '''
    Main training routine:
    loads the training and validation data,
    defines the network output,
    defines the loss,
    defines regularization here (so any regularization set on conv2d inside the
    network should be removed),
    defines the train op,
    defines the accuracy metric,
    and runs the session loop.

    :param train_record_file: tfrecord file with the training data
    :param train_log_step: interval (in steps) for logging training progress
    :param train_param: training parameters [base_lr, max_steps]
    :param val_record_file: tfrecord file with the validation data
    :param val_log_step: interval (in steps) for logging validation progress
    :param labels_nums: number of classes
    :param data_shape: input data shape [batch_size, height, width, depths]
    :param snapshot: interval (in steps) for saving model checkpoints
    :param snapshot_prefix: filename prefix for saved checkpoints
    :return:
    '''
    [base_lr, max_steps] = train_param  # base_lr = 0.001 (learning rate); max_steps = 100000 (iterations)

    [batch_size, resize_height, resize_width, depths] = data_shape

    # Get the number of training and validation examples.
    train_nums = get_example_nums(train_record_file)
    val_nums = get_example_nums(val_record_file)
    print('train nums:%d,val nums:%d' % (train_nums, val_nums))

    # Read images and labels from the tfrecord files.
    # Training data should generally be shuffled (shuffle=True).
    train_images, train_labels = read_records(train_record_file, resize_height, resize_width, type='normalization')
    train_images_batch, train_labels_batch = get_batch_images(train_images, train_labels,
                                                              batch_size=batch_size, labels_nums=labels_nums,
                                                              one_hot=True, shuffle=True)
    # Validation data does not need to be shuffled.
    val_images, val_labels = read_records(val_record_file, resize_height, resize_width, type='normalization')
    val_images_batch, val_labels_batch = get_batch_images(val_images, val_labels,
                                                          batch_size=batch_size, labels_nums=labels_nums,
                                                          one_hot=True, shuffle=False)

    # Define the model:
    with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope()):
        out, end_points = mobilenet_v1.mobilenet_v1(inputs=input_images, num_classes=labels_nums,
                                                    dropout_keep_prob=keep_prob, is_training=is_training,
                                                    global_pool=True)

    # Specify the loss function: losses defined via tf.losses are added to the
    # loss collection automatically, so no explicit add_loss() call is needed.
    tf.losses.softmax_cross_entropy(onehot_labels=input_labels, logits=out)  # cross-entropy loss (~1.6)
    # slim.losses.add_loss(my_loss)
    loss = tf.losses.get_total_loss(add_regularization_losses=True)  # total loss including regularization (~2.2)

    # Specify the optimization scheme:

    # When `batch_norm` layers are used, each layer's moving `average` and
    # `variance` must be updated explicitly; those updates are not part of the
    # regular training step, so collect all pending update ops via
    # `tf.get_collection`.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # tf.control_dependencies ensures update_ops run before the ops defined
    # inside the with-block.
    with tf.control_dependencies(update_ops):
        print("update_ops:{}".format(update_ops))
        # create_train_op that ensures that when we evaluate it to get the loss,
        # the update_ops are done and the gradient updates are computed.
        # train_op = tf.train.MomentumOptimizer(learning_rate=base_lr, momentum=0.9).minimize(loss)
        train_op = tf.train.AdadeltaOptimizer(learning_rate=base_lr).minimize(loss)

    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(input_labels, 1)), tf.float32))
    # Run the training loop.
    step_train(train_op=train_op, loss=loss, accuracy=accuracy,
               train_images_batch=train_images_batch,
               train_labels_batch=train_labels_batch,
               train_nums=train_nums,
               train_log_step=train_log_step,
               val_images_batch=val_images_batch,
               val_labels_batch=val_labels_batch,
               val_nums=val_nums,
               val_log_step=val_log_step,
               snapshot_prefix=snapshot_prefix,
               snapshot=snapshot)
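
All three train() variants hand off to step_train, which is not shown. A minimal sketch of such a loop, assuming a queue-runner input pipeline, the placeholders sketched after Example #9, and a fixed step budget (the originals carry max_steps in train_param):

def step_train(train_op, loss, accuracy,
               train_images_batch, train_labels_batch, train_nums, train_log_step,
               val_images_batch, val_labels_batch, val_nums, val_log_step,
               snapshot_prefix, snapshot):
    max_steps = 100000  # assumed; train_nums/val_nums would size full eval passes
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run([tf.global_variables_initializer(),
                  tf.local_variables_initializer()])
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for step in range(max_steps):
            batch_images, batch_labels = sess.run([train_images_batch,
                                                   train_labels_batch])
            _, train_loss = sess.run([train_op, loss],
                                     feed_dict={input_images: batch_images,
                                                input_labels: batch_labels,
                                                keep_prob: 0.5,
                                                is_training: True})
            if step % train_log_step == 0:
                print('step %d: train loss %.4f' % (step, train_loss))
            if step % val_log_step == 0:
                v_images, v_labels = sess.run([val_images_batch, val_labels_batch])
                val_acc = sess.run(accuracy,
                                   feed_dict={input_images: v_images,
                                              input_labels: v_labels,
                                              keep_prob: 1.0,
                                              is_training: False})
                print('step %d: val acc %.4f' % (step, val_acc))
            if step > 0 and step % snapshot == 0:
                saver.save(sess, snapshot_prefix, global_step=step)
        coord.request_stop()
        coord.join(threads)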
Example #12
    def extract_features(self, preprocessed_inputs):
        """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
        preprocessed_inputs = shape_utils.check_min_image_dim(
            33, preprocessed_inputs)

        with tf.variable_scope('MobilenetV1',
                               reuse=self._reuse_weights) as scope:
            with slim.arg_scope(
                    mobilenet_v1.mobilenet_v1_arg_scope(
                        is_training=None, regularize_depthwise=True)):
                with (slim.arg_scope(self._conv_hyperparams_fn())
                      if self._override_base_feature_extractor_hyperparams else
                      context_manager.IdentityContextManager()):
                    _, image_features = mobilenet_v1.mobilenet_v1_base(
                        ops.pad_to_multiple(preprocessed_inputs,
                                            self._pad_to_multiple),
                        final_endpoint='Conv2d_13_pointwise',
                        min_depth=self._min_depth,
                        depth_multiplier=self._depth_multiplier,
                        conv_defs=self._conv_defs,
                        use_explicit_padding=self._use_explicit_padding,
                        scope=scope)

            depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
            with slim.arg_scope(self._conv_hyperparams_fn()):
                with tf.variable_scope('fpn', reuse=self._reuse_weights):
                    feature_blocks = [
                        'Conv2d_3_pointwise', 'Conv2d_5_pointwise',
                        'Conv2d_11_pointwise', 'Conv2d_13_pointwise'
                    ]
                    base_fpn_max_level = min(self._fpn_max_level, 5)
                    feature_block_list = []
                    for level in range(self._fpn_min_level,
                                       base_fpn_max_level + 1):
                        feature_block_list.append(feature_blocks[level - 2])
                    fpn_features = feature_map_generators.fpn_top_down_feature_maps(
                        [(key, image_features[key])
                         for key in feature_block_list],
                        depth=depth_fn(self._additional_layer_depth),
                        use_depthwise=self._use_depthwise,
                        use_explicit_padding=self._use_explicit_padding)
                    feature_maps = []
                    for level in range(self._fpn_min_level,
                                       base_fpn_max_level + 1):
                        feature_maps.append(fpn_features['top_down_{}'.format(
                            feature_blocks[level - 2])])
                    last_feature_map = fpn_features['top_down_{}'.format(
                        feature_blocks[base_fpn_max_level - 2])]
                    # Construct coarse features
                    padding = 'VALID' if self._use_explicit_padding else 'SAME'
                    kernel_size = 3
                    for i in range(base_fpn_max_level + 1,
                                   self._fpn_max_level + 1):
                        if self._use_depthwise:
                            conv_op = functools.partial(slim.separable_conv2d,
                                                        depth_multiplier=1)
                        else:
                            conv_op = slim.conv2d
                        if self._use_explicit_padding:
                            last_feature_map = ops.fixed_padding(
                                last_feature_map, kernel_size)
                        last_feature_map = conv_op(
                            last_feature_map,
                            num_outputs=depth_fn(self._additional_layer_depth),
                            kernel_size=[kernel_size, kernel_size],
                            stride=2,
                            padding=padding,
                            scope='bottom_up_Conv2d_{}'.format(
                                i - base_fpn_max_level + 13))
                        feature_maps.append(last_feature_map)
        return feature_maps
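
A small worked example of the level-to-endpoint indexing above, using hypothetical values fpn_min_level=3 and fpn_max_level=7:

feature_blocks = ['Conv2d_3_pointwise', 'Conv2d_5_pointwise',
                  'Conv2d_11_pointwise', 'Conv2d_13_pointwise']
fpn_min_level, fpn_max_level = 3, 7            # assumed configuration
base_fpn_max_level = min(fpn_max_level, 5)     # -> 5
top_down = [feature_blocks[level - 2]
            for level in range(fpn_min_level, base_fpn_max_level + 1)]
# top_down == ['Conv2d_5_pointwise', 'Conv2d_11_pointwise', 'Conv2d_13_pointwise']
extra = ['bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13)
         for i in range(base_fpn_max_level + 1, fpn_max_level + 1)]
# extra == ['bottom_up_Conv2d_14', 'bottom_up_Conv2d_15']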
Example #13
    def extract_features(self, preprocessed_inputs):
        """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]

    Raises:
      ValueError: if image height or width are not 256 pixels.
    """
        image_shape = preprocessed_inputs.get_shape()
        image_shape.assert_has_rank(4)
        image_height = image_shape[1].value
        image_width = image_shape[2].value

        if image_height is None or image_width is None:
            shape_assert = tf.Assert(
                tf.logical_and(tf.equal(tf.shape(preprocessed_inputs)[1], 256),
                               tf.equal(tf.shape(preprocessed_inputs)[2],
                                        256)),
                ['image size must be 256 in both height and width.'])
            with tf.control_dependencies([shape_assert]):
                preprocessed_inputs = tf.identity(preprocessed_inputs)
        elif image_height != 256 or image_width != 256:
            raise ValueError(
                'image size must be = 256 in both height and width;'
                ' image dim = %d,%d' % (image_height, image_width))

        feature_map_layout = {
            'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''],
            'layer_depth': [-1, -1, 512, 256, 256],
            'conv_kernel_size': [-1, -1, 3, 3, 2],
            'use_explicit_padding': self._use_explicit_padding,
            'use_depthwise': self._use_depthwise,
        }

        with tf.variable_scope('MobilenetV1',
                               reuse=self._reuse_weights) as scope:
            with slim.arg_scope(
                    mobilenet_v1.mobilenet_v1_arg_scope(is_training=None)):
                with (slim.arg_scope(self._conv_hyperparams_fn())
                      if self._override_base_feature_extractor_hyperparams else
                      context_manager.IdentityContextManager()):
                    _, image_features = mobilenet_v1.mobilenet_v1_base(
                        ops.pad_to_multiple(preprocessed_inputs,
                                            self._pad_to_multiple),
                        final_endpoint='Conv2d_13_pointwise',
                        min_depth=self._min_depth,
                        depth_multiplier=self._depth_multiplier,
                        use_explicit_padding=self._use_explicit_padding,
                        scope=scope)
            with slim.arg_scope(self._conv_hyperparams_fn()):
                feature_maps = feature_map_generators.multi_resolution_feature_maps(
                    feature_map_layout=feature_map_layout,
                    depth_multiplier=self._depth_multiplier,
                    min_depth=self._min_depth,
                    insert_1x1_conv=True,
                    image_features=image_features)

        return list(feature_maps.values())  # list() so the return matches the documented type on Python 3
Example #14
    def extract_features(self, preprocessed_inputs):
        """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
        preprocessed_inputs = shape_utils.check_min_image_dim(
            33, preprocessed_inputs)

        with tf.variable_scope('MobilenetV1',
                               reuse=self._reuse_weights) as scope:
            with slim.arg_scope(
                    mobilenet_v1.mobilenet_v1_arg_scope(
                        is_training=None, regularize_depthwise=True)):
                with (slim.arg_scope(self._conv_hyperparams_fn())
                      if self._override_base_feature_extractor_hyperparams else
                      context_manager.IdentityContextManager()):
                    _, image_features = mobilenet_v1.mobilenet_v1_base(
                        ops.pad_to_multiple(preprocessed_inputs,
                                            self._pad_to_multiple),
                        final_endpoint='Conv2d_13_pointwise',
                        min_depth=self._min_depth,
                        depth_multiplier=self._depth_multiplier,
                        use_explicit_padding=self._use_explicit_padding,
                        scope=scope)

            depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
            with slim.arg_scope(self._conv_hyperparams_fn()):
                with tf.variable_scope('fpn', reuse=self._reuse_weights):
                    fpn_features = feature_map_generators.fpn_top_down_feature_maps(
                        [(key, image_features[key]) for key in [
                            'Conv2d_5_pointwise', 'Conv2d_11_pointwise',
                            'Conv2d_13_pointwise'
                        ]],
                        depth=depth_fn(256))
                    last_feature_map = fpn_features[
                        'top_down_Conv2d_13_pointwise']
                    coarse_features = {}
                    for i in range(14, 16):
                        last_feature_map = slim.conv2d(
                            last_feature_map,
                            num_outputs=depth_fn(256),
                            kernel_size=[3, 3],
                            stride=2,
                            padding='SAME',
                            scope='bottom_up_Conv2d_{}'.format(i))
                        coarse_features['bottom_up_Conv2d_{}'.format(
                            i)] = last_feature_map
        return [
            fpn_features['top_down_Conv2d_5_pointwise'],
            fpn_features['top_down_Conv2d_11_pointwise'],
            fpn_features['top_down_Conv2d_13_pointwise'],
            coarse_features['bottom_up_Conv2d_14'],
            coarse_features['bottom_up_Conv2d_15']
        ]
Example #15
def evaluate(path, resultfile, recordfile):
    '''
    with tf.device('/cpu:0'):
        test_set = os.path.join(CN.folder_of_dataset,'ignorepositiontest.tfrecords')
        capacity = 1000+3*CN.BATCH_SIZE
        test_data,test_label = CN.read_tfrecords(test_set)
        xtest,y_test = tf.train.shuffle_batch([test_data,test_label],batch_size = 20000,capacity = capacity,min_after_dequeue = 30)
    '''
        #train_set = os.path.join(CNN_train2.folder_of_dataset,'140*400train.tfrecords')
        #capacity = 1000+3*CNN_train2.BATCH_SIZE
        #train_data,train_label = CNN_train2.read_tfrecords(train_set)
        #xtrain,y_train = tf.train.shuffle_batch([train_data,train_label],batch_size = CNN_train2.BATCH_SIZE,capacity = capacity,min_after_dequeue = 30)

    '''
    with tf.Graph() as_default() as g:
        x = tf.placeholder(tf.float32,[None,INPUT_NODE],name = 'x-input')
        y_ = tf.placeholder(tf.float32,[None,OUTPUT_NODE],name = 'y-input')
    validate_feed = {x:testdata,y_:testlabel}
    '''
    inputdata, arraylength = GetDate(path, recordfile)
    inputdata = tf.convert_to_tensor(inputdata)
    inputdata = tf.reshape(inputdata, (arraylength, 140, 400, 1))
    inputdata = tf.cast(inputdata, dtype=tf.float32)
    train = False
    labels_nums = 2
    keep_prob = 1.0
    with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope()):
        out, end_points = mobilenet_v1.mobilenet_v1(inputs=inputdata, num_classes=labels_nums,
                                                    dropout_keep_prob=keep_prob,
                                                    is_training=train, global_pool=True)
    #ytest = CNN_inference.inference(inputdata,train,None)
    outputprobability = tf.nn.softmax(out)
    #ytrain = CNN_inference.inference(xtrain,train,None)
    #correct_prediction_test = tf.equal(tf.argmax(ytest,1),tf.argmax(y_test,1))
    #correct_prediction_train = tf.equal(tf.argmax(ytrain,1),tf.argmax(y_train,1))
    #accuracy_test = tf.reduce_mean(tf.cast(correct_prediction_test,tf.float32))
    #accuracy_train = tf.reduce_mean(tf.cast(correct_prediction_train,tf.float32))
    #variable_averages = tf.train.ExponentialMovingAverage(0.99)
    #variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver()
    #resultfile = './result.txt'
    #while True:
    with tf.Session() as sess:
        tf.get_variable_scope().reuse_variables()
        #coord = tf.train.Coordinator()
        #threads = tf.train.start_queue_runners(sess = sess,coord = coord)
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess,ckpt.model_checkpoint_path)
            #global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
            #print ('**************************')
            #print (global_step)
            #global_step = tf.cast(global_step,dtype=tf.int32)
            #global_step
            #print (type(global_step))
            probabilitylist = sess.run(outputprobability)
            probabilityresult = probabilitylist
            print(type(probabilitylist))
            print(probabilityresult)
            np.savetxt(resultfile, probabilityresult)
            #print (accuracy_score_test)
            #print (type(accuracy_score_test))
            #print ('After %s training step(s),validation''test_accury = %g'%(global_step,accuracy_score_test))
            #print ('After %smtraining step(s),validation''train_accury = %g'%(global_step,accuracy_score_train))
        else:
            print('No checkpoint file found')
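
A hypothetical invocation; all three paths are placeholders, and GetDate and MODEL_SAVE_PATH must be defined elsewhere:

if __name__ == '__main__':
    evaluate(path='./testdata',          # hypothetical input directory
             resultfile='./result.txt',  # where the softmax probabilities are saved
             recordfile='./record.txt')  # hypothetical record list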