Example #1
# Assumed imports for this snippet; inception_v1 comes from the TF-Slim
# models repo (tensorflow/models/research/slim), and read_image is a
# project helper defined elsewhere.
import os
import glob
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import inception_v1


def predict(models_path, image_dir, labels_filename, labels_nums, data_format):
    [batch_size, resize_height, resize_width, depths] = data_format

    labels = np.loadtxt(labels_filename, str, delimiter='\t')
    input_images = tf.placeholder(dtype=tf.float32, shape=[None, resize_height, resize_width, depths], name='input')

    # To run prediction with a different model, modify this block
    with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
        out, end_points = inception_v1.inception_v1(inputs=input_images, num_classes=labels_nums, dropout_keep_prob=1.0, is_training=False)

    # Apply softmax to the logits, then take the class with the highest probability
    score = tf.nn.softmax(out,name='pre')
    class_id = tf.argmax(score, 1)

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess, models_path)
    images_list = glob.glob(os.path.join(image_dir, '*.jpg'))
    for image_path in images_list:
        im = read_image(image_path, resize_height, resize_width, normalization=True)
        im = im[np.newaxis, :]  # add a batch dimension
        pre_score, pre_label = sess.run([score, class_id], feed_dict={input_images: im})
        max_score = pre_score[0, pre_label]
        print("{} is: pre label:{}, name:{}, score: {}".format(
            image_path, pre_label, labels[pre_label], max_score))
    sess.close()
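
A minimal invocation sketch for predict(); the checkpoint path, image directory, and label file are hypothetical values mirroring Example #5:

model_path = './logs/model.ckpt-11999'  # hypothetical checkpoint path
data_format = [1, 224, 224, 3]          # [batch_size, resize_height, resize_width, depths]
predict(model_path, './test_images', './dataset/label.txt', 5, data_format)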
Example #2
    def testTrainEvalWithReuse(self):
        train_batch_size = 5
        eval_batch_size = 2
        height, width = 224, 224
        num_classes = 1000

        train_inputs = random_ops.random_uniform(
            (train_batch_size, height, width, 3))
        inception_v1.inception_v1(train_inputs, num_classes)
        eval_inputs = random_ops.random_uniform(
            (eval_batch_size, height, width, 3))
        logits, _ = inception_v1.inception_v1(eval_inputs,
                                              num_classes,
                                              reuse=True)
        predictions = math_ops.argmax(logits, 1)

        with self.test_session() as sess:
            sess.run(variables.global_variables_initializer())
            output = sess.run(predictions)
            self.assertEqual(output.shape, (eval_batch_size,))
Example #3
    def testLogitsNotSqueezed(self):
        num_classes = 25
        images = random_ops.random_uniform([1, 224, 224, 3])
        logits, _ = inception_v1.inception_v1(images,
                                              num_classes=num_classes,
                                              spatial_squeeze=False)

        with self.test_session() as sess:
            variables.global_variables_initializer().run()
            logits_out = sess.run(logits)
            self.assertListEqual(list(logits_out.shape),
                                 [1, 1, 1, num_classes])
Example #4
    def testBuildClassificationNetwork(self):
        batch_size = 5
        height, width = 224, 224
        num_classes = 1000

        inputs = random_ops.random_uniform((batch_size, height, width, 3))
        logits, end_points = inception_v1.inception_v1(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
        self.assertListEqual(logits.get_shape().as_list(),
                             [batch_size, num_classes])
        self.assertTrue('Predictions' in end_points)
        self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
                             [batch_size, num_classes])
Example #5
def predict_images():

    models_path = './logs/model.ckpt-11999'
    images_dir = './test_images'
    labels_txt_file = './dataset/label.txt'

    num_classes = 5
    resize_height = 224
    resize_width = 224
    channels = 3

    images_list = glob.glob(os.path.join(images_dir,
                                         '*.jpg'))  # list of paths matching the pattern

    # fields in label.txt are tab-separated
    labels = np.loadtxt(
        labels_txt_file, str, delimiter='\t'
    )  # labels = ['flower' 'guitar' 'animal' 'houses' 'plane']
    input_images = tf.placeholder(
        dtype=tf.float32,
        shape=[None, resize_height, resize_width, channels],
        name='input')

    with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
        out, end_points = inception_v1.inception_v1(inputs=input_images,
                                                    num_classes=num_classes,
                                                    dropout_keep_prob=1.0,
                                                    is_training=False)
    score = tf.nn.softmax(out)
    class_id = tf.argmax(score, axis=1)  # index of the highest score

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        saver = tf.train.Saver()
        saver.restore(sess, models_path)

        for image_name in images_list:
            image = read_image(image_name,
                               resize_height,
                               resize_width,
                               normalization=True)
            image = image[np.newaxis, :]  # add a batch dimension
            predict_score, predict_id = sess.run(
                [score, class_id], feed_dict={input_images: image})
            max_score = predict_score[0, predict_id]  # score for the predicted id (predict_score is 2-D)
            print("{} is: label:{},name:{} score: {}".format(
                image_name, predict_id, labels[predict_id], max_score))
Example #6
    def testEvaluation(self):
        batch_size = 2
        height, width = 224, 224
        num_classes = 1000

        eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
        logits, _ = inception_v1.inception_v1(eval_inputs,
                                              num_classes,
                                              is_training=False)
        predictions = math_ops.argmax(logits, 1)

        with self.test_session() as sess:
            sess.run(variables.global_variables_initializer())
            output = sess.run(predictions)
            self.assertEqual(output.shape, (batch_size,))
Example #7
    def testUnknownBatchSize(self):
        batch_size = 1
        height, width = 224, 224
        num_classes = 1000

        inputs = array_ops.placeholder(dtypes.float32,
                                       (None, height, width, 3))
        logits, _ = inception_v1.inception_v1(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
        images = random_ops.random_uniform((batch_size, height, width, 3))

        with self.test_session() as sess:
            sess.run(variables.global_variables_initializer())
            output = sess.run(logits, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch_size, num_classes))
Example #8
    def testUnknownImageShape(self):
        ops.reset_default_graph()
        batch_size = 2
        height, width = 224, 224
        num_classes = 1000
        input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
        with self.test_session() as sess:
            inputs = array_ops.placeholder(dtypes.float32,
                                           shape=(batch_size, None, None, 3))
            logits, end_points = inception_v1.inception_v1(inputs, num_classes)
            self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
            self.assertListEqual(logits.get_shape().as_list(),
                                 [batch_size, num_classes])
            pre_pool = end_points['Mixed_5c']
            feed_dict = {inputs: input_np}
            variables.global_variables_initializer().run()
            pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
            self.assertListEqual(list(pre_pool_out.shape),
                                 [batch_size, 7, 7, 1024])
Example #9
def train(train_tfrecords_file, base_lr, max_steps, val_tfrecords_file,
          num_classes, data_shape, train_log_dir, val_nums):
    """

    :param train_tfrecords_file: 训练数据集的tfrecords文件
    :param base_lr: 学习率
    :param max_steps: 迭代次数
    :param val_tfrecords_file: 验证数据集的tfrecords文件
    :param num_classes: 分类个数
    :param data_shape: 数据形状[batch_size, resize_height, resize_width, channels]
    :param train_log_dir: 模型文件的存放位置
    :return:
    """
    [batch_size, resize_height, resize_width, channels] = data_shape

    # Read the training data
    train_images, train_labels = read_tfrecords(train_tfrecords_file,
                                                resize_height,
                                                resize_width,
                                                output_model='normalization')
    train_batch_images, train_batch_labels = get_batch_images(
        train_images,
        train_labels,
        batch_size=batch_size,
        num_classes=num_classes,
        one_hot=True,
        shuffle=True)
    # Read the validation data; the validation set does not need to be shuffled
    val_images, val_labels = read_tfrecords(val_tfrecords_file,
                                            resize_height,
                                            resize_width,
                                            output_model='normalization')
    val_batch_images, val_batch_labels = get_batch_images(
        val_images,
        val_labels,
        batch_size=batch_size,
        num_classes=num_classes,
        one_hot=True,
        shuffle=False)

    # Placeholders fed at each step (missing in the original snippet)
    input_images = tf.placeholder(tf.float32,
                                  shape=[None, resize_height, resize_width, channels],
                                  name='input')
    input_labels = tf.placeholder(tf.float32,
                                  shape=[None, num_classes],
                                  name='label')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    is_training = tf.placeholder(tf.bool, name='is_training')

    # inception_v1_arg_scope() must be called; the parentheses are required
    with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
        out, end_points = inception_v1.inception_v1(
            inputs=input_images,
            num_classes=num_classes,
            is_training=is_training,
            dropout_keep_prob=keep_prob)

    loss = tf.losses.softmax_cross_entropy(onehot_labels=input_labels,
                                           logits=out)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(input_labels, 1)),
                tf.float32)) * 100.0

    optimizer = tf.train.MomentumOptimizer(learning_rate=base_lr,
                                           momentum=0.9)  # other optimizers can be used here

    # When `batch_norm` layers are used, the moving `average` and `variance`
    # of each layer must be updated; this is not part of the normal training
    # step, so the update ops are run manually as below.
    with tf.control_dependencies(tf.get_collection(
            tf.GraphKeys.UPDATE_OPS)):  # run the update ops, then the train op
        train_op = slim.learning.create_train_op(total_loss=loss,
                                                 optimizer=optimizer)

    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for steps in np.arange(max_steps):
            input_batch_images, input_batch_labels = sess.run(
                [train_batch_images, train_batch_labels])
            _, train_loss = sess.run(
                [train_op, loss],
                feed_dict={
                    input_images: input_batch_images,
                    input_labels: input_batch_labels,
                    keep_prob: 0.8,
                    is_training: True
                })
            # Log training loss and accuracy
            if steps % 50 == 0 or (steps + 1) == max_steps:
                train_acc = sess.run(accuracy,
                                     feed_dict={
                                         input_images: input_batch_images,
                                         input_labels: input_batch_labels,
                                         keep_prob: 1.0,
                                         is_training: False
                                     })
                print('Step: %d, loss: %.4f, accuracy: %.4f' %
                      (steps, train_loss, train_acc))

            # Evaluate loss and accuracy on the validation set
            if steps % 200 == 0 or (steps + 1) == max_steps:
                val_loss, val_acc = evaluation(sess, loss, accuracy,
                                               val_batch_images,
                                               val_batch_labels, val_nums)
                print(
                    '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' %
                    (steps, val_loss, val_acc))

            # Save a model checkpoint every 2000 steps
            if steps % 2000 == 0 or (steps + 1) == max_steps:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=steps)

        coord.request_stop()
        coord.join(threads)
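
A hypothetical call to the train() above; the file paths and hyperparameters are placeholder values, with data_shape packed as [batch_size, resize_height, resize_width, channels]:

train(train_tfrecords_file='./dataset/train.tfrecords',
      base_lr=0.01,
      max_steps=12000,
      val_tfrecords_file='./dataset/val.tfrecords',
      num_classes=5,
      data_shape=[32, 224, 224, 3],
      train_log_dir='./logs',
      val_nums=500)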

def train(train_record_file,
          train_log_step,
          train_param,
          val_record_file,
          val_log_step,
          labels_nums,
          data_shape,
          snapshot,
          snapshot_prefix):
    '''
    :param train_record_file: tfrecord file for training
    :param train_log_step: interval (in steps) for logging training progress
    :param train_param: training parameters [base_lr, max_steps]
    :param val_record_file: tfrecord file for validation
    :param val_log_step: interval (in steps) for logging validation progress
    :param labels_nums: number of labels
    :param data_shape: input data shape
    :param snapshot: interval (in steps) for saving model checkpoints
    :param snapshot_prefix: filename prefix for saved model checkpoints
    :return:
    '''
    [base_lr, max_steps] = train_param
    [batch_size, resize_height, resize_width, depths] = data_shape

    # Number of training and validation examples
    train_nums = get_example_nums(train_record_file)
    val_nums = get_example_nums(val_record_file)
    print('train nums:%d, val nums:%d' % (train_nums, val_nums))

    # Read images and labels from the tfrecord files.
    # Training data should generally be shuffled (shuffle=True).
    train_images, train_labels = read_records(train_record_file, resize_height, resize_width, type='normalization')
    train_images_batch, train_labels_batch = get_batch_images(train_images, train_labels,
                                                              batch_size=batch_size, labels_nums=labels_nums,
                                                              one_hot=True, shuffle=True)
    # Validation data does not need to be shuffled
    val_images, val_labels = read_records(val_record_file, resize_height, resize_width, type='normalization')
    val_images_batch, val_labels_batch = get_batch_images(val_images, val_labels,
                                                          batch_size=batch_size, labels_nums=labels_nums,
                                                          one_hot=True, shuffle=False)

    # Placeholders fed at each step (missing in the original snippet)
    input_images = tf.placeholder(tf.float32,
                                  shape=[None, resize_height, resize_width, depths],
                                  name='input')
    input_labels = tf.placeholder(tf.float32,
                                  shape=[None, labels_nums],
                                  name='label')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    is_training = tf.placeholder(tf.bool, name='is_training')

    # Define the model:
    with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
        out, end_points = inception_v1.inception_v1(inputs=input_images,
                                                    num_classes=labels_nums,
                                                    dropout_keep_prob=keep_prob,
                                                    is_training=is_training)

    # Specify the loss function: losses defined via tf.losses are added to the
    # loss collection automatically, so no explicit add_loss() call is needed.
    tf.losses.softmax_cross_entropy(onehot_labels=input_labels, logits=out)  # cross-entropy loss (author notes loss=1.6)
    # slim.losses.add_loss(my_loss)
    loss = tf.losses.get_total_loss(add_regularization_losses=False)  # total loss, regularization excluded (author notes loss=2.2)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(input_labels, 1)), tf.float32))

    # Specify the optimization scheme:
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=base_lr)
    # global_step = tf.Variable(0, trainable=False)
    # learning_rate = tf.train.exponential_decay(0.05, global_step, 150, 0.9)
    optimizer = tf.train.MomentumOptimizer(learning_rate=base_lr, momentum=0.9)
    # train_tensor = optimizer.minimize(loss, global_step)
    # train_op = slim.learning.create_train_op(loss, optimizer, global_step=global_step)


    # When `batch_norm` layers are used, the moving `average` and `variance`
    # of each layer must be updated; these updates are not part of the normal
    # training step, so they are run manually as below.
    # `tf.get_collection` gathers all the update ops.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Use TensorFlow control flow to run the update ops before the train step.
    with tf.control_dependencies(update_ops):
        # create_train_op ensures that when we evaluate the loss,
        # the update_ops are run and the gradient updates are computed.
        train_op = slim.learning.create_train_op(total_loss=loss, optimizer=optimizer)


    # Iterative training loop (step_train is a project helper defined elsewhere)
    step_train(train_op, loss, accuracy,
               train_images_batch, train_labels_batch, train_nums, train_log_step,
               val_images_batch, val_labels_batch, val_nums, val_log_step,
               snapshot_prefix, snapshot)
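
A hypothetical call to this second train() variant; the learning rate and step count are packed into train_param, and all paths and values below are placeholders:

train(train_record_file='./dataset/train.tfrecords',
      train_log_step=50,
      train_param=[0.01, 12000],      # [base_lr, max_steps]
      val_record_file='./dataset/val.tfrecords',
      val_log_step=200,
      labels_nums=5,
      data_shape=[32, 224, 224, 3],   # [batch_size, resize_height, resize_width, depths]
      snapshot=2000,
      snapshot_prefix='./logs/model.ckpt')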