Example #1
            # Disabled per-batch training-accuracy bookkeeping:
            # for train_index in range(len(train_out)):
            #     if train_out[train_index] == train_l[train_index]:
            #         tr_acc.append(1)
            #     else:
            #         tr_acc.append(0)
            #         # misc.imsave('/Users/wywy/Desktop/train_e1' + '/' + str(choice_dict.get(train_out[train_index])) + '_' + bytes.decode(train_file[train_index]), train_img.reshape([-1, 64, 64])[train_index])
            #
            # train_acc = np.mean(np.array(tr_acc))
            # print('train iter :{},  train loss:{},  train acc:{}'.format(i, train_loss, train_acc))

            if i % 100 == 0:
                test_img, test_label1, test_name = sess.run(test_data)

                test_label = one_hot(test_label1.tolist(), 5)
                test_img1 = test_img / 255 - 0.5

                test_loss, test_acc, test_out, test_l = sess.run(
                    [net.loss, net.acc, net.argamx_out, net.argamx_label],
                    feed_dict={
                        net.x: test_img1,
                        net.y_: test_label
                    })

                # Manual per-sample accuracy check (mirrors the disabled
                # training-accuracy block above).
                tes_acc = []
                for test_index in range(len(test_out)):
                    if test_out[test_index] == test_l[test_index]:
                        tes_acc.append(1)
                    else:
                        tes_acc.append(0)
                test_acc_manual = np.mean(np.array(tes_acc))
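
# --- Note: one_hot() helper -------------------------------------------------
# The snippets here call one_hot(labels, depth), which is not defined in these
# excerpts. A minimal sketch of what it presumably does (the signature and the
# NumPy return type are assumptions, not confirmed by the source):
import numpy as np

def one_hot(labels, depth):
    """Convert a list of integer class ids into an [N, depth] one-hot array."""
    out = np.zeros((len(labels), depth), dtype=np.float32)
    out[np.arange(len(labels)), labels] = 1.0
    return out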
Example #2
def train(num_batches, batch_size, learning_rate, save_dir=None):
    # Build placeholders for the input samples and labels
    width_multiplier = 1
    inputs = tf.placeholder(tf.float32, [None, 64, 64, 1], name='input')
    labels = tf.placeholder(tf.float32, [None, 5])

    # Add a placeholder to indicate whether we're training the model
    is_training = tf.placeholder(tf.bool, name='is_training')

    # The original stack of 20 plain convolutional layers is left commented
    # out below; a MobileNet-style stack of depthwise separable convolutions
    # is used instead.
    layer = inputs
    # for layer_i in range(1, 20):
    #     layer = conv_layer(layer, layer_i, is_training)
    #
    # # Flatten the output from the convolutional layers
    # orig_shape = layer.get_shape().as_list()
    # layer = tf.reshape(layer, shape=[-1, orig_shape[1]*orig_shape[2]*orig_shape[3]])
    #
    # # Add one fully connected layer with 100 units
    # layer = fully_connected(layer, 100, is_training)
    #
    # # Create the output layer with one node for each class
    # logits = tf.layers.dense(layer, 5)

    net = conv2d(layer,
                 "conv_1",
                 round(32 * width_multiplier),
                 filter_size=3,
                 strides=2)  # -> [N, 32, 32, 32]
    net = tf.nn.relu(
        tf.layers.batch_normalization(net,
                                      name="conv_1/bn",
                                      training=is_training))  # BN + ReLU
    # MobileNet-style stack of depthwise separable convolutions. The shape
    # comments assume the 64x64x1 input above, width_multiplier = 1, and that
    # downsample=True halves the spatial resolution.
    net = _depthwise_separable_conv2d(net, 10, width_multiplier, "ds_conv_2",
                                      istraining=is_training)  # -> [N, 32, 32, 10]
    net = _depthwise_separable_conv2d(net, 16, width_multiplier, "ds_conv_3",
                                      downsample=True,
                                      istraining=is_training)  # -> [N, 16, 16, 16]
    net = _depthwise_separable_conv2d(net, 16, width_multiplier, "ds_conv_4",
                                      istraining=is_training)  # -> [N, 16, 16, 16]
    net = _depthwise_separable_conv2d(net, 32, width_multiplier, "ds_conv_5",
                                      downsample=True,
                                      istraining=is_training)  # -> [N, 8, 8, 32]
    net = _depthwise_separable_conv2d(net, 32, width_multiplier, "ds_conv_6",
                                      istraining=is_training)  # -> [N, 8, 8, 32]
    net = _depthwise_separable_conv2d(net, 64, width_multiplier, "ds_conv_7",
                                      downsample=True,
                                      istraining=is_training)  # -> [N, 4, 4, 64]
    net = _depthwise_separable_conv2d(net, 64, width_multiplier, "ds_conv_8",
                                      istraining=is_training)  # -> [N, 4, 4, 64]
    net = _depthwise_separable_conv2d(net, 64, width_multiplier, "ds_conv_9",
                                      istraining=is_training)  # -> [N, 4, 4, 64]
    net = _depthwise_separable_conv2d(net, 64, width_multiplier, "ds_conv_10",
                                      istraining=is_training)  # -> [N, 4, 4, 64]
    net = _depthwise_separable_conv2d(net, 64, width_multiplier, "ds_conv_11",
                                      istraining=is_training)  # -> [N, 4, 4, 64]
    net = _depthwise_separable_conv2d(net, 64, width_multiplier, "ds_conv_12",
                                      istraining=is_training)  # -> [N, 4, 4, 64]
    net = _depthwise_separable_conv2d(net, 128, width_multiplier, "ds_conv_13",
                                      downsample=True,
                                      istraining=is_training)  # -> [N, 2, 2, 128]
    net = _depthwise_separable_conv2d(net, 128, width_multiplier, "ds_conv_14",
                                      istraining=is_training)  # -> [N, 2, 2, 128]
    net = avg_pool(net, 2, "avg_pool_15")  # -> [N, 1, 1, 128]
    net = tf.squeeze(net, [1, 2],
                     name="SpatialSqueeze")  # drop size-1 dims: [N, 1, 1, 128] -> [N, 128]
    logits = fc(net, 5, "fc_16")  # -> [N, 5]
    predictions = tf.nn.softmax(logits)

    # Define the loss function and training operations.
    # softmax_cross_entropy_with_logits_v2 expects raw logits, not softmaxed
    # probabilities, so pass `logits` here rather than `predictions`.
    model_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
                                                   labels=labels))

    # Tell TensorFlow to update the batch-norm population statistics
    # while training
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)

    # Create operations to test accuracy
    arg_out = tf.reshape(tf.argmax(predictions, 1), [-1, 1], name='output')
    correct_prediction = tf.equal(tf.argmax(predictions, 1),
                                  tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Train and test the network

    train_data = train_shuffle_batch(train_filename, [64, 64, 1], 128)
    test_data = test_shuffle_batch(test_filename, [64, 64, 1], 30)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)
        saver.restore(sess, './save/test_model.dpk')
        for batch_i in range(num_batches):
            # batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            batch_xs, batch_ys, train_file = sess.run(train_data)
            batch_xs = batch_xs / 255 - 0.5
            # batch_xs=[cv2.resize(pic, (28, 28), interpolation=cv2.INTER_CUBIC).reshape([28,28,1]) for pic in batch_xs]

            batch_ys = one_hot(batch_ys.tolist(), 5)
            # Train on this batch
            sess.run(train_opt, {
                inputs: np.array(batch_xs),
                labels: batch_ys,
                is_training: True
            })

            # Periodically check the validation or training loss and accuracy
            if batch_i % 100 == 0:
                test_img, test_label1, test_name = sess.run(test_data)

                test_label = one_hot(test_label1.tolist(), 5)

                test_img1 = test_img / 255 - 0.5
                for tt, test_image in enumerate(test_img1):

                    # test_image1 = cv2.resize(test_image, (28, 28), interpolation=cv2.INTER_CUBIC).reshape([-1,28,28,1])
                    test_image1 = test_image.reshape([-1, 64, 64, 1])

                    test_label11 = test_label[tt].reshape([-1, 5])

                    loss, acc, test_out = sess.run(
                        [model_loss, accuracy, arg_out], {
                            inputs: test_image1,
                            labels: test_label11,
                            is_training: False
                        })
                    print(
                        'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'
                        .format(batch_i, loss, acc))
                    print('label:{},out:{}'.format(test_label1[tt], test_out))
            elif batch_i % 25 == 0:
                loss, acc = sess.run([model_loss, accuracy], {
                    inputs: batch_xs,
                    labels: batch_ys,
                    is_training: False
                })
                print(
                    'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'
                    .format(batch_i, loss, acc))
            if save_dir and batch_i % 1000 == 0:
                saver.save(sess, save_dir)
                print('model saved at {}, save successful!'.format(save_dir))
                graph_def = tf.get_default_graph().as_graph_def()
                output_graph_def = tf.graph_util.convert_variables_to_constants(
                    sess, graph_def, ['output'])

                with tf.gfile.GFile("./mobile222.pb", 'wb') as f:
                    f.write(output_graph_def.SerializeToString())
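
# --- Note: layer helpers -----------------------------------------------------
# conv2d, _depthwise_separable_conv2d, avg_pool and fc are project helpers that
# are not shown in this example. A minimal sketch of plausible TF 1.x
# implementations, assuming downsample means stride 2 and width_multiplier
# scales the pointwise channel count (MobileNet-style):
import tensorflow as tf

def conv2d(inputs, scope, num_filters, filter_size=3, strides=1):
    # Plain 2-D convolution with 'same' padding and no activation.
    with tf.variable_scope(scope):
        return tf.layers.conv2d(inputs, num_filters, filter_size,
                                strides=strides, padding='same')

def _depthwise_separable_conv2d(inputs, num_filters, width_multiplier,
                                scope, downsample=False, istraining=False):
    # Depthwise 3x3 conv followed by a pointwise 1x1 conv, each with
    # batch norm + ReLU, as in MobileNet.
    num_filters = round(num_filters * width_multiplier)
    stride = 2 if downsample else 1
    in_channels = inputs.get_shape().as_list()[-1]
    with tf.variable_scope(scope):
        dw_filter = tf.get_variable(
            'dw_filter', [3, 3, in_channels, 1],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        net = tf.nn.depthwise_conv2d(inputs, dw_filter,
                                     strides=[1, stride, stride, 1],
                                     padding='SAME')
        net = tf.nn.relu(tf.layers.batch_normalization(
            net, name='dw_bn', training=istraining))
        net = tf.layers.conv2d(net, num_filters, 1, padding='same',
                               name='pw_conv')
        net = tf.nn.relu(tf.layers.batch_normalization(
            net, name='pw_bn', training=istraining))
    return net

def avg_pool(inputs, pool_size, scope):
    # Average pooling with stride equal to the window size.
    return tf.layers.average_pooling2d(inputs, pool_size, pool_size,
                                       name=scope)

def fc(inputs, num_units, scope):
    # Fully connected layer producing raw logits (softmax is applied later).
    return tf.layers.dense(inputs, num_units, name=scope)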
Example #3
    test_data = test_shuffle_batch(test_filename, [64, 64, 1], 1)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
    x = []
    y = []
    choice_dict = dict(zip([0, 1, 2, 3, 4], list('ABCDX')))
    with tf.Session() as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)
        saver.restore(sess, './save/test_model.dpk')
        for i in range(100000):
            train_img, train_label, train_file = sess.run(train_data)
            train_img1 = train_img / 255 - 0.5
            train_label = one_hot(train_label.tolist(), 5)

            _, train_loss, train_acc, train_out, train_l = sess.run(
                [net.opt, net.loss, net.acc, net.argamx_out, net.argamx_label],
                feed_dict={
                    net.x: train_img1,
                    net.y_: train_label
                })

            tr_acc = []
            for train_index in range(len(train_out)):
                if train_out[train_index] == train_l[train_index]:
                    tr_acc.append(1)
                else:
                    tr_acc.append(0)
                    # misc.imsave('/Users/wywy/Desktop/train_e1' + '/' + str(choice_dict.get(train_out[train_index])) + '_' + bytes.decode(train_file[train_index]), train_img.reshape([-1, 64, 64])[train_index])
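
# --- Note: input pipeline ----------------------------------------------------
# train_shuffle_batch / test_shuffle_batch are not defined in these excerpts.
# A minimal sketch of a queue-based TFRecord pipeline matching the assumed
# signature shuffle_batch(filename, shape, batch_size) -> (image, label, name);
# the feature keys 'image', 'label' and 'name' are assumptions:
import tensorflow as tf

def train_shuffle_batch(filename, shape, batch_size):
    # Read serialized examples from a TFRecord file and shuffle them into
    # batches via TF 1.x queue runners (started with
    # tf.train.start_queue_runners, as in the sessions above).
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized,
        features={
            'image': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
            'name': tf.FixedLenFeature([], tf.string),
        })
    # Raw uint8 pixels; the training loops normalize with img / 255 - 0.5.
    image = tf.reshape(tf.decode_raw(features['image'], tf.uint8), shape)
    return tf.train.shuffle_batch(
        [image, features['label'], features['name']],
        batch_size=batch_size,
        capacity=1000 + 3 * batch_size,
        min_after_dequeue=1000)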