def inference(inputs, input_units, output_units, is_train=True):
  """
  Define the model by model name.
  
  Return:
    The logit of the model output.
  """

  if FLAGS.model == "dnn":
    return model.dnn_inference(inputs, input_units, output_units, is_train,
                               FLAGS)
  elif FLAGS.model == "lr":
    return model.lr_inference(inputs, input_units, output_units, is_train,
                              FLAGS)
  elif FLAGS.model == "wide_and_deep":
    return model.wide_and_deep_inference(inputs, input_units, output_units,
                                         is_train, FLAGS)
  elif FLAGS.model == "customized":
    return model.customized_inference(inputs, input_units, output_units,
                                      is_train, FLAGS)
  elif FLAGS.model == "cnn":
    return model.cnn_inference(inputs, input_units, output_units, is_train,
                               FLAGS)
  elif FLAGS.model == "customized_cnn":
    return model.customized_cnn_inference(inputs, input_units, output_units,
                                          is_train, FLAGS)
  elif FLAGS.model == "lstm":
    return model.lstm_inference(inputs, input_units, output_units, is_train,
                                FLAGS)
  elif FLAGS.model == "bidirectional_lstm":
    return model.bidirectional_lstm_inference(inputs, input_units,
                                              output_units, is_train, FLAGS)
  elif FLAGS.model == "gru":
    return model.gru_inference(inputs, input_units, output_units, is_train,
                               FLAGS)
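A minimal usage sketch of this dispatcher follows; the flag definition and the 9-feature input are assumptions, since the original module defines FLAGS and the input pipeline elsewhere.

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string("model", "dnn", "Model to use: dnn, lr, wide_and_deep, ...")
FLAGS = flags.FLAGS

# Hypothetical input; inference() dispatches on FLAGS.model.
inputs = tf.placeholder(tf.float32, shape=[None, 9])
logits = inference(inputs, input_units=9, output_units=2, is_train=True)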
Example #3
def evaluate_one_image():
    '''Test one image against the saved model and parameters.
    '''

    # You need to change this directory to yours.
    train_dir = 'D:/python/deep-learning/CatVsDog/Project/test_image/'
    train, train_label = input_data.get_files(train_dir)
    image_array = get_one_image(train)

    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 2

        # Build the graph on a placeholder so the image fed at run time is
        # actually used; the original built the graph directly on image_array,
        # leaving the placeholder x disconnected from logit.
        x = tf.placeholder(tf.float32, shape=[208, 208, 3])
        image = tf.image.per_image_standardization(x)
        image = tf.reshape(image, [1, 208, 208, 3])
        logit = model.cnn_inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        # You need to change this directory to yours.
        logs_train_dir = 'D:/python/deep-learning/CatVsDog/Project/log/'

        saver = tf.train.Saver()

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            prediction = sess.run(logit, feed_dict={x: image_array})
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is a cat with probability %.6f' % prediction[0, 0])
            else:
                print('This is a dog with probability %.6f' % prediction[0, 1])
    plt.imshow(image_array)
    plt.show()
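get_one_image is called above but not shown. A plausible sketch, assuming it picks one training image at random and returns it as an RGB array; the PIL-based resize to 208x208 (matching the graph's expected shape) is an assumption.

import numpy as np
from PIL import Image

def get_one_image(train):
    # Hypothetical helper: choose one image path at random and return
    # it as a 208x208x3 RGB numpy array.
    ind = np.random.randint(0, len(train))
    image = Image.open(train[ind]).convert('RGB').resize((208, 208))
    return np.array(image)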
Example #4
IMG_W = 208  # resize images; larger sizes make training slower
IMG_H = 208
N_CLASSES = 2  # used by model.cnn_inference below; missing from the original snippet
BATCH_SIZE = 16
CAPACITY = 2000
MAX_STEP = 5000  # typically 5k~10k
learning_rate = 0.0001  # typically smaller than 0.0001

train_dir = 'F:/Data/train2/'
logs_train_dir = 'F:/Data/doglog/'  # records the training process and saves the model

train, train_label = input_data.get_files(train_dir)
train_batch, train_label_batch = input_data.get_batch(train, train_label,
                                                      IMG_W, IMG_H, BATCH_SIZE,
                                                      CAPACITY)

train_logits = model.cnn_inference(train_batch, BATCH_SIZE, N_CLASSES)
train_loss = model.losses(train_logits, train_label_batch)
train_op = model.training(train_loss, learning_rate)
train_acc = model.evaluation(train_logits, train_label_batch)

summary_op = tf.summary.merge_all()  # merge all summaries for logging

# Visualization: lists for plotting line charts.
step_list = list(range(100))  # because the cnn_lists below later get entries appended
cnn_list1 = []
cnn_list2 = []
fig = plt.figure()  # create the figure for visualization
ax = fig.add_subplot(1, 1, 1)  # subplot rows, columns, position
ax.yaxis.grid(True)
ax.set_title('cnn_accuracy', fontsize=14, y=1.02)
ax.set_xlabel('step')
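The snippet ends before the loop that fills cnn_list1 and cnn_list2. A sketch of how that loop might look; the session/queue setup and the 50-step sampling interval are assumptions, not part of the original.

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in range(MAX_STEP):
            _, loss, acc = sess.run([train_op, train_loss, train_acc])
            if step % 50 == 0:  # assumed sampling interval
                cnn_list1.append(acc)
                cnn_list2.append(loss)
    finally:
        coord.request_stop()
        coord.join(threads)

ax.plot(step_list, cnn_list1[:len(step_list)])
plt.show()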
Example #5
def train():

    tf.reset_default_graph()
    sess = tf.Session()

    keep_prob_ = tf.placeholder(tf.float32, name='keep')
    learning_rate_ = tf.placeholder(tf.float32, name='learning_rate')

    inputs, labels, total_count = dataset.csv_inputs(batch_size, epochs,
                                                     n_classes, n_channels,
                                                     seq_len, trial)
    inputs = tf.cast(inputs, tf.float32)
    labels = tf.cast(labels, tf.float32)
    total_count = tf.cast(total_count, tf.float32)
    logits = model.cnn_inference(inputs, keep_prob_, n_classes)

    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
                                                   labels=labels))
    tf.summary.scalar("cost", cost)

    train_op = tf.train.AdamOptimizer(learning_rate_)
    gradients = train_op.compute_gradients(cost)
    capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var)
                        for grad, var in gradients]
    optimizer = train_op.apply_gradients(capped_gradients)

    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    lg = tf.argmax(logits, 1)
    ll = tf.argmax(labels, 1)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar("accuracy", accuracy)

    summ = tf.summary.merge_all()
    saver = tf.train.Saver()

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)

    # Resume training from the latest checkpoint in save_path.
    saver.restore(sess, tf.train.latest_checkpoint(save_path))

    writer_train = tf.summary.FileWriter(save_path + 'train_accuracy/',
                                         sess.graph)

    print("epoch looping")
    index = 0

    # Feed dictionary
    feed = {keep_prob_: keep_prob, learning_rate_: learning_rate}
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    e = 0
    loss_pre = 1000  # best (lowest) loss seen so far
    train_acc, train_loss = [], []  # accumulators; presumably module-level in the original

    try:
        while not coord.should_stop():
            index += 1
            logits_val, labels_val, loss, _, acc, s_t, row_count = sess.run(
                [lg, ll, cost, optimizer, accuracy, summ, total_count],
                feed_dict=feed)

            writer_train.add_summary(s_t, index)
            train_acc.append(acc)
            train_loss.append(loss)

            if index % np.floor(row_count / batch_size) == 0:
                e += 1

            if loss < loss_pre:
                saver.save(sess, save_path + 'save.ckpt')
                loss_pre = loss

            if loss < 0.0000001 and acc == 1:
                print("Epoch: {}/{}".format(e, epochs),
                      "Iteration: {:d}".format(index),
                      "Train loss: {:.10f}".format(loss),
                      "Train acc: {:.4f}".format(acc))
                saver.save(sess, save_path + 'save.ckpt')
                break

            # Print every 100 iterations
            if index % 100 == 0:
                print("Epoch: {}/{}".format(e, epochs),
                      "Iteration: {:d}".format(index),
                      "Train loss: {:.10f}".format(loss),
                      "Train acc: {:.4f}".format(acc))

    except tf.errors.OutOfRangeError:
        print('epoch reached!')
    finally:
        print("Epoch: {}/{}".format(e,
                                    epochs), "Iteration: {:d}".format(index),
              "Final train loss: {:.10f}".format(loss_pre))
        coord.request_stop()
        coord.join(threads)

    sess.close()
Example #6
def test():

    t = time.time()
    tf.reset_default_graph()
    sess = tf.Session()

    keep_prob_ = tf.placeholder(tf.float32, name='keep')
    learning_rate_ = tf.placeholder(tf.float32, name='learning_rate')

    inputs, labels, trial_num, total_count = dataset.csv_test(
        batch_size, n_classes, n_channels, seq_len, trial)
    inputs = tf.cast(inputs, tf.float32)
    labels = tf.cast(labels, tf.float32)
    total_count = tf.cast(total_count, tf.uint16)

    logits = model.cnn_inference(inputs, keep_prob_, n_classes)

    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
                                                   labels=labels))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar("accuracy", accuracy)

    saver = tf.train.Saver()
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    saver.restore(sess, tf.train.latest_checkpoint(save_path))

    print("epoch looping")
    index = 0

    feed = {keep_prob_: 1, learning_rate_: 1}
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    elapsed = time.time() - t
    t1 = time.time()

    try:
        while not coord.should_stop():
            index += 1
            batch_acc, batch_loss, batch_logits, batch_labels, total_counts = sess.run(
                [accuracy, cost, logits, labels, total_count], feed_dict=feed)

            probabilities.append(
                tf.nn.softmax(batch_logits).eval(session=sess))
            test_labels.append(tf.argmax(batch_labels, 1).eval(session=sess))
            predictions.append(
                tf.argmax(tf.nn.softmax(batch_logits), 1).eval(session=sess))
            test_acc.append(batch_acc)
            test_loss.append(batch_loss)

            print(
                "Iteration: {}/{}".format(
                    index,
                    np.floor(total_counts / batch_size).astype(int)),
                "Batch test accuracy: {:.6f}".format(batch_acc))

    except tf.errors.OutOfRangeError:
        print('epoch reached!')
    finally:
        coord.request_stop()
        coord.join(threads)

    elapsed1 = time.time() - t1
    clabels = np.concatenate(test_labels, axis=0)
    cpredictions = np.concatenate(predictions, axis=0)
    cprobabilities = np.concatenate(probabilities, axis=0)
    confusion_matrix = tf.confusion_matrix(
        labels=clabels, predictions=cpredictions).eval(session=sess)
    sess.close()

    print("Mean test accuracy: {:.6f}".format(np.mean(test_acc)))

    df = pd.DataFrame(confusion_matrix)
    df.to_csv(result_path + 'confusion_matrix.csv')
    df1 = pd.DataFrame(clabels)
    df1.to_csv(result_path + 'labels.csv', header=None, index=None)
    df2 = pd.DataFrame(cpredictions)
    df2.to_csv(result_path + 'predictions.csv', header=None, index=None)
    df3 = pd.DataFrame(test_acc)
    df3.to_csv(result_path + 'test_acc.csv', header=None, index=None)
    df4 = pd.DataFrame(cprobabilities)
    df4.to_csv(result_path + 'probabilities.csv', header=None, index=None)
    df5 = pd.DataFrame(test_loss)
    df5.to_csv(result_path + 'test_loss.csv', header=None, index=None)
    df6 = pd.DataFrame([
        'CNN', trial,
        np.mean(test_acc),
        np.mean(test_loss), elapsed, elapsed1 / index
    ])
    df6.to_csv(result_path + 'result.csv', header=None, index=None)

    # rows_sums = confusion_matrix.sum(axis=1)
    # normalised_confusion_matrix = confusion_matrix/rows_sums[:, np.newaxis]
    print(confusion_matrix)
    print(trial)
    print('CNN')
    print('mean test accuracy: ', np.mean(test_acc))
    print('mean test loss: ', np.mean(test_loss))
    print('T_model: ', elapsed)
    print('T_batch: ', elapsed1 / index)
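    # The commented-out normalization above divides each row by its sum; a
    # guarded sketch (the np.maximum guard against empty rows is an addition):
    rows_sums = confusion_matrix.sum(axis=1)
    normalised_confusion_matrix = confusion_matrix / np.maximum(rows_sums, 1)[:, np.newaxis]
    print(normalised_confusion_matrix)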
Example #7
# drop_remainder controls what happens when the final batch is smaller than
# BATCH_SIZE: True drops it, False (used here) keeps it.
train_dataset = train_dataset.shuffle(100).batch(BATCH_SIZE, drop_remainder=False).repeat()

# A Dataset is effectively a generator, so create an iterator to pull its data.
train_iterator = tf.data.make_one_shot_iterator(train_dataset)

train_handle = tf.placeholder(tf.string, shape=[])
iterator_from_train_handle = tf.data.Iterator.from_string_handle(
    train_handle, train_dataset.output_types, train_dataset.output_shapes)
# Key step: fetch the next batch of data from the iterator.
next_train_data, next_train_label = iterator_from_train_handle.get_next()

# Define the input placeholder.
x = tf.placeholder(tf.float32, shape=[None, IMAGE_H, IMAGE_W, CHANNELS])
# Forward pass.
y_pred = model.cnn_inference(inputs=x, num_classes=N_CLASSES)

y_true = tf.placeholder(tf.int64, shape=[None])
train_loss = model.losses(logits=y_pred, labels=y_true)
train_accuracy = model.evaluation(logits=y_pred, labels=y_true)
train_op = model.training(train_loss, learning_rate=0.0001)

#summary_op = tf.summary.merge_all()

init_op = tf.global_variables_initializer()

# Dictionary to store the loss and accuracy values collected during training.
history = {}
history["train_loss"] = []
history["train_acc"] = []