Example #1
import tensorflow as tf

# Training hyperparameters for the dog/cat classifier.
SIZE = 128                        # input image side length in pixels
learningRate = 0.001
epoch = 10000                     # number of training iterations
disp = 10                         # progress-logging interval in steps
batchSize = 200
classNum = 2                      # dog vs. cat
logs_path = "C:/tf_log/dog_cat/"  # TensorBoard summary directory
keepprob = 0.90                   # dropout keep probability

with tf.name_scope("model"):
    # inputBatch and AlexNet are helpers defined elsewhere in the project:
    # inputBatch reads batches of (image, label) pairs from the TFRecords file,
    # and AlexNet.createNetwork2 returns the class logits.
    imgBatch, labelBatch = inputBatch("./data/train.tfrecords", batchSize, SIZE)
    # Float on/off values keep the one-hot labels compatible with the float
    # logits expected by softmax_cross_entropy_with_logits below.
    labelBatch = tf.one_hot(labelBatch, depth=classNum, on_value=1.0, off_value=0.0)
    alexnet = AlexNet(classNum, keepprob)
    pred = alexnet.createNetwork2(imgBatch)

with tf.name_scope("loss"):
    # Softmax cross-entropy plus a small L2 weight-decay term over all
    # trainable variables.
    train_vars = tf.trainable_variables()
    lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in train_vars]) * 0.0001
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
            logits=pred, labels=labelBatch)) + lossL2
    optimizer = tf.train.AdamOptimizer(
        learning_rate=learningRate).minimize(cost)
    tf.summary.scalar("loss", cost)

with tf.name_scope("eval"):
    # Per-batch accuracy, exported as a TensorBoard summary.
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(labelBatch, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    tf.summary.scalar("acc", accuracy)
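
Example #1 only defines the graph; the session loop that consumes epoch, disp, and logs_path is not part of the snippet. Below is a minimal sketch of the usual TF1 queue-runner training loop; the merged/summaryWriter names and the checkpoint path are illustrative assumptions, not taken from the original project.

merged = tf.summary.merge_all()
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summaryWriter = tf.summary.FileWriter(logs_path, graph=sess.graph)

    # The TFRecords input pipeline is queue-based, so the queue runners must be
    # started before sess.run() can fetch a batch.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    for step in range(epoch):
        _, lossVal, accVal, summary = sess.run([optimizer, cost, accuracy, merged])
        if step % disp == 0:
            print("step %d: loss=%.4f acc=%.4f" % (step, lossVal, accVal))
            summaryWriter.add_summary(summary, step)

    saver.save(sess, "./model_save/model5.ckpt")  # illustrative path, matching Example #2
    coord.request_stop()
    coord.join(threads)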
Example #2
import csv

import tensorflow as tf

SIZE = 128          # input image side length in pixels
batchSize = 100     # not used in this inference snippet
classNum = 2
keepprob = 0.95     # dropout keep probability passed to AlexNet
pictureNum = 12500  # number of images in the test set

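# Open the submission CSV and write its header row.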
csvfile = open('./result/csv_test3.csv', 'w', newline='')
writer = csv.writer(csvfile)
writer.writerow(['id', 'label'])

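# Build the inference graph. inputNoShuffle and AlexNet are helpers defined
# elsewhere in the project; inputNoShuffle reads the test images in file order
# so the row ids written below stay aligned with the images.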
with tf.name_scope("model"):

    img = inputNoShuffle("./data/test.tfrecords", SIZE)
    alexnet = AlexNet(classNum, keepprob)

    pred = alexnet.createNetwork2(img, testFlag=True)
    ans = tf.argmax(pred, axis=1)

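# Restore the trained weights and run every test image through the network.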
with tf.Session() as sess:

    saver = tf.train.Saver()
    saver.restore(sess, "./model_save/model5.ckpt")

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    # One sess.run per image: write the 1-based image id and the softmax
    # probability of class 1 (predArr[0, 1]) to the CSV.
    for i in range(pictureNum):
        ansArr, predArr = sess.run([ans, pred])
        print(ansArr, predArr)
        writer.writerow([str(i + 1), float(predArr[0, 1])])

    # Stop the queue-runner threads before the session closes.
    coord.request_stop()
    coord.join(threads)

csvfile.close()
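
The finished file has one row per test image: a 1-based id and the probability assigned to class 1. Whether that column means dog or cat depends on how the labels were encoded when the training TFRecords file was written.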