Code example #1
0
def runInference():
    """Run the saved TF CNN over every test image and write predictions.

    Loads the test set, restores the checkpointed model, feeds images one
    at a time, and writes one "<name> <predicted_class>" line per image to
    tfproject4.txt.  Relies on module-level graph objects: ``graph``, ``X``,
    ``is_train``, ``result`` and the IMAGE_HEIGHT / IMAGE_WIDTH /
    NUM_CHANNELS constants.
    """
    data, names = dataloader.loadTestData('data_part1/test/')
    # Scale pixels to [0, 1] and restore the 4-D layout the conv net
    # expects: (N, H, W, C).
    rdata = np.reshape(
        data, (len(data), IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS)) / 255
    output_path = "tfproject4.txt"

    # "with" guarantees the output file is closed (and flushed) even if
    # restoring the model or running inference raises; the original left
    # the file open on any exception.
    with open(output_path, "w") as output:
        with tf.Session(graph=graph) as session:
            # Saver().restore returns None, so there is nothing to bind.
            tf.train.Saver().restore(session,
                                     "models/tf-project4/model.ckpt")
            print("model loaded")
            for i in range(len(data)):
                # Batch of one image; is_train=0 disables dropout at
                # inference time.
                ret = session.run([result],
                                  feed_dict={
                                      X: np.array([rdata[i]]),
                                      is_train: 0
                                  })
                output.write("{} {}\n".format(names[i], ret[0][0]))
    print("output saved file: " + output_path)
Code example #2
0
def runLogisticRegression():
    """Score each test image with the saved logistic-regression weights.

    Prints one "<name> <predicted_class>" line per image; the class is the
    argmax of the linear scores ``x . w + b``.
    """
    weights = np.load("models/logisticRegression/w.npy")
    bias = np.load("models/logisticRegression/b.npy")

    images, names = dataloader.loadTestData('data_part1/test/')
    # Flatten each 77x71 image into a vector and scale pixels to [0, 1].
    flat = np.reshape(images, (len(images), 77 * 71)) / 255

    for name, pixels in zip(names, flat):
        scores = np.dot(pixels, weights) + bias
        print("{} {}".format(name, np.argmax(scores)))
Code example #3
0
def runMlp():
    """Classify each test image with the saved two-layer MLP weights.

    Prints one "<name> <prediction>" line per image, delegating the forward
    pass to ``mlp.inference``.
    """
    import mlp

    hidden_w = np.load("models/mlp/wj.npy")
    hidden_b = np.load("models/mlp/bj.npy")
    output_w = np.load("models/mlp/wk.npy")
    output_b = np.load("models/mlp/bk.npy")

    images, names = dataloader.loadTestData('data_part1/test/')
    # Flatten each 77x71 image into a vector and scale pixels to [0, 1].
    flat = np.reshape(images, (len(images), 77 * 71)) / 255

    for name, pixels in zip(names, flat):
        prediction = mlp.inference(pixels, hidden_w, hidden_b,
                                   output_w, output_b)
        print("{} {}".format(name, prediction))


#runMlp()
#runLogisticRegression()
#runMlpTF()
Code example #4
0
    # --- Tail of the graph definition; the head (conv/fc layers producing
    # `fc`, plus `is_train`) lies before this chunk. ---
    # Dropout at rate 0.4, active only while training (gated by is_train).
    dropout = tf.layers.dropout(fc, 0.4, training=is_train)

    # Integer class labels, one-hot encoded for the squared-error loss.
    y = tf.placeholder(tf.int64, shape=(None, ))
    y_one_hot = tf.one_hot(y, 10)
    learning_rate = tf.placeholder(tf.float32)

    # 10-way output layer with sigmoid (not softmax) activation.
    out = tf.layers.dense(dropout, 10, activation=tf.nn.sigmoid)

    # NOTE(review): reduce_sum with no axis collapses to a scalar, so the
    # outer reduce_mean is a no-op — this is the total (not per-example
    # mean) squared error over the batch. Confirm that is intended.
    loss = tf.reduce_mean(tf.reduce_sum((y_one_hot - out)**2))
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=learning_rate).minimize(loss)

    # Predicted class per example, and the count of correct predictions
    # in the batch.
    result = tf.argmax(out, 1)
    correct = tf.reduce_sum(tf.cast(tf.equal(result, y), tf.float32))

# Inference driver: classify the images in the directory given by
# sys.argv[1] and write "<name> <predicted_class>" lines to the file
# named by sys.argv[2].
data, names = dataloader.loadTestData(sys.argv[1])
# Scale pixels to [0, 1] and restore the (N, H, W, C) layout the network
# expects.
rdata = np.reshape(data,
                   (len(data), IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNELS)) / 255

# "with" guarantees the output file is closed (and flushed) even if
# restoring the model or running inference raises; the original only
# closed it on the success path.
with open(sys.argv[2], "w") as output, \
        tf.Session(graph=graph) as session:
    # Saver().restore returns None, so there is nothing to bind.
    tf.train.Saver().restore(session, "model/model.ckpt")
    for i in range(len(data)):
        # Batch of one image; is_train=0 disables dropout at inference time.
        ret = session.run([result],
                          feed_dict={
                              X: np.array([rdata[i]]),
                              is_train: 0
                          })
        output.write("{} {}\n".format(names[i], ret[0][0]))