Example #1
File: train.py  Project: ashhher3/Kaggle
import tensorflow as tf  # TF 1.x graph-mode API


def main():
    # Placeholders for flattened input images and one-hot labels
    imgs = tf.placeholder(tf.float32, [None, IMAGE_WIDTH * IMAGE_HEIGHT])
    keys = tf.placeholder(tf.float32, [None, N_CLASS])

    train_model = create_CNN(imgs, Weight_Dicts, Biases_Dict, Dropout_Dict)

    # Define loss and optimizer; current TF 1.x requires keyword arguments here
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=train_model, labels=keys))
    optimizer = tf.train.AdamOptimizer(0.01).minimize(cost)

    # Evaluate the model: fraction of the batch where the predicted class matches
    correct_model = tf.equal(tf.argmax(train_model, 1), tf.argmax(keys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_model, tf.float32))

    # tf.initialize_all_variables() is deprecated; use the replacement below
    init = tf.global_variables_initializer()

    obj = DataSet()
    obj.load()
    with tf.Session() as sess:
        sess.run(init)
        step = 1
        while step * BATCH_SIZE < TRAINING_ITERS:
            batch_imgs, batch_keys = obj.next_batch(BATCH_SIZE)
            sess.run(optimizer, feed_dict={imgs: batch_imgs, keys: batch_keys})
            if step % DISPLAY_STEP == 0:
                # Fetch loss and accuracy in a single run instead of two passes
                loss, acc = sess.run([cost, accuracy],
                                     feed_dict={imgs: batch_imgs, keys: batch_keys})
                print("Iter " + str(step * BATCH_SIZE) +
                      ", MiniBatch Loss = " + "{:.6f}".format(loss) +
                      ", Training Accuracy = " + "{:.5f}".format(acc))
            step += 1
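
DataSet, create_CNN, and the weight/bias/dropout dictionaries come from the surrounding project and are not shown on this page. A minimal sketch of what next_batch could look like, assuming the loaded images and one-hot labels are kept as NumPy arrays (all field names below are hypothetical):

import numpy as np

class DataSet(object):
    # Hypothetical minimal batching helper; the real project class also
    # implements load(), assumed here to fill self.images and self.labels.
    def __init__(self):
        self.images = None  # (num_samples, IMAGE_WIDTH * IMAGE_HEIGHT) floats
        self.labels = None  # (num_samples, N_CLASS) one-hot rows
        self._cursor = 0

    def next_batch(self, batch_size):
        # Reshuffle and wrap around once the current pass is exhausted
        if self._cursor + batch_size > len(self.images):
            perm = np.random.permutation(len(self.images))
            self.images = self.images[perm]
            self.labels = self.labels[perm]
            self._cursor = 0
        start = self._cursor
        self._cursor += batch_size
        return self.images[start:self._cursor], self.labels[start:self._cursor]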
Example #2
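call_precision is defined elsewhere in the project. A minimal sketch of what it might compute, assuming yl holds logits and yt one-hot targets (the body below is a hypothetical stand-in, not the project's implementation):

import tensorflow as tf

def call_precision(yl, yt, isCNN=True):
    # Fraction of rows where the predicted class matches the target class
    correct = tf.equal(tf.argmax(yl, 1), tf.argmax(yt, 1))
    return tf.reduce_mean(tf.cast(correct, tf.float32))
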
with tf.name_scope("accuracy"):
    # Both summaries wrap the same precision op; which split each one reflects
    # depends on the feed_dict supplied when that summary is evaluated.
    test_acc = call_precision(yl, yt, isCNN=True)
    acc_sum_1 = tf.summary.scalar('test_acc', test_acc)

    train_acc = call_precision(yl, yt, isCNN=True)
    acc_sum_2 = tf.summary.scalar('training_acc', train_acc)

sess = tf.InteractiveSession()
# Note: the graph-level seed only affects ops created after this call, so for
# reproducible initialization it should be set before the graph is built.
tf.random.set_random_seed(SEED)
tf.global_variables_initializer().run()
writer = tf.summary.FileWriter('logs', sess.graph)
no_epoch = 100000  # each iteration draws one mini-batch, so these are steps rather than epochs
for i in range(no_epoch):
    xl_batch, yt_batch, xl_batch_index = l_dataset.next_batch(batch_size)
    xuindex_batch, _, xuindex_batch_index = uindex_dataset.random_batch(
        batch_size)
    xu_batch = samples[xuindex_batch]
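    # Gather each sample's k nearest neighbors and m most remote points from the
    # precomputed index tables; unlabeled rows are offset by labeled_data_size
    # because the tables list the labeled samples first.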
    xn_batch_index = neighborIndex[xl_batch_index, :k_of_knn]  # .flatten()
    xr_batch_index = remoteIndex[xl_batch_index, :m_of_knn]
    xun_batch_index = neighborIndex[xuindex_batch_index +
                                    labeled_data_size, :k_of_knn]  # .flatten()
    xur_batch_index = remoteIndex[xuindex_batch_index +
                                  labeled_data_size, :m_of_knn]
    wl_n = RBF_matrix[xl_batch_index.reshape(batch_size, 1),
                      xn_batch_index].reshape(-1, k_of_knn,
                                              1)  # this reshape is required; do not remove it
    wl_r = RBF_matrix[xl_batch_index.reshape(batch_size, 1),
                      xr_batch_index].reshape(-1, m_of_knn, 1)
    wu_n = RBF_matrix[(xuindex_batch_index +