Example no. 1
def train():
    with tf.name_scope('input'):
        # train_image_batch, train_labels_batch = input.read_cifar10(TRAIN_PATH, batch_size=BATCH_SIZE)
        train_image_batch, train_labels_batch = input.read_and_decode_by_tfrecorder(TRAIN_PATH, BATCH_SIZE)
        test_image_batch, test_labels_batch = input.read_and_decode_by_tfrecorder(TEST_PATH, BATCH_SIZE)
        print(train_image_batch)
        print(train_labels_batch)
        # show = cv2.imshow('test',train_image_batch[0])
        # wait = cv2.waitKeyEx()

        #logits = alex_net.alex_net(train_image_batch, NUM_CLASS)
        # logits = fcn_net.fcn_net(train_image_batch,NUM_CLASS)
        logits = cifar_net.inference(train_image_batch, batch_size=BATCH_SIZE, n_classes=NUM_CLASS, name="train")
        # logits = VGG.VGG16N(train_image_batch,n_classes=NUM_CLASS,is_pretrain=False)
        #logits = mnistnet.net(train_image_batch,num_class=NUM_CLASS)
        print(logits)
        loss = function.loss(logits=logits, labels=train_labels_batch)
        accuracy_logits = cifar_net.inference(test_image_batch, batch_size=BATCH_SIZE, n_classes=NUM_CLASS, name="test")
        accuracy = function.accuracy(logits=accuracy_logits, labels=test_labels_batch)

        my_global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = function.optimize(loss=loss, learning_rate=LEARNING_RATE, global_step=my_global_step)

        saver = tf.train.Saver(tf.global_variables())
        summary_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):

            if coord.should_stop():
                break

            _, train_loss, train_accuracy = sess.run([train_op, loss, accuracy])
            #print('***** Step: %d, loss: %.4f *****' % (step, train_loss))
            if (step % 50 == 0) or (step == MAX_STEP - 1):
                print('***** Step: %d, loss: %.4f' % (step, train_loss))
                summary_str = sess.run(summary_op)
                tra_summary_writer.add_summary(summary_str, step)
            if (step % 200 == 0) or (step == MAX_STEP - 1):
                print('***** Step: %d, loss: %.4f, accuracy: %.4f%% *****' % (step, train_loss, train_accuracy))
                summary_str = sess.run(summary_op)
                tra_summary_writer.add_summary(summary_str, step)
            if step % 2000 == 0 or step == MAX_STEP - 1:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Input queue exhausted: stopping training')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
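
The `function.loss` and `function.accuracy` helpers called in this example are not shown on this page. As a minimal sketch, assuming TF1-style logits of shape [batch_size, n_classes], one-hot labels, and a result scaled by 100 to match the `%.4f%%` prints (all of these are assumptions), `function.accuracy` might look like:

# Hypothetical sketch of the `function.accuracy` helper used above.
# Assumptions: logits shaped [batch_size, n_classes], one-hot labels,
# result expressed as a percentage to match the `%.4f%%` prints.
import tensorflow as tf

def accuracy(logits, labels):
    with tf.name_scope('accuracy'):
        correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
        acc = tf.reduce_mean(tf.cast(correct, tf.float32)) * 100.0
        tf.summary.scalar('accuracy', acc)  # picked up by tf.summary.merge_all()
    return acc
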
Example no. 2
def eval():
    image_batch, label_batch = input.read_and_decode_by_tfrecorder(
        TEST_PATH, TEST_BATCH_SIZE, False)
    logits = ENnet.inference(image_batch, TEST_BATCH_SIZE, NUME_CLASS)
    accuracy = function.accuracy(logits, label_batch)
    labels = tf.argmax(label_batch, 1)
    results = tf.argmax(logits, 1)
    saver = tf.train.Saver()

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        thread = tf.train.start_queue_runners(coord=coord)
        ckpt = tf.train.get_checkpoint_state(RESULT_PATH)
        i = 0
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            try:
                while not coord.should_stop() and i < 1:
                    image, label, result = sess.run(
                        [image_batch, labels, results])
                    plot_images(image, label, result)
                    acc = sess.run(accuracy)
                    print("%.4f%%" % acc)
                    i += 1
            except tf.errors.OutOfRangeError:
                print("out of range eroor")
            finally:
                coord.request_stop()
            coord.join(threads=thread)
            sess.close()
Example no. 3
def train():
    with tf.name_scope('input'):
        # train_image_batch, train_labels_batch = input.read_cifar10(TRAIN_PATH, batch_size=BATCH_SIZE)
        train_image_batch, train_labels_batch = input.read_and_decode_by_tfrecorder(
            TRAIN_PATH, BATCH_SIZE)
        # test_image_batch, test_labels_batch = input.read_and_decode_by_tfrecorder(TEST_PATH, BATCH_SIZE)
        print(train_image_batch)
        print(train_labels_batch)

        logits = OCnet.inference(train_image_batch,
                                 batch_size=BATCH_SIZE,
                                 n_classes=NUM_CLASS,
                                 name="train")
        # logits = OCnet2.inference(train_image_batch, batch_size=BATCH_SIZE, num_class=NUM_CLASS)

        loss = function.loss(logits=logits, labels=train_labels_batch)
        accuracy_train = function.accuracy(logits=logits,
                                           labels=train_labels_batch)

        my_global_step = tf.Variable(0, name='global_step', trainable=False)
        optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
        train_op = optimizer.minimize(loss, global_step=my_global_step)
        saver = tf.train.Saver(tf.global_variables())

        init = tf.global_variables_initializer()
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        sess.run(init)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):

            if coord.should_stop():
                break

            _, train_loss, train_accuracy = sess.run(
                [train_op, loss, accuracy_train])
            # print('***** Step: %d, loss: %.4f *****' % (step, train_loss))
            if (step % 50 == 0) or (step == MAX_STEP - 1):
                print('***** Step: %d, loss: %.4f' % (step, train_loss))
            if (step % 200 == 0) or (step == MAX_STEP - 1):
                print(
                    '***** Step: %d, loss: %.4f, train set accuracy: %.4f%% *****'
                    % (step, train_loss, train_accuracy))
            if step % 2000 == 0 or step == MAX_STEP - 1:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Input queue exhausted: stopping training')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
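
Example no. 3 builds its optimizer inline (a plain `tf.train.GradientDescentOptimizer`), which is presumably what the `function.optimize` call in Example no. 1 wraps. The `function.loss` helper is still not shown; a minimal sketch, assuming one-hot labels and a softmax cross-entropy objective (both assumptions), could be:

# Hypothetical sketch of the `function.loss` helper used above.
# Assumptions: one-hot labels and a softmax cross-entropy objective.
import tensorflow as tf

def loss(logits, labels):
    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=labels)
        cost = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar('loss', cost)  # so the loss shows up in TensorBoard
    return cost
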
Example no. 4
def train():
    image_batch, label_batch = input.read_and_decode_by_tfrecorder(
        TRAIN_PATH, TRAIN_BATCH_SIZE, True)
    logits = ENnet.inference(image_batch, TRAIN_BATCH_SIZE, NUME_CLASS)
    loss = function.loss(logits, label_batch)
    accuracy = function.accuracy(logits, label_batch)
    optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
    global_step = tf.Variable(0, name="global_step")
    train_op = optimizer.minimize(loss, global_step=global_step)
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        thread = tf.train.start_queue_runners(sess, coord)

        try:
            for step in range(MAX_STEP + 1):
                if coord.should_stop():
                    print("coord is stop")
                    break
                acc, _, losses = sess.run([accuracy, train_op, loss])
                if step % 100 == 0:
                    print(
                        "step : %d ,loss: %.4f , accuracy on trainSet: %.4f%%"
                        % (step, losses, acc))
                if step % 10000 == 0 or step == MAX_STEP:
                    checkpoint_dir = os.path.join(RESULT_PATH, "model.ckpt")
                    saver.save(sess, checkpoint_dir, global_step=step)
        except tf.errors.OutOfRangeError:
            print("error")
        finally:
            coord.request_stop()
        coord.join(threads=thread)
        sess.close()
Example no. 5
E_save = []
accuracy_save = []
start_time = time.time()

batch_size = 100

# number of iterations
num_of_itr = 3000
for i in range(num_of_itr):

    X_batch, T_batch = function.batch(X_train, T_train, batch_size)

    Y = function.affine(X_batch, W, B)
    E = function.error(Y, T_batch)
    E_save = np.append(E_save, E)

    Acc = function.accuracy(Y, T_batch)
    accuracy_save = np.append(accuracy_save, Acc)

    dW = function.delta_w(X_batch, T_batch, Y)
    dB = function.delta_b(T_batch, Y)

    W = function.update(W, learning_rate, dW)
    B = function.update(B, learning_rate, dB)

end_time = time.time()
total_time = end_time - start_time
print(total_time)

#show graph
function.plot_acc(accuracy_save)
function.plot_loss(E_save)
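
Example no. 5 runs the same train-and-evaluate loop in plain NumPy instead of TensorFlow, and again its helpers are not listed. As a rough sketch (the names, shapes, and one-hot targets are assumptions), the `accuracy` and `update` helpers it calls could look like:

# Hypothetical NumPy counterparts of two helpers used in the loop above.
# Assumptions: Y holds per-class scores, T holds one-hot targets.
import numpy as np

def accuracy(Y, T):
    # fraction of samples whose highest-scoring class matches the target class
    return np.mean(np.argmax(Y, axis=1) == np.argmax(T, axis=1))

def update(param, learning_rate, grad):
    # plain gradient-descent step
    return param - learning_rate * grad
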
Example no. 6
        plt.axis('off')
        title = "L:" + str(labels[i]) + " R:" + str(results[i])
        plt.title(title, fontsize=14)
        plt.subplots_adjust(hspace=0.5)
        plt.imshow(images[i])
    plt.show()


image_batch, label_batch, image_raw = input.read_and_decode_by_tfrecorder_eye(
    TEST_PATH, TEST_BATCH_SIZE, False)
print(image_raw)
logits = OCnet.inference(image_batch, TEST_BATCH_SIZE, 2, "train")
labels = tf.argmax(label_batch, 1)
results = tf.argmax(logits, 1)
saver = tf.train.Saver()
accuracy = function.accuracy(logits, label_batch)

with tf.Session() as sess:
    coord = tf.train.Coordinator()  # create a coordinator to manage the threads
    threads = tf.train.start_queue_runners(
        coord=coord)  # start the QueueRunners; the filename queue starts being filled
    ckpt = tf.train.get_checkpoint_state(CHECKPOINT)
    i = 0
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        try:
            while not coord.should_stop() and i < 1:
                # image = sess.run(image_batch)
                # print(image)
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
Example no. 7
for i in [0.1, 0.3, 0.5]:
    print(i, "Started")
    for j in [0.1, 0.5, 0.8]:
        print(i, j, "Feature Started")

        trainx, testx, trainy, testy = train_test_split(file_name,
                                                        label,
                                                        shuffle=True,
                                                        stratify=label,
                                                        test_size=i)

        model = NB_tf()
        model.fit(trainx, trainy, j)
        result = model.predict(testx)
        acr = accuracy(result, testy)
        temp = "TF_IDF_" + str(j) + "_" + str(i) + " " + str(acr) + "\t"
        with open('accuracy.txt', mode='a') as f:
            f.write(temp)
        print(temp)
        # print(np.unique(result,return_counts=True))

        #
        # # print(np.unique(testy,return_counts=True))
        conf = confusion_matrix(testy,
                                result,
                                labels=[
                                    'comp.graphics', 'rec.sport.hockey',
                                    'sci.med', 'sci.space',
                                    'talk.politics.misc'
Example no. 8
import function as f
"""
This contains the main page for the Smart Power Prediction and Monitoring
"""

print("-" * 80)
print("-" * 80)
print("Welcome to the Main Page of Smart Power Prediction and Monitoring \n")
print(
    "Smart Power Prediction And Monitoring is a power consumption monitoring \nand prediction software; note that this is still a prototype\n"
)

#To print the accuracy of the model
f.accuracy()

n = int(input("Enter the week number [from 1 to 104] : "))
print("\n")
volt = float(input("Enter the consumption of the mentioned week : "))
z = f.make_pred(n)

# To warn the user in case they cross the recommended consumption levels
if volt > z[0]:
    print("\nWarning!!! You are crossing the recommended consumption levels by " +
          str(volt - z[0]) + " Watts\n")

print("Your mentioned consumption is : " + str(volt))
print("\nRecommended voltage consumption is : " + str(z[0]))
Example no. 9
 def forward(self, x, t):
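     # run the wrapped predictor, compute cross-entropy loss and accuracy, log both, and return the loss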
     y = self.predictor(x)
     loss = nn.CrossEntropyLoss()(y, t)
     accuracy = F.accuracy(y, t)
     gstate.summary(number=y.size(0), loss=loss.item(), accuracy=accuracy)
     return loss