def main():
    #activations = {'relu': tf.nn.relu, 'tanh': tf.nn.tanh}
    #activations = {'tanh': tf.nn.tanh}
    activations = {'relu': tf.nn.relu}

    X_train, Y_train, label_names = get_data_set('train')
    X_test, Y_test, _ = get_data_set('test')

    learning_rates = get_learning_rates()

    for name_activation, activation in activations.items():
        print("Starting training %s:" % (name_activation))

        for lr in learning_rates:

            tf.reset_default_graph()

            print('Learning rate = %e' % (lr))

            job_dir = FLAGS.job_dir + 'hyper_params/' + name_activation + \
                        '/' + 'lr-' + '%e' % (lr)

            train(X_train, Y_train, lr, FLAGS.train_steps,
                  activation, FLAGS.train_batch_size, job_dir,
                  FLAGS.device_name, FLAGS.log_period)
Example 2
def main():
    activations = {'relu': tf.nn.relu, 'tanh': tf.nn.tanh}
    #activations = {'tanh': tf.nn.tanh}
    #activations = {'relu': tf.nn.relu}
        
    X_train, Y_train, label_names = get_data_set('train')
    X_test, Y_test, _ = get_data_set('test')

    for name_activation, activation in activations.items():
        print("Starting training %s:" % (name_activation))

        tf.reset_default_graph()
    
        job_dir = FLAGS.job_dir + name_activation + '/'
    
        train(X_train, Y_train, 
                FLAGS.learning_rate, FLAGS.train_steps,
                activation, FLAGS.train_batch_size, 
                job_dir,
                FLAGS.device_name, FLAGS.log_period)
Example 3
import numpy as np
import tensorflow as tf
from time import time
import math

from include.data import get_data_set
from include.model import model, lr

train_x, train_y = get_data_set("train")
test_x, test_y = get_data_set("test")
tf.set_random_seed(21)
x, y, output, y_pred_cls, global_step, learning_rate = model()
global_accuracy = 0
epoch_start = 0

# PARAMS
_BATCH_SIZE = 128
_EPOCH = 60
_SAVE_PATH = "./tensorboard/cifar-10-v1.0.0/"

# LOSS AND OPTIMIZER
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                   beta1=0.9,
                                   beta2=0.999,
                                   epsilon=1e-08).minimize(
                                       loss, global_step=global_step)

# PREDICTION AND ACCURACY CALCULATION
correct_prediction = tf.equal(y_pred_cls, tf.argmax(y, axis=1))
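# Note: this example is truncated here. A minimal sketch of how the accuracy op
# and session setup typically continue (following the other snippets on this
# page; the variable names below are mine):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()
sess = tf.Session()
train_writer = tf.summary.FileWriter(_SAVE_PATH, sess.graph)
sess.run(tf.global_variables_initializer())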
Example 4
from include.data import get_data_set
# get_data_set is a manually written helper that selects train or test data from the Data folder

from sklearn.neighbors import KNeighborsClassifier

Xt, Yt = get_data_set("train")
# Xt is a collection of feature vectors of size 64
# Yt holds the Y labels in one-hot representation, i.e. for 4 classes: [1,0,0,0] <- first class

test_x, test_y = get_data_set("test")
test = test_x[0, 0:]

neigh = KNeighborsClassifier(n_neighbors=4)
neigh.fit(Xt, Yt)

prediction = neigh.predict(test.reshape(1, 64))
print("Test Row of Eating Activity")
print(test)
print("*********************************")
print("Predicted Output is [Eating,Like,Rock,Victory] :")
print(prediction)
print("*********************************")
print("Accuracy for K-Nearest")
print(neigh.score(test_x, test_y) * 100, "%")
Example 5
import tensorflow as tf
import numpy as np
from sklearn.metrics import confusion_matrix
from include.model import model

from include.data import get_data_set
x, y, output, global_step, y_pred_cls = model(6)

test_x, test_y = get_data_set()
test_l = ["Relax", "Ok", "Fist", "Like", "Rock", "Spock"]

saver = tf.train.Saver()
_SAVE_PATH = "./data/tensorflow_sessions/myo_armband/"
sess = tf.Session()

try:
    print("Trying to restore last checkpoint ...")
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)
    print(last_chk_path)
    saver.restore(sess, save_path=last_chk_path)
    print("Restored checkpoint from:", last_chk_path)
except:
    print("Failed to restore checkpoint. Initializing variables instead.")
    sess.run(tf.global_variables_initializer())

i = 0
predicted_class = np.zeros(shape=len(test_x), dtype=int)
while i < len(test_x):
    j = min(i + 300, len(test_x))
    batch_xs = test_x[i:j, :]
    batch_ys = test_y[i:j, :]
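    # Hedged completion sketch: the loop body typically runs the prediction op
    # on each batch and advances the index (names follow the snippet above).
    predicted_class[i:j] = sess.run(y_pred_cls,
                                    feed_dict={x: batch_xs, y: batch_ys})
    i = j

# Overall accuracy and confusion matrix, using the imports already present above.
correct = (np.argmax(test_y, axis=1) == predicted_class)
print("Accuracy on Test-Set: {0:.2f}% ({1} / {2})".format(
    correct.mean() * 100, correct.sum(), len(correct)))
print(confusion_matrix(y_true=np.argmax(test_y, axis=1), y_pred=predicted_class))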
Example 6
import numpy as np
import tensorflow as tf

from include.data import get_data_set
from include.model import model


test_x, test_y = get_data_set("test")
x, y, output, y_pred_cls, global_step, learning_rate = model()


_BATCH_SIZE = 128
_CLASS_SIZE = 10
_SAVE_PATH = "./tensorboard/cifar-10-v1.0.0/"


saver = tf.train.Saver()
sess = tf.Session()


try:
    print("\nTrying to restore last checkpoint ...")
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)
    saver.restore(sess, save_path=last_chk_path)
    print("Restored checkpoint from:", last_chk_path)
except ValueError:
    print("\nFailed to restore checkpoint. Initializing variables instead.")
    sess.run(tf.global_variables_initializer())


def main():
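    # Hedged sketch: the body of main() is not shown in this snippet. A typical
    # body, following the other CIFAR-10 examples on this page, evaluates the
    # test set in batches and prints the overall accuracy.
    i = 0
    predicted_class = np.zeros(shape=len(test_x), dtype=int)
    while i < len(test_x):
        j = min(i + _BATCH_SIZE, len(test_x))
        batch_xs = test_x[i:j, :]
        batch_ys = test_y[i:j, :]
        predicted_class[i:j] = sess.run(y_pred_cls,
                                        feed_dict={x: batch_xs, y: batch_ys})
        i = j

    correct = (np.argmax(test_y, axis=1) == predicted_class)
    print("Accuracy on Test-Set: {0:.2f}% ({1} / {2})".format(
        correct.mean() * 100, correct.sum(), len(correct)))


if __name__ == "__main__":
    main()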
Example 7
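# Note: this fragment relies on objects defined earlier in the original file
# and not shown here: a tf.Session `sess` shared with Keras, the placeholders
# `image_ph` and `hard_label_ph`, the model output `labels`, the collected
# regularization terms `reg_losses`, the integer `batch_size`, plus the
# `categorical_crossentropy`, `K` (keras.backend), `np`, and `tf` imports.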
hard_loss = tf.reduce_mean(categorical_crossentropy(labels, hard_label_ph))
print('hard_loss', type(hard_loss), hard_loss.shape)

pre_losses = [hard_loss]
pre_losses.extend(reg_losses)

pre_loss = tf.add_n(pre_losses)
# pre_update = tf.train.GradientDescentOptimizer(0.05).minimize(pre_loss)
pre_update = tf.train.GradientDescentOptimizer(0.1).minimize(hard_loss)

init_op = tf.global_variables_initializer()
sess.run(init_op)

from include.data import get_data_set
train_x, train_y, train_l = get_data_set()
test_x, test_y, test_l = get_data_set("test")
# print(type(test_x), type(test_y), type(test_l))

with sess.as_default():
    for tn_batch in range(10000):
        randidx = np.random.randint(len(train_x), size=batch_size)
        batch_xs = train_x[randidx]
        batch_ys = train_y[randidx]
        feed_dict = {
            image_ph: batch_xs,
            hard_label_ph: batch_ys,
            K.learning_phase(): 1,
        }
        # res = sess.run(pre_update, feed_dict=feed_dict)
        pre_update.run(feed_dict=feed_dict)
Example 8
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix

from include.data import get_data_set
from include.model import model

# test_x, test_y, test_l = get_data_set("test", cifar=10)
train_x, train_y, train_l, mu, std = get_data_set(cifar=10, whitten=False)
test_x, test_y, test_l, mu, std = get_data_set(name="test",
                                               mu=mu,
                                               std=std,
                                               cifar=10,
                                               whitten=False)
x, y, output, global_step, y_pred_cls, keep_prob = model()

_IMG_SIZE = 32
_NUM_CHANNELS = 3
_BATCH_SIZE = 128
_CLASS_SIZE = 10
_SAVE_PATH = "./tensorboard/aug-decay-RMS/"

saver = tf.train.Saver()
sess = tf.Session()

try:
    print("Trying to restore last checkpoint ...")
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)
    saver.restore(sess, save_path=last_chk_path)
    print("Restored checkpoint from:", last_chk_path)
except:
    print("Failed to restore checkpoint. Initializing variables instead.")
    sess.run(tf.global_variables_initializer())
Example 9
import numpy as np
import tensorflow as tf
from time import time
from include.data import get_data_set
from include.model import model

train_x, train_y = get_data_set()

print(train_x)
print(train_y)

_BATCH_SIZE = 300
_CLASS_SIZE = 6
_SAVE_PATH = "./data/tensorflow_sessions/myo_armband/"

x, y, output, global_step, y_pred_cls = model(_CLASS_SIZE)

loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y))
tf.summary.scalar("Loss", loss)
optimizer = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(
    loss, global_step=global_step)

correct_prediction = tf.equal(y_pred_cls, tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("Accuracy/train", accuracy)

init = tf.global_variables_initializer()
merged = tf.summary.merge_all()
saver = tf.train.Saver()
sess = tf.Session()
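# Hedged sketch: the snippet is cut off here. A typical continuation, following
# the training loops elsewhere on this page (the loop details are assumptions):
train_writer = tf.summary.FileWriter(_SAVE_PATH, sess.graph)
sess.run(init)

for _ in range(10000):
    randidx = np.random.randint(len(train_x), size=_BATCH_SIZE)
    batch_xs = train_x[randidx]
    batch_ys = train_y[randidx]

    start = time()
    _, step, summary = sess.run([optimizer, global_step, merged],
                                feed_dict={x: batch_xs, y: batch_ys})
    duration = time() - start

    if step % 100 == 0:
        train_writer.add_summary(summary, step)
        batch_loss, batch_acc = sess.run([loss, accuracy],
                                         feed_dict={x: batch_xs, y: batch_ys})
        print("Step %d: loss %.4f, batch accuracy %.2f%% (%.2fs per batch)" %
              (step, batch_loss, batch_acc * 100, duration))
        saver.save(sess, save_path=_SAVE_PATH, global_step=global_step)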
Example 10
def train():
    my_global_step = tf.Variable(0, name='global_step', trainable=False)

    log_dir = 'log'

    train_x, train_y, train_l = get_data_set()
    test_x, test_y, test_l = get_data_set("test")

    images = tf.placeholder(tf.float32, [None, 32, 32, 3])
    labels = tf.placeholder(tf.float32, [None, train_y.shape[1]])
    logits = inference(images)
    loss = losses(logits, labels)
    acc = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    acc = tf.reduce_mean(tf.cast(acc, tf.float32))

    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(BASE_LEARNING_RATE,
                                               global_step=global_step,
                                               decay_steps=10,
                                               decay_rate=0.9)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss, global_step=my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    tf.summary.scalar("loss", loss)
    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()

    sess.run(init)

    summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
    for epoch in np.arange(MAX_EPOCH):
        for _ in np.arange(ITERATION):
            randidx = np.random.randint(len(train_x), size=FLAGS.batch_size)
            batch_xs = train_x[randidx].reshape((-1, 32, 32, 3))
            batch_ys = train_y[randidx].astype("float32")

            _, loss_value, summary_str, step = sess.run(
                [train_op, loss, summary_op, my_global_step],
                feed_dict={
                    images: batch_xs,
                    labels: batch_ys
                })
            summary_writer.add_summary(summary_str, step)

            if step % 50 == 0 and step > 0:
                print('Step: %d, loss: %.4f' % (step, loss_value))

            if step % 1000 == 0 and step > 0:
                batch_xs = test_x.reshape((-1, 32, 32, 3))
                batch_ys = test_y.astype("float32")
                accuracy = sess.run(acc,
                                    feed_dict={
                                        images: batch_xs,
                                        labels: batch_ys
                                    })
                print('Accuracy: %.4f' % accuracy)
                checkpoint_path = os.path.join(log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Example 11
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix

from include.data import get_data_set
from include.model import model

test_x, test_y, test_l = get_data_set("test", cifar=10)
x, y, output, global_step, y_pred_cls = model()

_IMG_SIZE = 32
_NUM_CHANNELS = 3
_BATCH_SIZE = 128
_CLASS_SIZE = 10
_SAVE_PATH = "./tensorboard/cifar-10/"

saver = tf.train.Saver()
sess = tf.Session()

try:
    print("Trying to restore last checkpoint ...")
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)
    saver.restore(sess, save_path=last_chk_path)
    print("Restored checkpoint from:", last_chk_path)
except:
    print("Failed to restore checkpoint. Initializing variables instead.")
    sess.run(tf.global_variables_initializer())

i = 0
predicted_class = np.zeros(shape=len(test_x), dtype=int)
while i < len(test_x):
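    # Hedged completion sketch: the loop body typically mirrors the other
    # evaluation snippets on this page (all names follow the code above).
    j = min(i + _BATCH_SIZE, len(test_x))
    batch_xs = test_x[i:j, :]
    batch_ys = test_y[i:j, :]
    predicted_class[i:j] = sess.run(y_pred_cls,
                                    feed_dict={x: batch_xs, y: batch_ys})
    i = j

correct = (np.argmax(test_y, axis=1) == predicted_class)
print("Accuracy on Test-Set: {0:.2f}%".format(correct.mean() * 100))
print(confusion_matrix(y_true=np.argmax(test_y, axis=1), y_pred=predicted_class))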
Example 12
def asynchrounous_train():
    parameter_servers = ["127.0.0.1:2222"]
    workers = ["127.0.0.1:2223", "127.0.0.1:2224", "127.0.0.1:2225"]
    cluster = tf.train.ClusterSpec({
        "ps": parameter_servers,
        "worker": workers
    })
    log_dir = 'log/asynchronous'
    train_x, train_y, train_l = get_data_set()
    test_x, test_y, test_l = get_data_set("test")
    start_time = time.time()
    history_loss = 0

    print "Loading cifar10 images"
    server = tf.train.Server(cluster,
                             job_name=FLAGS.job_name,
                             task_index=FLAGS.task_index)
    print "Successfully building the server"
    if FLAGS.job_name == "ps":
        server.join()
    elif FLAGS.job_name == "worker":
        with tf.device(
                tf.train.replica_device_setter(
                    worker_device="/job:worker/task:%d" % FLAGS.task_index,
                    cluster=cluster)):
            print "Enter the worker mode {}".format(FLAGS.task_index)

            my_global_step = tf.Variable(0,
                                         name='global_step',
                                         trainable=False)
            images = tf.placeholder(tf.float32, [None, 32, 32, 3])
            labels = tf.placeholder(tf.float32, [None, train_y.shape[1]])
            logits = inference(images)
            loss = losses(logits, labels)
            acc = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
            acc = tf.reduce_mean(tf.cast(acc, tf.float32))
            global_step = tf.Variable(0, trainable=False)

            learning_rate = tf.train.exponential_decay(BASE_LEARNING_RATE,
                                                       global_step=global_step,
                                                       decay_steps=1000,
                                                       decay_rate=0.9)

            optimizer = tf.train.GradientDescentOptimizer(learning_rate)
            train_op = optimizer.minimize(loss, global_step=my_global_step)

            saver = tf.train.Saver(tf.global_variables())
            tf.summary.scalar("accuracy", acc)
            tf.summary.scalar("loss", loss)
            summary_op = tf.summary.merge_all()

            init = tf.global_variables_initializer()

            print("Variables initialized ...")

        sv = tf.train.Supervisor(is_chief=(FLAGS.task_index == 0),
                                 global_step=global_step,
                                 init_op=init)

        with sv.prepare_or_wait_for_session(server.target) as sess:
            summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
            for _ in np.arange(10000):
                randidx = np.random.randint(len(train_x),
                                            size=FLAGS.batch_size)
                batch_xs = train_x[randidx].reshape((-1, 32, 32, 3))
                batch_ys = train_y[randidx].astype("float32")

                _, loss_value, summary_str, step = sess.run(
                    [train_op, loss, summary_op, my_global_step],
                    feed_dict={
                        images: batch_xs,
                        labels: batch_ys
                    })
                if (step > 100 and loss_value > history_loss + 1.5
                    ) or np.isnan(loss_value):
                    break
                else:
                    history_loss = loss_value
                summary_writer.add_summary(summary_str, step)

                if step % 100 == 0 and step > 0 and FLAGS.task_index != 1:
                    print('Task: %d, Step: %d, loss: %.4f' %
                          (FLAGS.task_index, step, loss_value))
                    batch_xs = test_x.reshape((-1, 32, 32, 3))
                    batch_ys = test_y.astype("float32")
                    accuracy, summary_str = sess.run([acc, summary_op],
                                                     feed_dict={
                                                         images: batch_xs,
                                                         labels: batch_ys
                                                     })
                    summary_writer.add_summary(summary_str, step)
                    print('Elapsed time: %.4f, Accuracy: %.4f' %
                          (time.time() - start_time, accuracy))
                    checkpoint_path = os.path.join(log_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)
Example 13
import numpy as np
import tensorflow as tf
from time import time
import math

from include.data import get_data_set
from include.model import model, lr_decay

# dataset
X_train, y_train, X_test, y_test = get_data_set()
tf.set_random_seed(21)
# training examples
n_obs = X_train.shape[0]

X, Y, output, y_pred_cls, global_step, learning_rate = model("model_ujh_f000")
global_accuracy = 0
epoch_start = 0

# parameters
_BATCH_SIZE = 512
_EPOCH = 5
save_path = './graph'

# loss and optimizer
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=Y))

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                   beta1=0.9,
                                   beta2=0.999,
                                   epsilon=1e-08).minimize(
                                       loss, global_step=global_step)
Example 14
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from time import time

from include.data import get_data_set
from include.model import model

train_x, train_y, train_l = get_data_set(cifar=10)
test_x, test_y, test_l = get_data_set("test", cifar=10)

x, y, output, global_step, y_pred_cls = model()

_IMG_SIZE = 32
_NUM_CHANNELS = 3
_BATCH_SIZE = 128
_CLASS_SIZE = 10
_ITERATION = 200
_SAVE_PATH = "./tensorboard/cifar-10/"

loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y))
optimizer = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(
    loss, global_step=global_step)

correct_prediction = tf.equal(y_pred_cls, tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("Accuracy/train", accuracy)

merged = tf.summary.merge_all()
saver = tf.train.Saver()
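# Hedged sketch: the snippet is truncated here. A typical continuation creates
# the session and runs _ITERATION training steps (names follow the code above;
# the reporting details are assumptions):
sess = tf.Session()
train_writer = tf.summary.FileWriter(_SAVE_PATH, sess.graph)
sess.run(tf.global_variables_initializer())

for _ in range(_ITERATION):
    randidx = np.random.randint(len(train_x), size=_BATCH_SIZE)
    batch_xs = train_x[randidx]
    batch_ys = train_y[randidx]

    _, step, summary = sess.run([optimizer, global_step, merged],
                                feed_dict={x: batch_xs, y: batch_ys})
    if step % 10 == 0:
        train_writer.add_summary(summary, step)
        batch_acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys})
        print("Step %d: batch accuracy %.2f%%" % (step, batch_acc * 100))

saver.save(sess, save_path=_SAVE_PATH, global_step=global_step)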
Example 15
def load_dataset(dataset):
    if dataset == "MNIST":
        mnist = tf.keras.datasets.mnist
        (train_samples, train_labels), (test_samples,
                                        test_labels) = mnist.load_data()
        train_samples, test_samples = train_samples / 255.0, test_samples / 255.0
        train_labels = to_categorical(train_labels)
        test_labels = to_categorical(test_labels)

        num_train = 60000
        num_test = len(test_samples)
        input_shape = [1, 28, 28]
        input_size = np.prod(input_shape)

        train_samples = np.reshape(train_samples, [num_train, input_size])
        test_samples = np.reshape(test_samples, [num_test, input_size])

    elif dataset == "FASHION_MNIST":
        from tensorflow import keras
        fashion_mnist = keras.datasets.fashion_mnist
        (train_samples,
         train_labels), (test_samples,
                         test_labels) = fashion_mnist.load_data()
        train_samples, test_samples = train_samples / 255.0, test_samples / 255.0
        train_labels = to_categorical(train_labels)
        test_labels = to_categorical(test_labels)

        num_train = 60000
        num_test = len(test_samples)
        input_shape = [1, 28, 28]
        input_size = np.prod(input_shape)

        train_samples = np.reshape(train_samples, [num_train, input_size])
        test_samples = np.reshape(test_samples, [num_test, input_size])

    elif dataset == "CALTECH-SIL":
        caltech = sio.loadmat("caltech101_silhouettes_28_split1.mat")
        train_samples = caltech["train_data"]
        train_labels = caltech["train_labels"] - 1
        test_samples = caltech["test_data"]
        test_labels = caltech["test_labels"] - 1
        input_shape = [1, 28, 28]
        num_train = 4100

    elif dataset == "CIFAR-10":
        #get CIFAR-10
        train_samples, train_labels, train_l = get_data_set()
        test_samples, test_labels, test_l = get_data_set("test")
        train_samples = np.reshape(train_samples, [50000, 32, 32, 3])
        train_samples = np.transpose(train_samples, [0, 3, 1, 2])
        train_samples = np.reshape(train_samples, [50000, 32 * 32 * 3])

        test_samples = np.reshape(test_samples, [10000, 32, 32, 3])
        test_samples = np.transpose(test_samples, [0, 3, 1, 2])
        test_samples = np.reshape(test_samples, [10000, 32 * 32 * 3])
        train_labels = (train_labels)
        test_labels = (test_labels)
        input_shape = [3, 32, 32]
        num_train = 50000

    elif dataset == "IRIS":
        f = open("data_set\\iris\\iris.data.csv")
        reader = csv.reader(f)
        data = np.zeros([150, 5])
        train_portion = 0.6
        num_train = int(150 * train_portion)
        num_test = 150 - num_train
        for i, row in enumerate(reader):
            for j, s in enumerate(row):
                data[i][j] = float(s)

        train_samples = np.zeros([num_train, 4])
        train_labels = np.zeros([num_train, 1])
        test_samples = np.zeros([num_test, 4])
        test_labels = np.zeros([num_test, 1])

        for i in range(3):
            train_samples[i * num_train // 3:(i + 1) * num_train // 3,
                          0:4] = data[i * 50:i * 50 + num_train // 3, 0:4]
            train_labels[i * num_train // 3:(i + 1) * num_train // 3,
                         0] = data[i * 50:i * 50 + num_train // 3, 4]
            test_samples[i * num_test // 3:(i + 1) * num_test // 3,
                         0:4] = data[(i + 1) * 50 - num_test // 3:(i + 1) * 50,
                                     0:4]
            test_labels[i * num_test // 3:(i + 1) * num_test // 3,
                        0] = data[(i + 1) * 50 - num_test // 3:(i + 1) * 50, 4]

        train_labels = to_categorical(train_labels)
        test_labels = to_categorical(test_labels)
        input_shape = [4]

    elif dataset == "WINE":
        f = open("data_set\\wine.csv")
        reader = csv.reader(f)
        data = np.zeros([178, 14])
        train_portion = 0.6
        num_train = int(178 * train_portion)
        num_test = 178 - num_train
        for i, row in enumerate(reader):
            for j, s in enumerate(row):
                data[i][j] = float(s)
        np.random.shuffle(data)
        # the class label is in column 0; the 13 feature values are columns 1..13
        train_samples = data[0:num_train, 1:]
        train_labels = data[0:num_train, 0]
        test_samples = data[num_train:, 1:]
        test_labels = data[num_train:, 0]
        train_labels = to_categorical(train_labels)
        test_labels = to_categorical(test_labels)
        input_shape = [13]

    elif dataset == "NORB":
        num_train = 24300
        num_test = 24300
        dataset = SmallNORBDataset(dataset_root="data_set\\norb")
        data_train = dataset.data['train']
        data_test = dataset.data['test']
        train_samples = np.zeros([num_train, 2 * 96 * 96])
        test_samples = np.zeros([num_test, 2 * 96 * 96])
        train_labels = np.zeros([num_train, 1])
        test_labels = np.zeros([num_test, 1])
        input_shape = [2, 96, 96]
        input_size = int(np.prod(input_shape))
        for s in range(len(data_train)):
            #Training
            #left image
            train_samples[s, 0:input_size // 2] = np.reshape(
                data_train[s].image_lt, [input_size // 2])
            #right image
            train_samples[s, input_size // 2:input_size] = np.reshape(
                data_train[s].image_rt, [input_size // 2])
            train_labels[s] = data_train[s].category

            #Testing
            # left image
            test_samples[s, 0:input_size // 2] = np.reshape(
                data_test[s].image_lt, [input_size // 2])
            # right image
            test_samples[s, input_size // 2:input_size] = np.reshape(
                data_test[s].image_rt, [input_size // 2])
            test_labels[s] = data_test[s].category
        train_labels = to_categorical(train_labels)
        test_labels = to_categorical(test_labels)

    res = {}
    res["train_samples"] = train_samples
    res["train_labels"] = train_labels
    res["test_samples"] = test_samples
    res["test_labels"] = test_labels
    res["num_train"] = num_train
    res["num_test"] = len(test_labels)
    res["input_shape"] = input_shape
    res["num_classes"] = len(test_labels[0])
    return res
Example 16
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics

from include.data import get_data_set
# get_data_set is a manually written helper that selects train or test data from the Data folder
import numpy as np

from sklearn.naive_bayes import GaussianNB

# train_x is a collection of feature vectors of size 64
# new_train_y holds the Y labels in one-hot representation, i.e. for 4 classes: [1,0,0,0] <- first class

train_x, new_train_y = get_data_set("train")
test_x, new_test_y = get_data_set("test")

train_y = np.argmax(new_train_y, axis=1)
test_y = np.argmax(new_test_y, axis=1)
# test_y holds single-valued Y labels, since GaussianNB does not accept one-hot targets; the classes are now 0,1,2,3 (via argmax).

# select only the first row of the test set to predict; this row is an Eating gesture (class 0)
test = test_x[0, 0:]

gaunb = GaussianNB()
gaunb = gaunb.fit(train_x, train_y)
# create naive bayes classifier

# predict using classifier
prediction = gaunb.predict(test.reshape(1, 64))

print("Test Row of Eating Activity")