Example #1
import argparse

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("func",
                        nargs="?",
                        default="demo",
                        type=str,
                        help="demo or train")
    args = parser.parse_args()
    func = args.func

    if func == "demo":
        demo()
    elif func == "train":
        train()
def kernel_variation():
    # Train several identically configured models to gauge run-to-run
    # variation, tracking per-model accuracy and the average.
    accuracy_sum = 0
    model_count = 3

    accuracy = {}

    for trained_model in range(model_count):
        net = Net(kernel_size=7, conv_output_size=2)
        model = 'test' + str(trained_model)
        train(net, model_name=model)
        accuracy[trained_model] = predict(net, model_name=model)
        accuracy_sum += accuracy[trained_model]

    accuracy['average_accuracy'] = accuracy_sum / model_count
    print(accuracy)
def fc_variation():
    print('Fully Connected Layer Size Variation')

    # Combinations of fully connected layer sizes
    fc1_size = [120, 10, 200]
    fc2_size = [84, 10, 120]

    # result dictionary
    accuracy = {}

    # Iterate through all the fully connected layer sizes
    for fc1 in fc1_size:
        for fc2 in fc2_size:
            # create an instance of the neural network
            net = Net(fc1_size=fc1, fc2_size=fc2)
            model = f'fc_variation{fc1}_{fc2}'

            # train the model
            train(net, model_name=model)

            # measure model accuracy with testing dataset
            accuracy[model] = predict(net, model_name=model)

    print(accuracy)
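All three functions in this example lean on a `Net` class plus `train` and `predict` helpers defined elsewhere in the project. A minimal sketch of the interface they appear to expect, with hypothetical bodies (only the signatures are inferred from the calls above):

# Hypothetical stubs; the real project defines these elsewhere,
# presumably with an actual network and training loop.
class Net:
    def __init__(self, kernel_size=5, conv_output_size=4,
                 fc1_size=120, fc2_size=84):
        # Assumed: layers are constructed from these hyperparameters.
        self.kernel_size = kernel_size
        self.conv_output_size = conv_output_size
        self.fc1_size = fc1_size
        self.fc2_size = fc2_size

def train(net, model_name):
    # Assumed: trains net and saves a checkpoint under model_name.
    pass

def predict(net, model_name):
    # Assumed: loads the model_name checkpoint and returns test-set accuracy.
    return 0.0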
Example #4
from UserInterface import get_num_faces, get_name
from Training import train
from Generation import generate
from Testing import test

import numpy as np

MAX_IMAGES = 500
labels = []
faces = []
names = []

# Face Generation
get_num_faces()
file = open('data.txt', 'r+')
N = int(file.readline())
for i in range(N):
    get_name()
    names.append(file.readline().strip())
    faces.extend(generate(MAX_IMAGES // N))
    labels.extend([i] * (MAX_IMAGES // N))

# Training
model = train(np.array(faces), np.array(labels))

# Testing
test(model, names)
file.close()
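This script expects `get_num_faces` and `get_name` to have written a face count followed by one name per line into `data.txt`. Hypothetical contents for two people (the names are placeholders):

2
alice
bob

With N = 2, each person then contributes MAX_IMAGES // N = 250 generated images and matching labels.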
Example #5
def run(mnist, mode):
    train(mnist, mode)
def main(argv=None):
    train()
Example #7
            print('Gesture Detected: Left Down')
            controller.next()
            # if a gesture is detected, clear the buffer fully
            # to prevent multiple detections on one gesture
            samples_done = 0
        elif prediction[0] == 2:
            print('Gesture Detected: Right Down')
            controller.previous()
            samples_done = 0
        elif prediction[0] == 3:
            print('Gesture Detected: Up Flick')
            controller.pause_play()
            samples_done = 0
        buffer_full.clear()

# init music
controller.init_music()

# train model
train()

# create serial input and prediction threads
thread_input = Thread(target=take_input)
thread_predict = Thread(target=analyze)
try:
    thread_input.start()
    thread_predict.start()
except Exception as e:
    print(e)

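The elif chain above maps prediction codes to controller actions. A table-driven alternative keeps that mapping in one place; a sketch, assuming the same `controller` object and that the truncated first branch checks code 1:

# Sketch only: gesture codes and controller methods are taken from the
# branches above; code 1 for 'Left Down' is an assumption (its branch is
# cut off in the snippet).
GESTURE_ACTIONS = {
    1: ('Left Down', controller.next),
    2: ('Right Down', controller.previous),
    3: ('Up Flick', controller.pause_play),
}

def dispatch(code):
    if code in GESTURE_ACTIONS:
        name, action = GESTURE_ACTIONS[code]
        print('Gesture Detected: ' + name)
        action()
        return True  # caller resets samples_done and clears buffer_full
    return False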
Example #8
from Training import train
from Player import Player
from Playground import Playground

#agent = Player('DQN')
#agent.load('test1.h5')
#playground = Playground(200.0, 200.0, True, agent, Player('random player'))
#playground.start_game()
#for i in range(10):
#    print(i, " ", playground.start_game())

print(train())
Example #9
def trainChatbot():
    train()
    loadChatbotData()
    return returnOk()

import random

from Training import train
from dataloader import load_data, balanced_sampler
from Settings import DATASET_TYPE, CATEGORIES

data_dir = "./" + DATASET_TYPE + "/"
dataset, cnt = load_data(data_dir)

# test with happiness and anger
images = balanced_sampler(dataset, cnt, emotions=CATEGORIES)
display_index = 0

X = []
y = []
for i, category in enumerate(CATEGORIES):
    images_in_cat = images[category]
    random.shuffle(images_in_cat)
    X += images_in_cat
    y += [i] * len(images_in_cat)

# get my feature - label pairs zipped together
all_data = list(zip(X, y))

# randomize the dataset so I can fold properly
train(all_data)
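train(all_data) receives the shuffled (image, label) pairs, and the comment suggests it folds them for cross-validation. A minimal sketch of such a split; the fold count and slicing scheme here are assumptions, not the project's actual code:

def k_fold_splits(all_data, k=10):
    # Yield (train, validation) partitions over the pre-shuffled pairs.
    fold_size = len(all_data) // k
    for i in range(k):
        val = all_data[i * fold_size:(i + 1) * fold_size]
        trn = all_data[:i * fold_size] + all_data[(i + 1) * fold_size:]
        yield trn, val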
# construct word embeddings for validation set
X_val = X_corpus[num_labeled_samples:]
# Y_true_val = validation_df["sentiment"].values
Y_weak_val = validation_df["weak_labels"].values

# X_train, X_val, X_test = normalize(X_train, X_val, X_test)
print("X_train shape: ", X_train.shape)
print("X_val shape: ", X_val.shape)
print("X_test shape: ", X_test.shape)

####TESTED TILL HERE####
exit(0)
# TODO Test remaining. Code already in place.

# Building training model
classifier, Y_test_predicted, Y_val_predicted = train(X_train, Y_train, X_val,
                                                      X_test)
# printing accuracy of the model on the test set
active_iteration = 1
queried_samples = num_labeled_samples
get_test_accuracy(active_iteration, Y_test, Y_test_predicted,
                  classification_accuracies)

# active learning algorithm
while queried_samples < max_queries:
    active_iteration += 1

    # soft labels on validation set
    soft_labels = classifier.predict_proba(X_val)

    # sample low confidence samples
    uncertain_samples = margin_selection_sampling(soft_labels,
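The call to margin_selection_sampling is cut off above. In margin-based uncertainty sampling, the algorithm queries the samples with the smallest gap between the two highest class probabilities. A sketch of that selection rule; the real function's signature is unknown, and batch_size here is a hypothetical parameter:

import numpy as np

def margin_selection_sampling_sketch(soft_labels, batch_size):
    # Sort class probabilities per sample; the margin is the gap
    # between the two most probable classes.
    sorted_probs = np.sort(soft_labels, axis=1)
    margins = sorted_probs[:, -1] - sorted_probs[:, -2]
    # Smallest margins = least confident predictions; query those first.
    return np.argsort(margins)[:batch_size]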