Example #1
from os.path import expanduser
from keras.models import load_model
# load_data and test_model are project helpers; their imports are not shown in this listing.

def load_and_test():
    x_train, y_train, x_test, y_test = load_data()
    # Restore the saved Keras model (load_model, not load_data, reads the .h5 file).
    model = load_model(expanduser("~/emotion/model.h5"))

    accuracy, fbeta = test_model(model, x_test, y_test)
    print("Accuracy: %s" % accuracy)
    print("F-Beta: %s" % fbeta)
Example #2
def run(sets):
    data_set = load_data()
    print('Data loaded.')
    for i in sets:
        s = time()
        f = FeatureSetGenerator(data_set=data_set, size=i, full_init=True, load_from_file=False, export=True)
        print('Saved featureset with {0}% of data. Time Taken:{1}'.format(i*100, time()-s))
        print(Counter(f.data_set['Label']))
Example #3
from os.path import expanduser
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
# load_data and test_model are project helpers; their imports are not shown in this listing.


def main():
    x_train, y_train, x_test, y_test = load_data()

    model = Sequential()

    model.add(
        Conv2D(32,
               kernel_size=(11, 11),
               strides=4,
               padding="same",
               activation='relu',
               input_shape=(48, 48, 1)))
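    # The first conv layer above takes 48x48 single-channel (grayscale) inputs, consistent with FER-style emotion images.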
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2, padding="valid"))
    model.add(
        Conv2D(32,
               kernel_size=(5, 5),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=2, padding="valid"))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=1,
               padding="same",
               activation='relu'))
    # Flatten the convolutional feature maps before the fully connected layers.
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(7, activation='softmax'))

    # Use categorical_crossentropy to match the 7-way softmax output.
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              batch_size=128,
              epochs=5,
              verbose=1,
              validation_data=(x_test, y_test))

    model.save(expanduser("~/emotion/alex_net.h5"))

    accuracy, fbeta = test_model(model, x_test, y_test)
    print("Accuracy: %s" % accuracy)
    print("F-Beta: %s" % fbeta)
Example #4
def run(sets, reload_dataset=False):
    if reload_dataset:
        source = {'eval': cfg.EVAL}
        target = 'C:\\Users\\Josef\\PycharmProjects\\QC-Yes-No\\Corpus\\Evaluation\\'
        create_dataset(source, target)
    test_data = import_data.load_data('C:\\Users\\Josef\\PycharmProjects\\QC-Yes-No\\Corpus\\Evaluation\\data_ready.csv')
    #sets = [0.1]#, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
    for s in sets:
        fs = util.load_pickle(name='fs_' + str(s), path='..\\pickles\\feature_sets\\')
        t = test_fs.TestFS(test_data)
        t.generate_features(fs)
        t.export(name=str(s), path='..\\pickles\\test_features\\fs_test_')
        print('Exported test for {}'.format(str(s)))
Example #5
def main():

    ## Import and prune the data.
    ## Note that the feature data has been normalized.
    (X_train, y_train), (X_test, y_test) = load_data()

    ## Convert labels to one-hot format (not needed when using SparseCategoricalCrossentropy).
    # y_train = one_hot(y_train)
    # y_test = one_hot(y_test)

    model = NN_Model()                    ## Initialize the NN model.

    model.train_NN(X_train, y_train)      ## Train the NN model on the training dataset.

    model.plot_training_model()          ## Plot the training and validation accuracy.

    ## Evaluate accuracy on the test dataset.
    print(" Test Dataset Accuracy:")
    model.test_accuracy(X_test, y_test)   ## loss: 0.2633 - accuracy: 0.9308.

    return 0
Example #6
def data_preperation(num_of_subject=-1,
                     proprtions=[0.7, 0.9],
                     cross_subject=True,
                     down=True):
    pseudo_random(configurations.random_seed)
    X, Y, Z = load_data(import_model_data,
                        num_of_subject,
                        label_type=1,
                        concat=cross_subject)

    # equalize the number of different labels
    if down:
        X, Y, Z = down_sampling(X, Y, Z)
    else:
        X, Y, Z = over_sampling(X, Y, Z)

    if cross_subject:
        train_dataset, validation_dataset, test_dataset = trial_data_split(
            X, Y, Z, proprtions)
    else:
        train_dataset, validation_dataset, test_dataset = subjects_data_split(
            X, Y, Z, proprtions)
    return train_dataset, validation_dataset, test_dataset
Example #7
import numpy as np
from import_data import load_data
from PIL import Image
import keras
from keras.models import Model, load_model

# Load trained unet model
loaded_model = load_model("Saved_Model/trained_model.h5")
loaded_model.set_weights(loaded_model.get_weights())
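# Note: the set_weights/get_weights round trip above is redundant; load_model already restores the trained weights.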
# Retrieve raw test images, label_name, height, and width.
images, label_names, height, width = load_data(mode="Test")
for i in range(len(images)):
    img = images[i]
    # (height, width, channels) -> (1, height, width, channels)
    img = np.expand_dims(img, axis=0)
    prediction = loaded_model.predict(img, verbose=1)
    prediction = np.squeeze(prediction)
    # Generate binary mask by rounding up values.
    prediction = np.round(prediction)
    prediction = prediction * 255.
    # Generate image.
    img = Image.fromarray(prediction)
    if img.mode != 'RGB':
        img = img.convert('RGB')
    # Resize the image to original size.
    img = img.resize((width[i], height[i]), Image.ANTIALIAS)
    print(img.size)
    img.save(label_names[i])
Example #8
def main():

    x_data, y_data = load_data()
    ##  Set parameter to True for initial download.
    ##  Once data is present, set this to False to
    ##      prevent re-downloading data.

    ## Plotting the Iris Petal and Sepal length and width.
    plot_iris_data(x_data, y_data)

    y_data = one_hot(y_data)

    # Split data: 80% training set, 20% test set.
    i_80 = int(len(y_data) * 0.8)
    x_train, y_train = x_data[:i_80], y_data[:i_80]
    x_test, y_test = x_data[i_80:], y_data[i_80:]

    iris_nn = NN_Model(
        x_train,    ## input data.
        y_train,    ## output data.
        3,          ## 3 NN layers: input, hidden, output.
        [4, 4, 3])  ## number of nodes in each layer.

    if Grad_Descent_Method:
        print("\nNeural Network XNOR - using GRADIENT DESCENT ITERATION\n",
              "#" * 30, "\n")

        # File location where learned weight is saved.
        theta_file = CURRENT_PATH + r'/' + 'theta.npy'

        if LOAD_PREV_THETAS:
            flat_thetas = np_load(theta_file)
            iris_nn.unflatten_Thetas(flat_thetas)

            if CONTINUOUS_TRAINING:
                iris_nn.train_NN()
                np_save(theta_file, iris_nn.flatten_Thetas())

        else:
            iris_nn.train_NN()
            np_save(theta_file, iris_nn.flatten_Thetas())

            # Display final cost after learning iterations.
            print("Final Cost J = ", iris_nn.J_cost(iris_nn.a[-1]))

        if PLOT_COST:

            #   Plot the J cost vs. # of iterations. J should converge as the number of iterations increases.
            x_axis = range(len(iris_nn.J_cost_values))
            y_axis = iris_nn.J_cost_values

            plt.plot(x_axis, y_axis, label='J_cost vs. # of Iterations')
            plt.show()

        # Test model accuracy on Validation/Test set.
        acc_count = 0
        for i in range(len(x_test)):

            x_input = x_test[i].flatten()
            y_val = np.argmax(y_test[i])
            y_pred = iris_nn.predict(x_input)[0]
            #print(y_pred, y_val);

            if y_pred == y_val: acc_count += 1

        print("Test Accuraccy = {}".format(acc_count / len(x_test)))

    return 0
Example #9
from import_data import load_data
from feature_set_generator import FeatureSetGenerator
from pympler import asizeof
from pympler.classtracker import ClassTracker
from time import time
import util
import classifiers as c
from copy import copy
from collections import Counter

tracker = ClassTracker()
data_set = load_data()
print('Data loaded.')

trained = util.load_pickle(name='fs_1', path='..\\pickles\\feature_sets\\')
test = util.load_pickle(name='fs_test_1', path='..\\pickles\\test_features\\')

test_data = test['data_set']
featureset = 'fs_words_bigrams_pos'

X_train, y_train = trained[featureset], trained['labels']
X_test, y_test = test[featureset], test['labels']
feat_size = X_train.shape[1]

da = copy(test_data)
da['Feature'] = da['Feature'].apply(' '.join)


norm = Counter(da['Label'])[0] / Counter(da['Label'])[1]
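# norm is the ratio of label-0 to label-1 rows; presumably used to normalize the per-class word counts that follow.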
counts = {}
for word in trained['bag_words'][1:]:
Example #10
folders = ['MM09', 'MM10', 'MM11', 'MM12', 'MM14','MM15', 'MM16', 'MM18', 'MM19', 'MM20', 'MM21']
path = "C:/Users\SB00745777\OneDrive - Ulster University\KaraOne\Data/" 

#####Variables required for computing windows#####
samples = 5000
window_size = .1
n_bins = samples*window_size
window_ratio = samples/n_bins
n_windows = int(window_ratio*2 - 1)
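# With samples = 5000 and window_size = 0.1, each window spans n_bins = 500 samples;
# stepping by half a window (ovlp = 250 below) gives n_windows = 2*samples/n_bins - 1 = 19 overlapping windows per trial.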

#####Open folders/files, window and save#####
for f in folders:
    print("Computing windows for folder " + f)
    new_path = path + f
    data = load_data(new_path,"EEG_Data","EEG_Data")
    data = data['EEG']
    data = np.ravel(data) 
    data = pd.DataFrame(data[0])
  
    window_all_trials = []
    all_windows = []
    for tr in data:
        
        window = [] 
        ovlp = float(n_bins/2) 
        start = int(np.round(ovlp+1))
        end = int(np.round(n_bins + ovlp+1))
        m = 1

        for i in range(0, n_windows - 2):
Example #11
                lyrics = lyrics.replace(c, "")
            elif not is_ascii(c):
                lyrics = lyrics.replace(c, "")

        

        # remove double spaces or triple spaces:
        lyrics = lyrics.replace("   ", " ")
        lyrics = lyrics.replace("  ", " ")
        entry['lyrics'] = lyrics
        invalid_entries.append(entry)


    # write clean data to lyrics3_0.txt
    print("write to file")
    out = open(dir + "/data/lyrics3_0.txt", 'w', encoding='UTF-8')
    out.write("index,song,year,artist,genre,lyrics\n")
    for entry in invalid_entries:
        sb = entry['index'] + "," + entry['song'] + "," + entry['year'] + "," + entry['artist'] + "," + entry['genre'] + ",\"" + entry['lyrics'] + "\"\n"
        out.write(sb)
    out.close()



if __name__ == '__main__':
    df = import_data.load_data()
    clean_data(df)
    # df = import_data.load_clean_data()

    # print(df[0:10])

Example #12
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 13 22:50:49 2020

@author: lily
"""
import pandas as pd
import os
import import_data
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
## import data
x_train, y_train, x_test, y_test = import_data.load_data()

# Divide the original training data into a training set and a validation set: 80% training, 20% validation.
index = np.arange(0, x_train.shape[0])
train_length = int(0.8 * index.shape[0])
train_indices = index[:train_length]
vali_indices = index[train_length:]
SVM_x_train = x_train[train_indices]
SVM_y_train = y_train[train_indices]
SVM_x_vali = x_train[vali_indices]
SVM_y_vali = y_train[vali_indices]

# 1(a) Training for different values of C
# from sklearn import svm
from sklearn.svm import LinearSVC
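
The listing is truncated right after the LinearSVC import. A minimal sketch of the kind of C sweep the comment above describes, assuming SVM_y_train / SVM_y_vali are 1-D class labels; the C grid and max_iter value are illustrative, not from the original:

C_values = [0.01, 0.1, 1, 10, 100]  # illustrative grid of regularization strengths
for C in C_values:
    clf = LinearSVC(C=C, max_iter=10000)
    clf.fit(SVM_x_train, SVM_y_train)
    acc = clf.score(SVM_x_vali, SVM_y_vali)  # accuracy on the held-out validation split
    print('C = {}: validation accuracy = {:.4f}'.format(C, acc))
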
Example #13
#External imports
import statsmodels.api as sm
from statsmodels.formula.api import glm
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt

# pd.set_option("display.max_rows", None, "display.max_columns", None)

#My imports
from import_data import load_data
from construct_regressors import construct_regressors

#LOAD DATA
rat_data_file = "/Users/laurence/Desktop/Neuroscience/mproject/data/modeling_workflow_example/ratdata.mat"
data = load_data(rat_data_file)
"""Choices / Code to convert choices into binary to solve concat error"""
"""Only analyze trials that are non-vilations and of free choice."""
good_trials = data.loc[(data["sides"] == "l") | (data["sides"] == "r")]
good_trials = data.loc[(data["trial_types"] == "f")]
choices = np.asarray(good_trials["sides"])
for choice in range(len(choices)):
    if choices[choice] == "l":
        choices[choice] = 0
    elif choices[choice] == "r":
        choices[choice] = 1
    elif choices[choice] == "v":  #Will need to remove this code
        choices[choice] = 0
        print("Error, violation trials should have been removed")
    else:
        print(
Example #14
        activation="relu",
        padding="same")(merged_layer_4)
    convolutional_layer_18 = Conv2D(
        filters=64,
        kernel_size=[3, 3],
        kernel_initializer=initializers.he_normal(seed=1),
        activation="relu",
        padding="same")(convolutional_layer_17)
    convolutional_layer_19 = Conv2D(
        filters=2,
        kernel_size=[3, 3],
        kernel_initializer=initializers.he_normal(seed=1),
        activation="relu",
        padding="same")(convolutional_layer_18)
    convolutional_layer_20 = Conv2D(
        filters=1, kernel_size=[1, 1],
        activation="sigmoid")(convolutional_layer_19)
    model = Model(inputs=input_layer, outputs=convolutional_layer_20)
    model.compile(loss="binary_crossentropy",
                  optimizer=optimizers.Adam(lr=0.00001),
                  metrics=['accuracy'])
    return model


images, labels = load_data(mode="Train")
model = unet_model()
print(model.summary())
pd.DataFrame(model.fit(images, labels, epochs=20,
                       verbose=1).history).to_csv("Saved_Model/history.csv")
model.save("Saved_Model/trained_model.h5")
Example #15
X_second = X[Y_bool]
X_second = X_second * 4
Y_second = Y[Y_bool]
Y_second = Y_second * 4

X = np.vstack((X, X_second))
Y = np.vstack((Y, Y_second))
X, Y = shuffle(X, Y)

model = Model()
model.init_random_weights(X.shape[1])
model.fit(X, Y, l1, l2, rate, epochs)
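# Model here is presumably the project's own classifier (note the custom fit signature taking l1/l2
# regularization strengths and a learning rate), not keras.models.Model.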

# TEST

X, Y = load_data()

Y_bool = (Y < 2)
Y_bool = np.reshape(Y_bool, Y_bool.shape[0])

X = X[Y_bool]
Y = Y[Y_bool]


def test(IND):
    global model
    TEST_INPUT = X[IND]
    TEST_INPUT = TEST_INPUT / TEST_INPUT.mean()
    W = read_numpy_array_from_path("results/best_weights.csv")
    B = read_numpy_array_from_path("results/bias.csv")
    model = Model()
Example #16
                            loss='huber',
                            f_scale=0.1,
                            args=(t, amplitude_envelope))
    return res_hub


def fun(x, t, y):
    return x[0] * np.exp(-x[1] * t) + x[2] - y


def gen_data(t, a, b, c):
    return a * np.exp(-b * t) + c


if __name__ == '__main__':
    t, A = load_data('./ztpi_data.txt')
    y_lsq = gen_data(t, *lsq_resutls(t, A).x)
    y_soft_l1 = gen_data(t, *softL1_results(t, A).x)
    y_huber = gen_data(t, *huber_results(t, A).x)

    print(
        "Same as Piotr's result with a different library - ordinary least squares - the result is the same"
    )
    print(
        f'LSF a: {lsq_resutls(t, A).x[0]:.3}, b: {lsq_resutls(t, A).x[1]:.3} c: {lsq_resutls(t, A).x[2]:.3}'
    )
    print('Robust loss function example 1')
    print(
        f'Soft L1 a: {softL1_results(t, A).x[0]:.3}, b: {softL1_results(t, A).x[1]:.3} c: {softL1_results(t, A).x[2]:.3}'
    )
    print('Robust loss function example 2')