Example #1
import numpy as np
import random
import math
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier

import load_data

PCA_TOGGLE = True

data = load_data.loadall('melspects.npz')
x_tr = data['x_tr']
y_tr = data['y_tr']
x_te = data['x_te']
y_te = data['y_te']
x_cv = data['x_cv']
y_cv = data['y_cv']

print('training set shape:', x_tr.shape)
# print(y_cv)

x_tr = x_tr.reshape(x_tr.shape[0], x_tr.shape[1] * x_tr.shape[2])
x_cv = x_cv.reshape(x_cv.shape[0], x_cv.shape[1] * x_cv.shape[2])
x_te = x_te.reshape(x_te.shape[0], x_te.shape[1] * x_te.shape[2])

scaler = StandardScaler()
# Fit on training set only.
scaler.fit(x_tr)
# Apply the transform to the training, validation, and test sets.
train_sc = scaler.transform(x_tr)
cv_sc = scaler.transform(x_cv)
test_sc = scaler.transform(x_te)
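Example #1 is cut off after the scaling step. Given the PCA_TOGGLE flag and the PCA and KNeighborsClassifier imports, the continuation presumably looks something like the sketch below; n_components=0.95 and n_neighbors=5 are illustrative assumptions, not values from the original.

# Hypothetical continuation: optional PCA before k-NN.
# n_components=0.95 keeps enough components to explain 95% of the variance.
if PCA_TOGGLE:
    pca = PCA(n_components=0.95)
    pca.fit(train_sc)  # fit on the (scaled) training set only
    train_sc = pca.transform(train_sc)
    cv_sc = pca.transform(cv_sc)
    test_sc = pca.transform(test_sc)

# k-NN classifier on the transformed features (k=5 is an assumption).
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(train_sc, y_tr)
print('validation accuracy:', knn.score(cv_sc, y_cv))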
Example #2
import numpy as np
from keras.utils import np_utils

import load_data


def main():

    #################################################

    # Data stuff

    data = load_data.loadall('melspects.npz')

    x_tr = data['x_tr']
    y_tr = data['y_tr']
    x_te = data['x_te']
    y_te = data['y_te']
    x_cv = data['x_cv']
    y_cv = data['y_cv']

    tr_idx = np.random.permutation(len(x_tr))
    te_idx = np.random.permutation(len(x_te))
    cv_idx = np.random.permutation(len(x_cv))

    x_tr = x_tr[tr_idx]
    y_tr = y_tr[tr_idx]
    x_te = x_te[te_idx]
    y_te = y_te[te_idx]
    x_cv = x_cv[cv_idx]
    y_cv = y_cv[cv_idx]

    x_tr = x_tr[:, :, :, np.newaxis]
    x_te = x_te[:, :, :, np.newaxis]
    x_cv = x_cv[:, :, :, np.newaxis]

    y_tr = np_utils.to_categorical(y_tr)
    y_te = np_utils.to_categorical(y_te)
    y_cv = np_utils.to_categorical(y_cv)

    # training = np.load('gtzan/gtzan_tr.npy')
    # x_tr = np.delete(training, -1, 1)
    # label_tr = training[:,-1]

    # test = np.load('gtzan/gtzan_te.npy')
    # x_te = np.delete(test, -1, 1)
    # label_te = test[:,-1]

    # cv = np.load('gtzan/gtzan_cv.npy')
    # x_cv = np.delete(cv, -1, 1)
    # label_cv = test[:,-1]

    # temp = np.zeros((len(label_tr),10))
    # temp[np.arange(len(label_tr)),label_tr.astype(int)] = 1
    # y_tr = temp
    # temp = np.zeros((len(label_te),10))
    # temp[np.arange(len(label_te)),label_te.astype(int)] = 1
    # y_te = temp
    # temp = np.zeros((len(label_cv),10))
    # temp[np.arange(len(label_cv)),label_cv.astype(int)] = 1
    # y_cv = temp
    # del temp

    #################################################

    # if True:
    #     model = keras.models.load_model('model84.082.0.h5', custom_objects={'metric': metric})
    #     print("Saving confusion data...")
    #     pred = model.predict_classes(x_te, verbose=1)
    #     cnf_matrix = confusion_matrix(np.argmax(y_te, axis=1), pred)
    #     np.set_printoptions(precision=1)
    #     plt.figure()
    #     plot_confusion_matrix(cnf_matrix, classes=song_labels, normalize=True, title='Normalized confusion matrix')
    #     print(precision_recall_fscore_support(np.argmax(y_te, axis=1), pred, average='macro'))
    #     plt.savefig("matrix", format='png', dpi=1000)
    #     raise SystemExit

    # `model` and `cnn` are defined elsewhere in this project (presumably a
    # training wrapper and a network-building function).
    ann = model(cnn)
    ann.train_model(x_tr,
                    y_tr,
                    val_x=x_cv,
                    val_y=y_cv,
                    test_x=x_te,
                    test_y=y_te)
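The commented-out block in the middle of main() builds one-hot labels by hand with np.zeros and integer indexing; np_utils.to_categorical produces the same result. A minimal self-contained check (the label values here are made up):

import numpy as np
from keras.utils import np_utils

labels = np.array([3, 0, 7])           # example integer class labels
manual = np.zeros((len(labels), 10))   # same construction as the commented-out block
manual[np.arange(len(labels)), labels] = 1
assert np.array_equal(manual, np_utils.to_categorical(labels, num_classes=10))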
Example #3
from load_data import loadall
from json import load
from os.path import join, sep
from sys import path
import matplotlib.pyplot as plt
from IPython.display import Markdown as md
import numpy as np
with open("foldersettings.json") as f:
    settings = load(f)
path.append(join(f"{sep}".join(settings["projectdir"]), "from_scratch"))
from mystats import avg, describe_matrix

if __name__ == "__main__":
    datadir = f"{sep}".join(settings["datadir"])
    data = loadall(datadir, prefix="*ubyte*")
    X_train, X_test = data["i60000"], data["i10000"]
    y_train, y_test = data["l60000"], data["l10000"]
    mdobj = md(describe_matrix(X_train))
    # X_train is non-square, so Ax = b generally has no exact solution.
    # The least-squares normal equations A^T A x_hat = A^T b do have one.
    # If X_train were square and invertible, it could be solved directly with:
    # np.linalg.solve(X_train, y_train)

    print(mdobj._repr_markdown_())
    # 1. Visualise and clean data
    plt.figure()
    plt.imshow(X_train[0, :].reshape(28, 28), cmap="gist_yarg")
    plt.savefig(join("img", "example_digit.png"))
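As a side note to the normal-equations comment above: for a tall matrix A, solving A^T A x_hat = A^T b and calling np.linalg.lstsq give the same least-squares solution. A minimal sketch with synthetic data (not the MNIST arrays):

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(100, 5))   # tall, non-square system
b = rng.normal(size=100)

# Normal equations: A^T A x_hat = A^T b
x_hat = np.linalg.solve(A.T @ A, A.T @ b)

# np.linalg.lstsq solves the same problem more stably.
x_lstsq, *_ = np.linalg.lstsq(A, b, rcond=None)
assert np.allclose(x_hat, x_lstsq)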
Example #4
from controller.gripper.gripper_control import Gripper_Controller
from controller.ur5.ur_controller import UR_Controller
from controller.mini_robot_arm.RX150_driver import RX150_Driver

import GPy
from load_data import loadall
import control as ctrl
import slycot

import keyboard
from queue import Queue
from logger_class import Logger

import collections
import random

X, Y = loadall()

N = X.shape[0]
print(N)
idx = list(range(N))
random.seed(0)
random.shuffle(idx)

train_idx = idx[:int(N * 0.8)]
test_idx = idx[int(N * 0.8):]
X_train, Y_train = X[train_idx], Y[train_idx]
X_test, Y_test = X[test_idx], Y[test_idx]

# kernel1 = GPy.kern.Linear(input_dim=4,ARD=True,initialize=False)
# m1 = GPy.models.SparseGPRegression(X_train, Y_train[:, 0].reshape(Y_train.shape[0], 1), kernel1, num_inducing=500, initialize=False)
# m1.update_model(False)
# m1.initialize_parameter()
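The commented-out block uses the initialize=False / update_model(False) / initialize_parameter() pattern, which is typically used when restoring previously saved parameters. The plain fit-from-scratch path might look like the sketch below; the kernel choice and num_inducing=500 mirror the comment, everything else is an assumption.

# Sketch: fit a sparse GP to the first output dimension and predict.
kernel = GPy.kern.Linear(input_dim=X_train.shape[1], ARD=True)
m = GPy.models.SparseGPRegression(
    X_train, Y_train[:, 0].reshape(-1, 1), kernel, num_inducing=500)
m.optimize('bfgs', messages=True)   # maximize the log marginal likelihood
mean, var = m.predict(X_test)       # posterior mean and variance at test inputs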