Example #1
def singleTrain():
    # Per-subject training runs, each with a single final testing set
    filename = ['individual1_pred', 'individual2_pred']
    data2, data7, index2, index7 = util.loadSingleTrainData()
    Xt2, Xt7 = util.loadSingleTestData()
    X2, X7, Y2, Y7 = util.singleTransform(data2, data7, index2, index7)

    X2, Y2 = util.sampling(X2, Y2)
    X7, Y7 = util.sampling(X7, Y7)

    pcaX2, pcaXt2 = util.pca(X2, Xt2)
    stdX2, stdXt2 = util.scaler(pcaX2, pcaXt2)
    pcaX7, pcaXt7 = util.pca(X7, Xt7)
    stdX7, stdXt7 = util.scaler(pcaX7, pcaXt7)

    training(stdX2, Y2, stdXt2, filename[0])
    training(stdX7, Y7, stdXt7, filename[1])
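The util.pca and util.scaler helpers are not shown on this page; a minimal sketch of what they plausibly do, fitting on the training split only and applying the same transform to the test split (sklearn-based; the signatures and the 95%-variance cutoff are assumptions):

from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

def pca(X_train, X_test, n_components=0.95):
    # Assumption: keep enough components for 95% of training variance.
    model = PCA(n_components=n_components).fit(X_train)
    return model.transform(X_train), model.transform(X_test)

def scaler(X_train, X_test):
    # Fit the standardizer on training data only to avoid test leakage.
    model = StandardScaler().fit(X_train)
    return model.transform(X_train), model.transform(X_test)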
Example #2
File: planefit.py  Project: jwgu/camo
def fit_plane(pts):
    # Alternative: solve the homogeneous system directly, e.g.
    #   A = ut.homog(pts.T).T; b = mvg.solve_0_system(A); b /= np.linalg.norm(b[:3])
    # PCA route: the least-variance axis of the point cloud is the plane normal.
    axes, _, mean_pt = ut.pca(pts)
    w = axes[2]
    b = -np.dot(w, mean_pt)  # plane passes through the centroid
    assert np.allclose(np.linalg.norm(w), 1)
    return np.array([w[0], w[1], w[2], b], 'd')
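fit_plane works because the least-variance principal axis of a roughly planar point cloud is the plane normal, and b = -w.dot(mean) makes the plane pass through the centroid. A quick numpy-only check of that fact on synthetic data (self-contained; ut.pca itself is assumed to return (axes, variances, mean_point)):

import numpy as np

pts = np.random.randn(500, 3)
pts[:, 2] = 0.25 * pts[:, 0] - 0.5 * pts[:, 1] + 1.0  # exact plane z = x/4 - y/2 + 1

mean_pt = pts.mean(axis=0)
# Smallest-eigenvalue eigenvector of the covariance is the plane normal.
evals, evecs = np.linalg.eigh(np.cov((pts - mean_pt).T))
w = evecs[:, 0]          # eigh sorts eigenvalues in ascending order
b = -np.dot(w, mean_pt)
residuals = pts @ w + b  # signed point-to-plane distances
assert np.allclose(residuals, 0)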
Example #3
def main(track_id):
    # tracks = [2,5,10,140,141,148,182,190,193,200]
    # check_difference([2])

    model = get_model()
    print("Track ID: %s" % track_id)
    filepath = get_audio_path(DATA_AUDIO_DIR, track_id)
    x, sr = librosa.load(filepath, sr=None, mono=True)
    features = compute_features(track_id, x, sr)
    features = pca(features)
    pred = model.predict(features)
    cat = np.argmax(pred)
    print(CATEGORIES[cat])
Example #4
def generalTrain():
    # General training with a final testing set
    filename = "general_pred"
    data, indexes = util.loadGeneralTrainData()
    Xt = util.loadGeneralTestData()
    Xs, Ys = util.generalTransform(data, indexes)

    # functools.reduce in Python 3; equivalent to np.concatenate(Xs, axis=0)
    f = lambda a, b: np.concatenate((a, b), axis=0)
    X = reduce(f, Xs)
    Y = reduce(f, Ys)

    X, Y = util.sampling(X, Y)
    pcaX, pcaXt = util.pca(X, Xt)
    stdX, stdXt = util.scaler(pcaX, pcaXt)

    training(stdX, Y, stdXt, filename)
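util.sampling is also not shown; given the class counts printed in Example #5, it is plausibly a class rebalancer. A hypothetical sketch that downsamples the majority class of a binary label vector (the real helper may differ):

import numpy as np

def sampling(X, Y, seed=0):
    # Assumption: balance a binary problem by downsampling class 0.
    rng = np.random.default_rng(seed)
    pos = np.flatnonzero(Y == 1)
    neg = np.flatnonzero(Y == 0)
    keep = rng.choice(neg, size=len(pos), replace=False)
    idx = rng.permutation(np.concatenate([pos, keep]))
    return X[idx], Y[idx]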
Example #5
def generalCVTrain():
    # General Training with cross validation
    data, indexes = util.loadGeneralTrainData()
    Xs, Ys = util.generalTransform(data, indexes)
    subjectIndexes = [1, 4, 6, 9]

    print("\n--> General Training Data Set Shape")
    for i in range(4):
        # used to output ground truth files
        #util.saveGroundTruth(Ys[i], "groundTruth{0}.csv".format(subjectIndexes[i]))

        print("Subject {0}. X: {1}, Y: {2}, # of 1: {3}".format(\
        subjectIndexes[i], Xs[i].shape, Ys[i].shape, Ys[i].tolist().count(1)))

    for i in range(4):
        print("\n=========== Subject {0} As Validation Set ===========".format(subjectIndexes[i]))

        X, Y, Xt, Yt = util.splitTrainTest(Xs, Ys, i)
        X, Y = util.sampling(X, Y)
        pcaX, pcaXt = util.pca(X, Xt)
        stdX, stdXt = util.scaler(pcaX, pcaXt)

        sampleTraining(stdX, Y, stdXt, Yt)
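The loop above is leave-one-subject-out cross-validation; util.splitTrainTest plausibly holds subject i out and pools the rest. A sketch under that assumption, not the project's actual helper:

import numpy as np

def splitTrainTest(Xs, Ys, i):
    # Hold subject i out as validation; concatenate the others for training.
    Xt, Yt = Xs[i], Ys[i]
    X = np.concatenate([x for j, x in enumerate(Xs) if j != i], axis=0)
    Y = np.concatenate([y for j, y in enumerate(Ys) if j != i], axis=0)
    return X, Y, Xt, Yt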
Example #6
File: models.py  Project: tmurray19/CS428
def compute(self, X, y):
    [D, self.W, self.mu] = pca(asRowMatrix(X), y, self.num_components)
    self.y = y
    for xi in X:
        self.projections.append(project(self.W, xi.reshape(1, -1), self.mu))
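compute is the training half of an Eigenfaces-style model: it stores the PCA projection of every training image. The matching predict step is not shown here; nearest-neighbour search over those stored projections is the usual companion (a sketch, not this project's code):

import numpy as np

def predict(self, X):
    # Project the query the same way, then return the label of the
    # closest stored training projection (Euclidean distance).
    q = project(self.W, X.reshape(1, -1), self.mu)
    distances = [np.linalg.norm(p - q) for p in self.projections]
    return self.y[int(np.argmin(distances))]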
Example #7
labels = np.concatenate([np.zeros(ppop1.shape[-2]), np.ones(ppop2.shape[-2])])

n_pp = ppop.shape[1]

#%%

vv = []
uv = []
vu = []
uu = []
null = []
for pp in range(len(ppop)):
    p1 = ppop[pp, :, labels == 0, t].T
    p2 = ppop[pp, :, labels == 1, t].T

    U, l1 = util.pca(p1)
    V, l2 = util.pca(p2)

    uu.append(np.cumsum(l1) / np.sum(l1))
    vv.append(np.cumsum(l2) / np.sum(l2))
    uv.append(
        np.array([(V[:, :k].T @ p1).var(1).sum(0) / p1.var(1).sum(0)
                  for k in range(np.min(V.shape))]))
    vu.append(
        np.array([(U[:, :k].T @ p2).var(1).sum(0) / p2.var(1).sum(0)
                  for k in range(np.min(U.shape))]))

    # Null model: same measurement against k columns of a random orthonormal basis.
    null.append(
        np.array([[(la.qr(np.random.randn(n_pp, n_pp).T)[0][:, :k].T @ p2).var(1).sum(0)
                   / p2.var(1).sum(0) for k in range(np.min(U.shape))]
                  for _ in range(15)]))
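Here uv[k] is the fraction of population-1 variance captured by the top-k principal axes of population 2 (and vu the reverse); the null curves repeat the measurement with random orthonormal bases for calibration. The core quantity as a standalone helper (the name is mine, not the source's; Q is assumed orthonormal, X is features x samples):

def captured_variance(Q, X, k):
    # Fraction of X's total variance retained after projecting onto the
    # first k columns of the orthonormal basis Q.
    return (Q[:, :k].T @ X).var(1).sum(0) / X.var(1).sum(0)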
Example #8
    up_uncue_circ = np.array([
        z_retro[1][:, (upcol == c) & (cue < 0), :].detach().numpy().mean(1)
        for c in cbins
    ])
    down_uncue_circ = np.array([
        z_retro[1][:, (downcol == c) & (cue > 0), :].detach().numpy().mean(1)
        for c in cbins
    ])

    cue_circs = np.concatenate([up_cue_circ, down_cue_circ], axis=0)
    up_circs = np.concatenate([up_cue_circ, up_uncue_circ], axis=0)

    plot_circs = cue_circs
    # plot_circs = up_circs

    U, S = util.pca(plot_circs[:, task.T_inp2:task.T_resp, :].reshape(
        (-1, N)).T)
    # U = dec.weight.detach().numpy()[[0,1,4],:].T
    # U = wa['estimator'][0].coef_

    proj_z = plot_circs @ U[:, :3]
    # proj_z = util.pca_reduce(wa['estimator'][0].coef_@plot_circs.reshape((-1,100)).T, num_comp=3)
    # proj_z = proj_z.reshape((128, 35, 3))

    ani.ScatterAnime3D(proj_z[..., 0],
                       proj_z[..., 1],
                       proj_z[..., 2],
                       c=np.tile(cbins, 2),
                       cmap='hsv',
                       rotation_period=100,
                       after_period=50,
                       view_period=20).save(SAVE_DIR + 'temp.mp4', fps=10)
Example #9
basis = la.qr(np.random.randn(nneur, nneur).T)[0][:8, :]
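# Note: la.qr(...)[0] is an orthogonal matrix, so its first 8 rows form 8
# mutually orthonormal directions in neuron space (basis @ basis.T == eye(8)).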

#%% parallel circles

mu_u = basis[:2, :].T @ circ1 + np.random.randn(nneur, ndat**2) * 0.1
mu_d_l = 0.6 * basis[2:4, :].T @ circ2 + np.random.randn(nneur, ndat**2) * 0.1

mu_l = basis[:2, :].T @ circ2 + basis[[5], :].T + np.random.randn(
    nneur, ndat**2) * 0.1
mu_d_u = 0.6 * basis[2:4, :].T @ circ1 + basis[[6], :].T + np.random.randn(
    nneur, ndat**2) * 0.1

T1_par = mu_u + mu_d_l  # upper cued torus
T2_par = mu_l + mu_d_u  # lower cued torus

U, mwa = util.pca(T1_par)
V, _ = util.pca(T2_par)

plt.plot([(U[:, :k].T @ T2_par).var(1).sum(0) / T2_par.var(1).sum(0)
          for k in range(nneur)])
plt.plot([(V[:, :k].T @ T1_par).var(1).sum(0) / T1_par.var(1).sum(0)
          for k in range(nneur)])

#%%
mu_u = basis[:2, :].T @ circ1 + np.random.randn(nneur, ndat**2) * 0.1
mu_d_l = 0.6 * basis[2:4, :].T @ circ2 + np.random.randn(nneur, ndat**2) * 0.1

mu_l = basis[4:6, :].T @ circ2 + np.random.randn(nneur, ndat**2) * 0.1
mu_d_u = 0.6 * basis[6:, :].T @ circ1 + np.random.randn(nneur, ndat**2) * 0.1

T1_orth = mu_u + mu_d_l  # upper cued torus
T2_orth = mu_l + mu_d_u  # lower cued torus
Example #10
pa = pyaudio.PyAudio()
# The opening call is truncated in the source; reconstructed here, with
# paFloat32 assumed because the frames are decoded as np.float32 below.
stream = pa.open(format=pyaudio.paFloat32,
                 channels=1,
                 rate=RATE,
                 input=True,
                 frames_per_buffer=CHUNKSIZE)
model = get_model(MODEL_VERSION)

print("* Start Recording for %d seconds" % RECORD_SECONDS)
stream.start_stream()
frames = []

for i in tqdm(range(0, int(RATE / CHUNKSIZE * RECORD_SECONDS))):
    data = stream.read(CHUNKSIZE, exception_on_overflow=False)
    frames.append(np.frombuffer(data, dtype=np.float32))

print("* End Recording")
stream.stop_stream()
stream.close()
pa.terminate()

audio_data = np.hstack(frames)
features = compute_features('test', audio_data, RATE)
if MODEL_VERSION == "cat6":
    features = pca(features)
elif MODEL_VERSION in ["cat8", "cat6_nopca"]:
    features = np.expand_dims(features.transpose(), axis=0)
pred = model.predict(features)
cat = np.argmax(pred)
pred = pred.tolist()[0]
for i in range(len(pred)):
    print('{}: {:.4f}'.format(categories(MODEL_VERSION)[i], pred[i]))
print('* Final Prediction: {}'.format(categories(MODEL_VERSION)[cat]))