# ===== Ejemplo n.º 1 (score: 0) =====
def train_initial(name, n_seq, n_labels, n_dimension, n_hidden_1, n_hidden_2, epochs, save):
    usage_ratio = 1
    # epochs = 150

    print '========================= Reading ========================='
    X_train, y_train, X_test, y_test = test.read_data(name=name, n_seq=n_seq, n_labels=n_labels, n_dimension=n_dimension)
    data = (X_train, y_train, X_test, y_test)

    print '========================= Modeling ========================='
    model = lstm.build_model(n_dimension=n_dimension, n_labels=n_labels, n_seq=n_seq, n_hidden_1=n_hidden_1, n_hidden_2=n_hidden_2)

    print '========================= Training =========================='
    model = lstm.run_network(model=model, data=data, epochs=epochs, usage_ratio=usage_ratio, save=True, save_name=name)

    print '========================= Testing =========================='
    test.test_all_metrics(model, data=data, usage_ratio=usage_ratio)
# ===== Ejemplo n.º 2 (score: 0) =====
def get_image_for_frame(i):
    """Return image ``i`` (one of 16) from subject 0's .aps scan.

    The image is vertically flipped, converted to double precision, and
    scaled so its maximum value is 1.
    """

    # Earlier synthetic-test stub, kept for reference:
    # r=np.zeros((height,width))
    # r[:,i]=1.0
    # r[:, -i] = 0.5
    # return r
    files = test.list_files()  #list all .aps files in sequential order
    r = files[
        0]  #0 stands for subject 0, all 16 images for this subject are included
    header = test.read_header(r)  #extracting the header dictionary
    # NOTE(review): `header` is read but never used below — confirm whether
    # the header read is needed at all.
    r = test.read_data(
        r)  #reading the binary .aps file & extracting image data
    r = r.transpose()  #so we can index each of the 16 images
    r = r[i]  #images 1-16
    # Debug output: raw value range before normalization.
    print(r.max())
    print(r.min())
    print(r.mean())
    r = r.astype(np.double)
    print(r.shape)
    r = r[::-1, ]  # flip rows (vertical flip) for display orientation
    #r=(test.get_single_image(files[0],0))[i].flatten()
    #r=r.astype(np.double)
    return r / r.max()
# ===== Ejemplo n.º 3 (score: 0) =====
from sklearn import svm, datasets
import numpy as np
from test import read_data
import pylab as pl

# Load the SVM training file and remap the negative class label -1 to 0.
filename = 'svm_train_2.svm'
points, labels = read_data(filename)
# Vectorized replacement of the element-wise loop (labels is a numpy array).
labels[labels == -1] = 0

# NOTE(review): the loaded points/labels are never used below — the
# classifier is trained on the iris dataset instead. Confirm intent.
iris = datasets.load_iris()
X = iris.data[:, :2]  # first two features only, so the boundary is plottable
y = iris.target
clf = svm.SVC(kernel='linear')
clf.fit(X, y)

# Build an evaluation mesh covering the data range with a 1-unit margin.
h = 0.02  # mesh step size
xMin, xMax = X[:, 0].min() - 1, X[:, 0].max() + 1
yMin, yMax = X[:, 1].min() - 1, X[:, 1].max() + 1

xx, yy = np.meshgrid(np.arange(xMin, xMax, h),
                     np.arange(yMin, yMax, h))

# Predict on every mesh point and draw the decision regions plus samples.
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
pl.contourf(xx, yy, Z, cmap=pl.cm.Paired)
pl.axis('off')
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=pl.cm.Paired)
pl.title(filename)
pl.show()
# ===== Ejemplo n.º 4 (score: 0) =====
from sklearn import svm, datasets
import numpy as np
from test import read_data
import pylab as pl

# Read the SVM-format training file; map the -1 class label to 0.
filename = 'svm_train_2.svm'
points, labels = read_data(filename)
for idx, lab in enumerate(labels):
    if lab == -1:
        labels[idx] = 0

# Fit a linear SVM on the first two iris features (2-D for plotting).
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
clf = svm.SVC(kernel='linear')
clf.fit(X, y)

# Lay a fine grid over the feature range (1-unit margin, 0.02 step).
h = 0.02
x_lo, x_hi = X[:, 0].min() - 1, X[:, 0].max() + 1
y_lo, y_hi = X[:, 1].min() - 1, X[:, 1].max() + 1

xx, yy = np.meshgrid(np.arange(x_lo, x_hi, h), np.arange(y_lo, y_hi, h))

# Color each grid cell by its predicted class, then overlay the samples.
grid_pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
grid_pred = grid_pred.reshape(xx.shape)
pl.contourf(xx, yy, grid_pred, cmap=pl.cm.Paired)
pl.axis('off')
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=pl.cm.Paired)
pl.title(filename)
pl.show()
# ===== Ejemplo n.º 5 (score: 0) =====
def run_network(save_name, model=None, data=None, epochs=100, usage_ratio=1, save=False):
    epochs = epochs
    batch_size = 128
    start_time = time.time()
    print 'Running LSTM, start time: ', start_time

    if data is None:
        # X_train, y_train, X_test, y_test = divide_data(usage_ratio=usage_ratio)
        X_train, y_train, X_test, y_test = read_data(train_name='train_2.csv', test_name='test_2.csv', usage_ratio=usage_ratio)
    else:
        X_train, y_train, X_test, y_test = data

    print 'Modeling with ', X_train.shape[0], ' training samples & ', \
        X_test.shape[0], ' testing samples..'

    if model is None:
        model = build_model()

    # earlyStopping=EarlyStopping(monitor='val_loss',
                                # patience=5,
                                # verbose=1,
                                # mode='auto')

    print "Training Mode: TENSORBOARD ON.."
    try:
        model.fit(
            X_train, y_train,
            batch_size=batch_size,
            nb_epoch=epochs,
            validation_data=(X_test, y_test),
            # using TB to visualize process.
            callbacks=[TensorBoard(log_dir='../logs')]
        )
    except KeyboardInterrupt:
        print 'Training over, total executing time: ', time.time() - start_time

    print 'Training over, total executing time: ', time.time() - start_time
    # y_train_pred = model.predict(X_train)
    # y_test_pred = model.predict(X_test)

    if save:
        if raw_input('Save the model? (y/n): ') == 'y':
            json_string = model.to_json()

            filename1 = 'lstm_architecture'
            filename2 = 'lstm_weights'

            cnt = 1
            model_path = '../models/'
            for root, dirs, files in os.walk(model_path):
                for filename in files:
                    if 'lstm_architecture' in filename:
                        cnt += 1
            filename1 = model_path + filename1 + '_' + save_name + '.json'
            filename2 = model_path + filename2 + '_' + save_name + '.h5'

            print 'Saving LSTM model into .json & .h5 at:', filename1
            open(filename1, 'w').write(json_string)
            model.save_weights(filename2)

    acc_train = model.evaluate(X_train, y_train, batch_size=128)
    acc_test = model.evaluate(X_test, y_test, batch_size=128)
    print '\ntraining accuracy: ', acc_train
    print 'testing accuracy: ', acc_test
    print '\nTraining down.'

    return model