Example #1
def output_testcase(model, test_x_list, test_y_list, name, eps):
    print('Processing %s' % name)
    assert isinstance(test_x_list, list), 'test_x_list must be a list.'
    assert isinstance(test_y_list, list), 'test_y_list must be a list.'

    model.compile(loss='mean_squared_error', optimizer='adamax')
    model.fit(test_x_list, test_y_list, epochs=1, verbose=False)
    predict_y_list = model.predict(test_x_list)
    if not isinstance(predict_y_list, list):
        predict_y_list = [predict_y_list]

    print(model.summary())

    export_model(model, 'test_%s.model' % name)

    with open('test_%s.h' % name, 'w') as f:
        predict_x_list = [test_x[0] for test_x in test_x_list]
        x_map = tensor_map_init(predict_x_list, ',\n        ')
        y_map = tensor_map_init(predict_y_list, ',\n        ')
        input_layer_names = ', '.join(
            ["\"%s\"" % layer_name for layer_name in model.input_names])
        output_layer_names = ', '.join(
            ["\"%s\"" % layer_name for layer_name in model.output_names])
        f.write(TEST_CASE % (name, name, input_layer_names, output_layer_names,
                             x_map, y_map, name, eps))
Example #2
def combine_modules(inpdim, numPolicies, modelF):
    print('Input Dimension: %d' % inpdim)
    inp = Input((inpdim, ))

    out = []
    for k in range(numPolicies):
        _, getScore, _ = getRankModel(inpdim)
        cfname = modelF.replace(".h5", "_%d.h5" % k)

        # Normalization layers
        getScore.add(Dense(1, input_shape=(1, )))
        getScore.add(Dense(1, input_shape=(1, )))

        print('Loading Model %s' % cfname)
        getScore.load_weights(cfname)
        cout = getScore(inp)
        out.append(cout)

    if numPolicies > 1:
        ensembleOut = keras.layers.average(out)
    else:
        ensembleOut = cout

    ensemble = Model(inputs=inp, outputs=ensembleOut)

    finalfname = modelF  # the original modelF.replace(".h5", ".h5") was a no-op
    jsonfname = finalfname.replace('.h5', '.json')

    # Kerasify to c++
    export_model(ensemble, finalfname)
    return
Example #3
def output_testcase(model, test_x, test_y, name, eps):
    print("Processing %s" % name)
    model.compile(loss='mean_squared_error', optimizer='adamax')
    model.fit(test_x, test_y, epochs=1, verbose=False)
    predict_y = model.predict(test_x).astype('f')
    print(model.summary())

    export_model(model, 'test_%s.model' % name)

    with open('test_%s.h' % name, 'w') as f:
        x_shape, x_data = c_array(test_x[0])
        y_shape, y_data = c_array(predict_y[0])

        f.write(TEST_CASE % (name, name, x_shape, x_data, y_shape, y_data, name, eps))
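
The output_testcase variants in these examples lean on two module-level helpers, c_array and TEST_CASE, that the excerpts never show. Below is a minimal sketch of what c_array plausibly does, assuming it renders a NumPy array as a pair of C-style initializer strings (one for the shape, one for the flattened data); this reconstruction is an assumption inferred from the call sites, not the original helper.

import numpy as np

def c_array(arr):
    # Assumed behavior: return ('{d0, d1, ...}', '{v0, v1, ...}') strings
    # that can be pasted into a C array initializer.
    arr = np.asarray(arr)
    shape = '{%s}' % ', '.join(str(d) for d in arr.shape)
    data = '{%s}' % ', '.join('%.6f' % v for v in arr.flatten())
    return shape, data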
Example #4
def output_testcase(model, test_x, test_y, name, eps):
    print('Processing %s' % name)
    model.compile(loss='mse', optimizer='adam')
    model.fit(test_x, test_y, epochs=1, verbose=False)
    predict_y = model.predict(test_x).astype('f')
    print(model.summary())

    export_model(model, models_path + '/%s.model' % name)

    with open(src_path + '/%s_test.cpp' % name, 'w') as f:
        x_shape, x_data = c_array(test_x[0])
        y_shape, y_data = c_array(predict_y[0])

        f.write(TEST_CASE %
                (name, x_shape, x_data, y_shape, y_data, name, eps))
Example #5
def output_testcase(model, test_x, test_y, name, eps):
    print("Processing %s" % name)
    model.compile(loss='mean_squared_error', optimizer='adamax')
    model.fit(test_x, test_y, epochs=1, verbose=False)
    predict_y = model.predict(test_x).astype('f')
    print(model.summary())

    export_model(model, 'test_%s.model' % name)

    with open('test_%s.h' % name, 'w') as f:
        x_shape, x_data = c_array(test_x[0])
        y_shape, y_data = c_array(predict_y[0])

        f.write(TEST_CASE %
                (name, name, x_shape, x_data, y_shape, y_data, name, eps))
Example #6
def output_testcase(model, test_x, test_y, name, eps):
    print(f'Processing {name}')
    model.compile(loss='mse', optimizer='adam')
    model.fit(test_x, test_y, epochs=1, verbose=False)
    predict_y = model.predict(test_x).astype('f')
    print(model.summary())

    path = os.path.abspath(f'models/{name}.model')
    export_model(model, path)

    with open(f'include/test/{name}.h', 'w') as f:
        x_shape, x_data = c_array(test_x[0])
        y_shape, y_data = c_array(predict_y[0])

        f.write(TEST_CASE % dict(name=name, path=path, eps=eps,
                                 x_shape=x_shape,
                                 x_data=x_data,
                                 y_shape=y_shape,
                                 y_data=y_data))
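
Example #6 fills TEST_CASE with named placeholders (name, path, eps, x_shape, x_data, y_shape, y_data), where the earlier examples use positional ones. The template itself is defined elsewhere in each source file; the sketch below is only a guess at its shape, pieced together from the kerasify C++ API visible in Example #20 (KerasModel, LoadModel, Tensor, Apply). approx_equal is a hypothetical comparison helper, not part of the API shown anywhere in these examples.

# Hypothetical sketch of the TEST_CASE template; the real one is not shown.
TEST_CASE = '''
bool test_%(name)s()
{
    KerasModel model;
    if (!model.LoadModel("%(path)s")) return false;

    Tensor in%(x_shape)s;
    in.data_ = %(x_data)s;

    Tensor expected%(y_shape)s;
    expected.data_ = %(y_data)s;

    Tensor out;
    model.Apply(&in, &out);
    return approx_equal(out, expected, %(eps)s);  // assumed helper
}
'''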
Example #7
def main():
    '''
    main routine to train and generate keras classifier model
    '''
    # Model Parameters and Paths
    # NOTE: Uncomment one of the blocks below to generate the corresponding classifier model (pose, hand, or face)

    timesteps = 5

    ## POSE - val_acc: 98%
    epochs = 1000
    batch_size = 32
    _dropout = 0.1
    _activation = 'relu'
    _optimizer = 'Adam'
    class_names = ["close_to_camera", "standing", "sitting"]
    X_vector_dim = 36  # number of features or columns (pose)
    samples_path = "../../../train_data/pose/pose_samples_raw.txt"
    labels_path = "../../../train_data/pose/pose_labels_raw.txt"
    model_path = '../../../train_data/pose/pose.model'
    json_model_path = '../../../train_data/pose/pose_model.json'
    model_weights_path = "../../../train_data/pose/pose_model.h5"

    # ## HAND - val_acc: 97%
    # epochs = 1000
    # batch_size = 32
    # _dropout = 0.1
    # _activation='relu'
    # _optimizer='adam'
    # class_names = ["fist","pinch","wave","victory","stop","thumbsup"]
    # X_vector_dim = 40 # number of features or columns (hand)
    # samples_path = "../../../train_data/hand/hand_samples_raw.txt"
    # labels_path = "../../../train_data/hand/hand_labels_raw.txt"
    # model_path = '../../../train_data/hand/hand.model'
    # json_model_path = '../../../train_data/hand/hand_model.json'
    # model_weights_path = "../../../train_data/hand/hand_model.h5"

    # # ## FACE - val_acc: 97%
    # epochs = 1000
    # batch_size = 32
    # _dropout = 0.1
    # _activation='tanh'
    # _optimizer='Adadelta'
    # class_names = ["normal","happy","sad","surprise"]
    # X_vector_dim = 96 # number of features or columns (face)
    # samples_path = "../../../train_data/face/face_samples_raw.txt"
    # labels_path = "../../../train_data/face/face_labels_raw.txt"
    # model_path = '../../../train_data/face/face.model'
    # json_model_path = '../../../train_data/face/face_model.json'
    # model_weights_path = "../../../train_data/face/face_model.h5"

    # Load Keypoints Samples and Labels
    X = np.loadtxt(samples_path, dtype="float")
    y = np.loadtxt(labels_path)

    y_one_hot = convert_y_to_one_hot(y)  # convert to one_hot_encoding vector
    y_vector_dim = y_one_hot.shape[1]  # number of features or columns

    X_vectors_per_sample = timesteps  # number of vectors per sample

    # Keras LSTM models require 3-dimensional tensors. Convert samples to 3D tensors
    X_3D = samples_to_3D_array(X_vector_dim, X_vectors_per_sample, X)

    # Perform test-train split
    X_train, X_test, y_train, y_test = train_test_split(X_3D,
                                                        y_one_hot,
                                                        test_size=0.33,
                                                        random_state=42)

    input_shape = (X_train.shape[1], X_train.shape[2])  # store input_shape

    print "Model Parameters:"
    print "input_shape     : ", input_shape
    print "X_vector_dim    : ", X_vector_dim
    print "y_vector_dim    : ", y_vector_dim

    # Build Keras TimeDistributed(Dense) (many-to-many case) LSTM model
    print("Build Keras Timedistributed-LSTM Model...")
    model = Sequential()
    model.add(
        TimeDistributed(Dense(X_vector_dim, activation=_activation),
                        input_shape=input_shape))
    model.add(Dropout(_dropout))
    model.add(TimeDistributed(Dense(X_vector_dim * 2,
                                    activation=_activation)))  #(5, 80)
    model.add(Dropout(_dropout))
    model.add(TimeDistributed(Dense(X_vector_dim,
                                    activation=_activation)))  #(5, 40)
    model.add(Dropout(_dropout))
    model.add(TimeDistributed(Dense(X_vector_dim // 2,
                                    activation=_activation)))  #(5, 20)
    model.add(Dropout(_dropout))
    model.add(TimeDistributed(Dense(X_vector_dim // 4,
                                    activation=_activation)))  #(5, 10)
    model.add(Dropout(_dropout))
    model.add(
        LSTM(X_vector_dim // 4, dropout=_dropout, recurrent_dropout=_dropout))
    model.add(Dense(y_vector_dim, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=_optimizer,
                  metrics=['accuracy'])
    model.summary()

    # Fit model
    print('Training...')
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_test, y_test))

    # Evaluate Model and Predict Classes
    print('Testing...')
    score, accuracy = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score: {:.3}'.format(score))
    print('Test accuracy: {:.3}'.format(accuracy))

    # Export model
    export_model(model, model_path)

    # serialize model to JSON
    model_json = model.to_json()
    with open(json_model_path, "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights(model_weights_path)
    print("Saved model to disk")
Example #8
    def kerasify(self, name):
        export_model(self.model, 'yinsh.model')
Example #9
    model.compile(loss='categorical_crossentropy', optimizer=_optimizer, metrics=['accuracy'])
    model.summary()

    print('Training...')
    model.fit(X_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_test, y_test))

    print('Testing...')
    score, accuracy = model.evaluate(X_test, y_test,
                                     batch_size=batch_size)

    print('Test score: {:.3}'.format(score))
    print('Test accuracy: {:.3}'.format(accuracy))

    export_model(model, model_path)

    # serialize model to JSON
    model_json = model.to_json()
    with open(json_model_path, "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights(model_weights_path)
    print("Saved model to disk")


if __name__ == "__main__":
    main()

Example #10
    def kerasify(self, name):
        export_model(self.model, 'example.model')
Example #11
#else:
#    model.add(Dense(predict*NUM_FEATURE))

#api model
inputs1 = Input(shape=(train_X.shape[1], train_X.shape[2]))
lstm = LSTM(NUM_LSTM, return_sequences=True)(inputs1)
model = Model(inputs=inputs1, outputs=lstm)
model.compile(loss='mae', optimizer='adam')
# fit network
history = model.fit(train_X,
                    train_y,
                    epochs=NUM_EPOCH,
                    batch_size=BATCH_SIZE,
                    validation_data=(test_X, test_y),
                    verbose=2,
                    shuffle=False)
#EXPORT MODEL
export_model(model, exportname)
#SAVE MODEL
#model.save(Net_PATH)
#with open(HISTORY_PATH, 'wb') as handle:
#    pickle.dump(history.history, handle, protocol=pickle.HIGHEST_PROTOCOL)
# read the history
#with open(HISTORY_PATH, 'rb') as handle:
#   b = pickle.load(handle)
# plot history
#pyplot.plot(history.history['loss'], label='train')
#pyplot.plot(history.history['val_loss'], label='test')
#pyplot.legend()
#pyplot.show()
Example #12
def create_model():
    # Note: the enclosing function definition was missing from the original
    # snippet; it is restored here to match the create_model() call below.
    model = Sequential()
    model.add(Dense(units=32, input_dim=pos.shape[1], activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(units=64, activation="relu"))
    model.add(Dropout(0.1))
    model.add(Dense(units=1, activation='sigmoid'))
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model

model = None # Clearing the NN.
model = create_model()

checkpointer = ModelCheckpoint(filepath="sprot_training_57_dropout_32_64_1.hdf5", verbose=1, save_best_only=True)
earlystopper = EarlyStopping(monitor='val_loss', patience=10, verbose=1)

model.fit(X_train, y_train, epochs=1000, batch_size=32000, callbacks=[checkpointer], validation_data=(X_valid, y_valid), verbose=1, shuffle=True)

model = load_model("sprot_training_57_dropout_32_64_1.hdf5")

# eval
tresults = model.evaluate(X_test, y_test)
print(tresults)
y_pred = model.predict(X_test, batch_size=8192, verbose=1)
y = y_test
print('Calculating AUC...')
auroc = roc_auc_score(y, y_pred)
auprc = average_precision_score(y, y_pred)
print(auroc, auprc)

export_model(model, 'sprot_training_57_dropout_32_64_1.kerasify')
Example #13
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout

test_x = np.random.rand(100, 10).astype('f')
test_y = np.random.rand(100).astype('f')

input_shape = 10
num_classes = 1

model = Sequential()
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='sigmoid'))

model.compile(loss='mean_squared_error', optimizer='adamax')
model.fit(test_x, test_y, epochs=1, verbose=False)

x = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])  # renamed from `input` to avoid shadowing the built-in
print(x.shape)
print(x)
print(model.predict(x))

from kerasify import export_model
export_model(model, 'yinsh.model')
Example #14
from kerasify import export_model
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM

test_x = np.random.rand(10, 10, 10).astype('f')
test_y = np.random.rand(10).astype('f')

model = Sequential()
model.add(LSTM(2, input_shape=(10, 10)))
model.add(Dense(1, input_dim=10))

model.compile(loss='mean_squared_error', optimizer='adamax')
model.fit(test_x, test_y, epochs=3, verbose=2)

print(model.predict(test_x[:1]))  # the LSTM expects shape (batch, 10, 10); the original 1x10 array would fail
print('1')

export_model(model, 'example.model')
print('2')
Example #15
def main():
    args = firstPassCommandLine()
    trainF = args.searchInpTrain
    validF = args.searchInpValid
    weightF = args.searchWeight
    validWeightF = args.searchValidWeight
    testF = args.searchInpTest
    modelF = args.searchF
    nepoch = args.nepoch
    batchSize = args.batchSize

    # Read weight for each data points
    weights = load_weight(weightF)

    # Load data in the svm rank file format
    trainfeats1, trainlabels1 = load_svmlight(trainF + '.1')
    trainfeats2, trainlabels2 = load_svmlight(trainF + '.2')

    #valid_weights = load_weight(validWeightF)
    #validfeats1, validlabels2 = load_svmlight(validF + '.1')
    #validfeats2, validlabels2 = load_svmlight(validF + '.2')

    # Permutation
    idx = np.random.permutation(trainfeats1.shape[0])
    trainfeats1 = trainfeats1[idx, :]
    trainfeats2 = trainfeats2[idx, :]
    weights = weights[idx]

    assert len(weights) == len(trainlabels1), "Length not equal"
    testfeats_exists = False

    if testF is not None:
        testfeats1, testlabels1 = load_svmlight(testF + '.1')
        testfeats2, testlabels2 = load_svmlight(testF + '.2')
        testfeats_exists = True
    else:
        testfeats1 = None
        testfeats2 = None
        testlabels1 = None
        testlabels2 = None

    ## convert to dense array
    #trainfeats1 = np.array(trainfeats1.todense())
    #validfeats1 = np.array(validfeats1.todense())
    #trainfeats2 = np.array(trainfeats2.todense())
    #validfeats2 = np.array(validfeats2.todense())
    #
    #if testfeats_exists:
    #    testfeats1 = np.array(testfeats1.todense())
    #    testfeats2 = np.array(testfeats2.todense())

    # Create model
    INPUT_DIM = trainfeats1.shape[1]
    rankModel, getScore, outNet = getRankModel(INPUT_DIM)
    target = np.ones((trainfeats1.shape[0]))
    rankModel.summary()
    numParams = rankModel.count_params()
    numDatapts = trainfeats1.shape[0]
    numPolicies = int(np.round(numDatapts / (numParams * _GROUPS_)))
    numPolicies = -1  # hard-coded override: forces the single-policy branch below
    print("###############################")
    print("###############################")
    print("###############################")
    print("###############################")

    if numPolicies <= 0:
        numPolicies = 1
        group = [range(len(trainfeats1))]
    else:
        group = list(
            chunks(range(0, len(trainfeats1)), int(numParams * _GROUPS_)))

    ## Train model.
    allModels = []
    for k, cidx in enumerate(group):
        if k != 0:
            # Create new models
            rankModel, getScore, outNet = getRankModel(INPUT_DIM)

        print('Data size: %d\n' % len(cidx))

        #validation_data=([validfeats1, validfeats2], \
        #                  np.ones((validfeats1.shape[0])), valid_weights), \

        early_stopping = EarlyStopping(monitor='val_acc', patience=50)
        history = rankModel.fit([trainfeats1[cidx, :], trainfeats2[cidx, :]], target[cidx], \
                                sample_weight=weights[cidx],
                                validation_split=0.2,
                                callbacks=[early_stopping],
                                batch_size=batchSize, \
                                epochs=nepoch, verbose=1, \
                                shuffle=True)

        # Add an additional module to normalize the score
        relS = getScore.predict(trainfeats1[cidx, :],
                                batch_size=batchSize,
                                verbose=1)
        irrS = getScore.predict(trainfeats2[cidx, :],
                                batch_size=batchSize,
                                verbose=1)
        allScore = np.hstack((relS.flatten(), irrS.flatten()))
        maxScore = np.max(allScore)
        minScore = np.min(allScore)
        scaleFactor = 1 / (maxScore - minScore)
        print('Max: %f Min: %f' % (maxScore, minScore))
        getScore.add(
            Dense(1,
                  input_shape=(1, ),
                  weights=[np.ones([1, 1]), -minScore * np.ones((1))]))
        getScore.add(
            Dense(1,
                  input_shape=(1, ),
                  weights=[scaleFactor * np.ones([1, 1]),
                           np.zeros((1))]))

        # Save model
        ## Generate scores from document/query features.
        #relS = getScore.predict(validfeats1, batch_size=batchSize, verbose=1)
        #irrS = getScore.predict(validfeats2, batch_size=batchSize, verbose=1)
        #getRank(relS, irrS, 'valid')

        if testfeats_exists:
            relS = getScore.predict(testfeats1,
                                    batch_size=batchSize,
                                    verbose=1)
            irrS = getScore.predict(testfeats2,
                                    batch_size=batchSize,
                                    verbose=1)
            np.savetxt(modelF[:-3] + "rank1.txt", relS)
            np.savetxt(modelF[:-3] + "rank2.txt", irrS)
            print("Max score difference: " + str(np.max(relS - irrS)))
            print("Min score difference: " + str(np.min(relS - irrS)))
            print("Mean score difference: " + str(np.mean(relS - irrS)))
            print("Score difference std dev: " + str(np.std(relS - irrS)))
            getRank(relS, irrS, 'test')

        # Save model
        export_model(getScore, modelF)
        getScore.save(modelF.replace(".h5", '_%d.h5' % k))
        del getScore
        del rankModel

        #_, getScore, _= getRankModel(INPUT_DIM)

    #combine_modules(INPUT_DIM, numPolicies, modelF)
    be.clear_session()
    return
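
Example #15 calls three helpers whose definitions live elsewhere in its source file: load_svmlight, load_weight, and chunks. The sketches below are assumptions inferred from the call sites, not the originals: load_svmlight presumably wraps scikit-learn's load_svmlight_file (the commented-out .todense() calls point to a sparse matrix), load_weight reads one sample weight per line, and chunks splits an index range into fixed-size groups.

import numpy as np
from sklearn.datasets import load_svmlight_file

def load_svmlight(path):
    # Assumed: returns (sparse feature matrix, label vector).
    feats, labels = load_svmlight_file(path)
    return feats, labels

def load_weight(path):
    # Assumed: one sample weight per line.
    return np.loadtxt(path)

def chunks(seq, n):
    # Yield consecutive groups of at most n items from seq.
    seq = list(seq)
    for i in range(0, len(seq), n):
        yield seq[i:i + n]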
Example #16
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("input", help="input filepath of keras model file")
parser.add_argument("output", help="output filepath of kerasified model file")
args = parser.parse_args()

print(args)

from keras.models import load_model
print "convert keras model:", args.input
keras_model = load_model(args.input)

import sys
sys.path.insert(0, './lib/kerasify')
from kerasify import export_model

print "to:", args.output
export_model(keras_model, args.output)

print "done!"
Example #17
def RunNetwork(epo, dropout, networknum, class_name, Xvector, valpercent,
               timestep, iteration, dataInfo):
    timesteps = timestep
    ## POSE - val_acc: 98%
    epochs = epo
    batch_size = 32
    _dropout = dropout
    _activation = 'relu'
    _optimizer = 'Adam'
    class_names = class_name  # 4 classes
    X_vector_dim = Xvector  # number of features or columns (pose)
    samples_path = "data.txt"  # 311 files with 10 frames' human-pose estimation keypoints(10*18)
    labels_path = "label.txt"  # 311 files' labels, 3 classes in total
    if not os.path.isdir(str(iteration)):
        os.makedirs(str(iteration))
    model_path = str(iteration) + '/pose.model'
    json_model_path = str(iteration) + '/pose_model.json'
    model_weights_path = str(iteration) + '/pose_model.h5'

    X = np.loadtxt(samples_path, dtype="float")
    y = np.loadtxt(labels_path)

    def samples_to_3D_array(_vector_dim, _vectors_per_sample, _X):
        X_len = len(_X)
        result_array = []
        for sample in range(0, X_len):  # should be the 311 samples?
            sample_array = []
            for vector_idx in range(0, _vectors_per_sample):
                start = vector_idx * _vector_dim
                end = start + _vector_dim
                sample_array.append(_X[sample][start:end])
            result_array.append(sample_array)
        return np.asarray(result_array)

    X_vectors_per_sample = timesteps  # number of vectors per sample , 5 samples
    X_3D = samples_to_3D_array(X_vector_dim, X_vectors_per_sample, X)

    def convert_y_to_one_hot(_y):
        # one-hot encoding simply means: red -> 0, green -> 1, blue -> 2
        _y = np.asarray(_y, dtype=int)
        b = np.zeros((_y.size, _y.max() + 1))
        b[np.arange(_y.size), _y] = 1
        return b

    y_one_hot = convert_y_to_one_hot(y)
    y_vector_dim = y_one_hot.shape[1]

    X_train, X_test, y_train, y_test = train_test_split(X_3D,
                                                        y_one_hot,
                                                        test_size=valpercent,
                                                        random_state=42)
    input_shape = (X_train.shape[1], X_train.shape[2])

    model = Sequential()
    model.add(
        TimeDistributed(Dense(X_vector_dim, activation=_activation),
                        input_shape=input_shape))
    model.add(Dropout(_dropout))
    model.add(TimeDistributed(Dense(X_vector_dim * 2,
                                    activation=_activation)))  # (5, 80)
    model.add(Dropout(_dropout))
    model.add(TimeDistributed(Dense(X_vector_dim,
                                    activation=_activation)))  # (5, 40)
    model.add(Dropout(_dropout))
    model.add(
        TimeDistributed(Dense(int(X_vector_dim / 2),
                              activation=_activation)))  # (5, 20)
    model.add(Dropout(_dropout))
    model.add(
        TimeDistributed(Dense(int(X_vector_dim / 4),
                              activation=_activation)))  # (5, 10)
    model.add(Dropout(_dropout))
    model.add(
        LSTM(int(X_vector_dim / 4),
             dropout=_dropout,
             recurrent_dropout=_dropout))
    model.add(Dense(y_vector_dim, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=_optimizer,
                  metrics=['accuracy'])
    NetworkInfo = '*2_*1_/2_/4'

    class TrainingVisualizer(keras.callbacks.History):
        def on_epoch_end(self, epoch, logs={}):
            super(TrainingVisualizer, self).on_epoch_end(epoch, logs)
            IPython.display.clear_output(wait=True)
            # Generate the TrainingVisualizer plot
            if epoch == epochs - 1:
                axes = pd.DataFrame(self.history).plot()
                axes.axvline(x=max(
                    (val_acc, i)
                    for i, val_acc in enumerate(self.history['val_acc']))[1])

    print('Training...')
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_test, y_test),
              callbacks=[TrainingVisualizer()])

    score, accuracy = model.evaluate(X_test, y_test, batch_size=batch_size)

    doc = open('result.txt', 'a')
    print(iteration, file=doc)
    print('Test score: {:.3}'.format(score), file=doc)
    print('Test accuracy: {:.3}'.format(accuracy), file=doc)
    doc.close()

    y_pred = model.predict(X_test)

    def plot_confusion_matrix(cm,
                              classes,
                              normalize=False,
                              title='Confusion matrix',
                              cmap=plt.cm.Blues):

        if normalize:
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
            print("Normalized confusion matrix")
        else:
            print('Confusion matrix, without normalization')

        print(cm)

        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)

        fmt = '.2f' if normalize else 'd'
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            plt.text(j,
                     i,
                     format(cm[i, j], fmt),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")

        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        if os.path.isdir("graph/Confusion_Matrix") == True:
            pass
        else:
            os.makedirs("graph/Confusion_Matrix")
        plt.savefig('graph/Confusion_Matrix/CM_' + str(iteration))
        plt.close()

    # Compute confusion matrix
    cnf_matrix = confusion_matrix(np.argmax(y_test, axis=1),
                                  np.argmax(y_pred, axis=1))
    np.set_printoptions(precision=2)
    # Plot non-normalized confusion matrix
    plt.figure()
    plot_confusion_matrix(cnf_matrix,
                          classes=class_names,
                          title='Confusion matrix, without normalization')

    # plt.show()
    if os.path.isdir("graph/TrainingVisualizer") == True:
        pass
    else:
        os.makedirs("graph/TrainingVisualizer")
    plt.savefig('graph/TrainingVisualizer/TV_' + str(iteration))

    export_model(model, model_path)
    print("Model saved to disk")

    model_json = model.to_json()
    with open(json_model_path, "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights(model_weights_path)
    print("Saved model to disk")

    docdes = open('description.txt', 'a')
    docdes.write(str(iteration) + "\n")
    docdes.write("Data source: " + dataInfo + "\n")
    docdes.write("Network: " + NetworkInfo + "\n")
    docdes.write("epochs: " + str(epo) + "\n")
    docdes.write("dropout: " + str(dropout) + "\n")
    docdes.write("Validation percentage: " + str(valpercent) + "\n")
    docdes.write('\n')
    docdes.close()
Example #18
#sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
#model.compile(loss='categorical_crossentropy', optimizer=sgd)

model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])

print(np.shape(x_train))
print(np.shape(y_train))

model.fit(x_train, y_train, batch_size=32, epochs=100)
score = model.evaluate(x_train, y_train, batch_size=32)

print(score)

#model export for SuperCollider
from kerasify import export_model
export_model(model, 'DNN1.model')

#model export for javascript
import onnxmltools
#from keras.models import load_model

# Update the input name and path for your Keras model
#input_keras_model = 'model.h5'

# Change this path to the output name and path for the ONNX model
output_onnx_model = '/Users/ioi/Desktop/onnxoutput/modelcheck.onnx'

# Load your Keras model
#keras_model = load_model(input_keras_model)

# Convert the Keras model into ONNX
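
The snippet stops at the conversion comment. Assuming the freshly trained model from above is converted directly (rather than reloading one from disk), the missing step would look roughly like this with onnxmltools:

onnx_model = onnxmltools.convert_keras(model)
onnxmltools.utils.save_model(onnx_model, output_onnx_model)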
Example #19
# NOTE: data_labels, data, tests_labels, and tests must be initialized as
# empty arrays of matching width before this loop; that setup is not shown.
for label_num in range(len(LABEL_NAMES)):
    (loaded_labels, loaded_data, loaded_tests_labels, loaded_tests) = load_samples(label_num)
    data_labels = np.append(data_labels, loaded_labels)
    data = np.append(data, loaded_data, axis=0)
    tests_labels = np.append(tests_labels, loaded_tests_labels)
    tests = np.append(tests, loaded_tests, axis=0)

# Try with only the torso keypoints which are 0-7 * (x,y)
data = data[:, 0:16]
tests = tests[:, 0:16]

data = tf.keras.utils.normalize(data, axis=1)
tests = tf.keras.utils.normalize(tests, axis=1)

model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(3, activation=tf.nn.softplus))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

start_time = time.time()
model.fit(data, data_labels, epochs=100, batch_size=2)
val_loss, val_acc = model.evaluate(tests, tests_labels)
elapsed_time = time.time() - start_time
print("Final Loss: {} Final Accuracy:{} Total Time:{}s".format(val_loss, val_acc, elapsed_time))

export_model(model, os.path.join(output_dir, 'raised-hands-upper-body-only.model'))


Example #20
def main(args):
    tensorflow_shutup()
    model = load_model(args.model)
    export_model(model, "tmp/" + args.name + ".temp")
    curdir = os.path.dirname(os.path.abspath(__file__))
    dirs = curdir.split("\\")
    curdir = ("/").join(dirs[0:-1])
    with open("source/" + args.name + ".cpp", "w") as f:
        f.write('''// Auto-generated by keraport.py, do not modify
#include <iostream>
#include <fstream>
#include <math.h>
#include <string.h>
#include "mex.hpp"
#include "mexAdapter.hpp"
#include "keras_model.hpp"
#include <memory>

class MexFunction : public matlab::mex::Function {
	matlab::data::ArrayFactory factory;
	std::shared_ptr<matlab::engine::MATLABEngine> matlabPtr = getEngine();
	std::ostringstream stream;
	public: 
	void operator() (matlab::mex::ArgumentList outputs, matlab::mex::ArgumentList inputs) {
		//checkArguments(outputs, inputs);
		bool bypass;
		if (inputs[inputs.size() - 1].getType() == matlab::data::ArrayType::LOGICAL) {
			bypass = inputs[inputs.size() - 1][0];
		} else {
			bypass = false;
		}
		if (bypass == true) {
			mexPrintf("**Ignoring errors for dimensions**");
		}
		// implementation for multiple predictions
		for (int i = 0; i < inputs.size(); i++) {
			if (inputs[i].getType() == matlab::data::ArrayType::DOUBLE) {
				matlab::data::TypedArray<double> doubleArray = std::move(inputs[i]);
				std::vector<float> vectorFloats;
				for (auto& elem : doubleArray) {
					vectorFloats.push_back((float)elem);
				}
				std::vector<float> VecOut = predict(vectorFloats, bypass);
				std::vector<double> doubleVec;
				for (auto elem : VecOut) {
					doubleVec.push_back((double)elem);
				}
				matlab::data::TypedArray<double> doubleOut = factory.createArray({ 1,doubleVec.size() }, doubleVec.begin(), doubleVec.end());
				outputs[i] = std::move(doubleOut);
			}
			else if (bypass==true && i == inputs.size()-1) {
				//do nothing
			}
			else {
				mexError("Input must be a double array");
			}
		}
	}
	void mexPrintf(std::string stream) {
        // Pass stream content to MATLAB fprintf function
        matlabPtr->feval(u"fprintf", 0,
            std::vector<matlab::data::Array>({ factory.createScalar(stream) }));
    }
	void mexError(std::string stream) {
		// Pass stream content to MATLAB fprintf function
		matlabPtr->feval(u"error", 0,
			std::vector<matlab::data::Array>({ factory.createScalar(stream) }));
	}
	std::vector<float> predict(std::vector<float> input, int bypass) {
		KerasModel model;
		model.LoadModel("%s/tmp/%s.temp");
		Tensor in(input.size());
		in.data_ = input;
		Tensor out;
		//test if dimensions match
		//KerasLayer* layer = model.layers_[0];
		std::string str = model.firstlayer_->Check(&in);
		if (str != "true" && bypass == false) {
			mexError(str);
		}
		model.Apply(&in, &out);
		return out.data_;
	}
};''' % (curdir, args.name))  # args.name matches the tmp/<name>.temp file exported above
def main():
    args = firstPassCommandLine()
    trainF = args.pruneInpTrain
    validF = args.pruneInpValid
    weightF = args.pruneWeight
    validWeightF = args.pruneValidWeight
    testF = args.pruneInpTest
    modelF = args.pruneF
    nepoch = args.nepoch
    batchSize = args.batchSize

    # Load sample weights
    weights = load_weight(weightF)
    valid_weights = load_weight(validWeightF)

    # Load data in the svm rank file format
    trainfeats, trainlabels = load_svmlight(trainF)
    validfeats, validlabels = load_svmlight(validF)
    testfeats_exists = False

    #weights = weights[:len(trainlabels)]
    #valid_weights = valid_weights[:len(validlabels)]

    if testF is not None:
        testfeats, testlabels = load_svmlight(testF)
        testfeats_exists = True
    else:
        testfeats = None
        testlabels = None

    # convert to dense array
    trainfeats = np.array(trainfeats.todense())
    validfeats = np.array(validfeats.todense())
    if testfeats_exists:
        testfeats = np.array(testfeats.todense())

    # concatenate weights to data points for oversampling
    trainfeats_weights = np.hstack(
        (trainfeats, np.reshape(weights, (len(weights), 1))))
    validfeats_weights = np.hstack(
        (validfeats, np.reshape(valid_weights, (len(valid_weights), 1))))

    # Apply the random over-sampling
    ros = RandomOverSampler()
    trainfeats_weights_ros, trainlabels_ros = ros.fit_sample(
        trainfeats_weights, trainlabels)
    validfeats_weights_ros, validlabels_ros = ros.fit_sample(
        validfeats_weights, validlabels)
    trainfeats_ros = trainfeats_weights_ros[:, :-1]
    trainweights_ros = trainfeats_weights_ros[:, -1]
    validfeats_ros = validfeats_weights_ros[:, :-1]
    validweights_ros = validfeats_weights_ros[:, -1]

    if testfeats_exists:
        testfeats_ros, testlabels_ros = ros.fit_sample(testfeats, testlabels)

    # Reset labels (not needed anymore since for neural nets, we use labels of 0 and 1 in trj files)
    #trainlabels[np.where(trainlabels==-1)[0]] = 0
    #validlabels[np.where(validlabels==-1)[0]] = 0
    #if testfeats != None:
    #  testlabels[np.where(testlabels==-1)[0]] = 0

    # Create model
    INPUT_DIM = trainfeats.shape[1]
    pruneModel = getPruneModel(INPUT_DIM)
    pruneModel.summary()

    ## Train model.
    early_stopping = EarlyStopping(monitor='val_loss', patience=20)
    history = pruneModel.fit(trainfeats_ros, trainlabels_ros,
                             batch_size=batchSize, \
                             sample_weight=trainweights_ros,
                             epochs=nepoch, verbose=1,
                             validation_data=(validfeats_ros, validlabels_ros, validweights_ros),
                             callbacks=[early_stopping],
                             shuffle=True)
    # Evaluate on training
    train_preds = pruneModel.predict_classes(trainfeats)
    train_eval_ = pruneModel.evaluate(trainfeats, trainlabels, verbose=1)
    print("[Loss, accuracy] = " + str(train_eval_))
    print("Precision = " + str(precision_score(trainlabels, train_preds)))
    print("Recall = " + str(recall_score(trainlabels, train_preds)))

    train_preds = pruneModel.predict_classes(trainfeats_ros)
    train_eval_ = pruneModel.evaluate(trainfeats_ros, trainlabels_ros, verbose=1)
    print("[Loss, accuracy] (randomly oversampled) = " + str(train_eval_))
    print("Precision (randomly oversampled) = " +
          str(precision_score(trainlabels_ros, train_preds)))
    print("Recall (randomly oversampled) = " +
          str(recall_score(trainlabels_ros, train_preds)))

    # Evaluate on validation data
    eval_ = pruneModel.evaluate(validfeats, validlabels, verbose=1)
    print("[Validation loss, validation accuracy] = " + str(eval_))

    if testfeats_exists:
        eval_ = pruneModel.evaluate(testfeats, testlabels, verbose=1)
        print("[Test loss, test accuracy] = " + str(eval_))
        eval_ = pruneModel.evaluate(testfeats_ros, testlabels_ros, verbose=1)
        print("[Test loss, test accuracy] (randomly oversampled) = " +
              str(eval_))

    # Save model
    print('Creating: ' + modelF)
    export_model(pruneModel, modelF)
    pruneModel.save(modelF[:-3] + "_keras.h5")
    del pruneModel
    be.clear_session()
    return