Example #1
def port(self,
         arena_size='1024 * 16',
         model_name='model',
         classname='NeuralNetwork',
         classmap=None):
     """
     Port Tf model to plain C++
     :param arena_size: int|str size of tensor arena (read Tf docs)
     :param model_name: str name of the exported model variable
     :param classname: str name of the exported class
     """
    return jinja(
        'ml/classification/tensorflow/NeuralNetwork.jinja', {
            'classname': classname,
            'model_name': model_name,
            'model_data': port(self.sequential,
                               variable_name=model_name,
                               optimize=False),
            'num_inputs': self.num_inputs,
            'num_outputs': self.num_classes,
            'arena_size': arena_size,
            'classmap': classmap
        })
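A minimal usage sketch of this method, assuming it belongs to a trained classifier wrapper; the constructor, fit() call, class names, and file path below are illustrative and not taken from the source:

# Illustrative only: clf is assumed to be an instance of the class exposing port()
clf = NeuralNetwork(num_inputs=4, num_classes=3)        # hypothetical constructor
clf.fit(X_train, y_train)                               # hypothetical training step
cpp_code = clf.port(arena_size='1024 * 32',
                    classname='IrisClassifier',
                    classmap={0: 'setosa', 1: 'versicolor', 2: 'virginica'})
with open('IrisClassifier.h', 'w') as f:                # header to include from the C++ sketch
    f.write(cpp_code)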
Example #2
def main(tfModelName="TeensyModel", modelName=None, debug=False):
    if modelName is None:
        #model = tf.keras.models.load_model('dense2_single_weather_model.h5')
        #model = tf.keras.models.load_model('results/DQNbasic_lr0.0001_LI1_bs256_g0.95_e1_t0.05_network2x256_run11_numGames800_score500.0.h5')
        #model = tf.keras.models.load_model('results/DQNbasic_lr0.0001_LI1_bs256_g0.95_e1_t0.05_network4x16_run23_numGames800_score438.73.h5')
        #model = tf.keras.models.load_model('results/DQNbasic_lr0.0001_LI1_bs256_g0.95_e1_t0.05_network2x32_run18_numGames800_score391.24.h5')

        #model = tf.keras.models.load_model('results/DQNbasic_lr0.0001_LI1_bs256_g0.95_e1_t0.05_network3x64_run20_numGames800_score500.0.h5')
        #model = tf.keras.models.load_model('results/DQNbasic_lr0.0001_LI1_bs256_g0.95_e1_t0.05_network4x32_run22_numGames800_score500.0.h5')
        #model = tf.keras.models.load_model('results/DQNbasic_lr0.0001_LI1_bs256_g0.95_e1_t0.05_network4x64_run21_numGames800_score498.75.h5')

        #model = tf.keras.models.load_model('results/td3/011_Pendulum_lr0.001-0.001_LI1_bs100_g0.99_t0.005_n0.1_network128-128-64-64-32/models/actor.h5')
        #model = tf.keras.models.load_model('results/td3/012_Pendulum_lr0.001-0.001_LI1_bs100_g0.99_t0.005_n0.1_network128-128-64-64-32/models/actor.h5')
        #model = tf.keras.models.load_model('results/td3/013_Pendulum_lr0.001-0.001_LI1_bs100_g0.99_t0.005_n0.1_network128-64-64-32/models/actor.h5')
        #model = tf.keras.models.load_model('results/td3/014_Pendulum_lr0.001-0.001_LI1_bs100_g0.99_t0.005_n0.1_network128-64-64-32/models/actor.h5')
        #model = tf.keras.models.load_model('break_me.h5')
        model = tf.keras.models.load_model(
            'results/td3/022_Pendulum_lr0.001-0.001_LI1_bs100_g0.99_t0.005_n0.05_network128-128-64-64-32/models/actor.h5'
        )
        #model = tf.keras.models.load_model('results/td3/024_Pendulum_lr0.001-0.001_LI1_bs100_g0.99_t0.005_n0.05_network128-128-64-64-32/models/actor.h5')
    else:
        model = tf.keras.models.load_model('results/' + modelName + '.h5')

    model.summary()

    # change the output path if needed
    with open('TeensyModels/' + tfModelName + '.h', 'w') as f:
        text = port(model,
                    optimize=False,
                    pretty_print=True,
                    variable_name='modelParams')
        # the exported array may be too large for its length to fit in an int
        text = text.replace(' int ', ' long ')
        # drop 'const' so the array can be modified for live updates
        text = text.replace('const', '')
        f.write(text)
        #f.write(port(model, optimize=False))

    #afile = open('TeensyModels/' + tfModelName + '.h', 'rw')

    # show test outputs to prove teensy model gives similar results
    if debug:
        an_input = [[0.05, 0.05, 0.05]]
        npData = np.array(an_input, dtype=np.float32)
        #print("Shape: " + str(npData.shape))
        outputPrediction = model.predict(npData)
        print("Output Prediction: " + str(outputPrediction))
Example #3
    np.random.seed(1337)
    x_values = np.random.uniform(low=0, high=2 * math.pi, size=SAMPLES)
    # shuffle and add noise
    np.random.shuffle(x_values)
    y_values = np.sin(x_values)
    y_values += 0.1 * np.random.randn(*y_values.shape)

    # split into train, validation, test
    TRAIN_SPLIT = int(0.6 * SAMPLES)
    TEST_SPLIT = int(0.2 * SAMPLES + TRAIN_SPLIT)
    x_train, x_test, x_validate = np.split(x_values, [TRAIN_SPLIT, TEST_SPLIT])
    y_train, y_test, y_validate = np.split(y_values, [TRAIN_SPLIT, TEST_SPLIT])

    # create a NN with 2 layers of 16 neurons
    model = tf.keras.Sequential()
    model.add(layers.Dense(16, activation='relu', input_shape=(1, )))
    model.add(layers.Dense(16, activation='relu'))
    model.add(layers.Dense(1))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    model.fit(x_train,
              y_train,
              epochs=200,
              batch_size=16,
              validation_data=(x_validate, y_validate))
    return model


model = get_model()
c_code = port(model, pretty_print=True)
print(c_code)
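As in the other examples, the generated code can be saved to a header file instead of being printed; a small sketch, with an illustrative file name:

# Illustrative: persist the ported model so it can be included from C/C++ code
with open('sine_model.h', 'w') as f:
    f.write(port(model, variable_name='sine_model', pretty_print=True))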
Example #4
    # model.add(layers.MaxPooling2D((2, 2)))
    # model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.Flatten())
    # model.add(layers.Dense(16, activation='relu'))
    model.add(layers.Dense(len(np.unique(y_train))))

    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    model.fit(x_train,
              y_train,
              epochs=50,
              batch_size=16,
              validation_data=(x_validate, y_validate))
    return model, x_test, y_test


def test_model(model, x_test, y_test):
    x_test = (x_test / x_test.max()).reshape((len(x_test), 8, 8, 1))
    y_pred = model.predict(x_test).argmax(axis=1)
    print('ACCURACY', (y_pred == y_test).sum() / len(y_test))


if __name__ == '__main__':
    model, x_test, y_test = get_model()
    test_model(model, x_test, y_test)
    c_code = port(model, variable_name='digits_model', pretty_print=True)
    print(c_code)
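For completeness, a small sketch of persisting the generated code, following the same pattern as the other examples; the file name is illustrative:

# Illustrative: write the digits model to a header file for the microcontroller sketch
with open('digits_model.h', 'w') as f:
    f.write(c_code)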
Example #5
predictions_categorical = np.argmax(predictions, axis=1)

# display prediction performance on validation data and test data

print('Prediction Accuracy:',
      accuracy_score(target_test, predictions_categorical).round(3))
print('Test accuracy:', round(test_score[1], 3))
print('Test loss:', round(test_score[0], 3))
print('')
print(classification_report(target_test, predictions_categorical))

# convert TF model to TF Lite model as a C header file (for the classifier)

from tinymlgen import port
with open('tf_lite_model.h', 'w') as f:
    f.write(port(model, optimize=False))

# visualize prediction performance

DISPLAY_SKIP = 500

import matplotlib.pyplot as plt

accuracy = history.history['accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
val_accuracy = history.history['val_accuracy']
epochs = np.arange(len(accuracy)) + 1

plt.rcParams['font.size'] = 12
plt.figure(figsize=(14, 8))
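The snippet stops after creating the figure; a plausible continuation that plots the history values defined above (the subplot layout is a guess, not from the source):

# Sketch: accuracy and loss curves for training and validation
plt.subplot(1, 2, 1)
plt.plot(epochs, accuracy, label='train accuracy')
plt.plot(epochs, val_accuracy, label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(epochs, loss, label='train loss')
plt.plot(epochs, val_loss, label='validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()

plt.show()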
Example #6
input_dim = X_train.shape[1:]
output_dim = y.shape[1]

print('input_dim', input_dim)
print('output_dim', output_dim)

# create and train network
# you can customize the layers as you prefer
nn = Sequential()
nn.add(layers.Dense(units=50, activation='relu', input_shape=input_dim))
nn.add(layers.Dense(units=50, activation='relu'))
nn.add(layers.Dense(output_dim, activation='softmax'))

# use categorical_crossentropy for multi-class classification
nn.compile(loss='categorical_crossentropy',
           optimizer='adam',
           metrics=['accuracy'])
nn.fit(X_train,
       y_train,
       validation_data=(X_valid, y_valid),
       epochs=100,
       verbose=0)

print('Accuracy: %.1f' % nn.evaluate(X_test, y_test)[1])

# export to file
with open('wine_nn.h', 'w', encoding='utf-8') as file:
    file.write(
        port(nn, variable_name='wine_model', pretty_print=True,
             optimize=False))
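The snippet assumes X_train, X_valid, X_test and the targets already exist; the output_dim = y.shape[1] line and the categorical_crossentropy loss imply one-hot encoded labels. A hypothetical preparation sketch; the wine dataset, scaler, and split sizes are guesses based on the variable names:

from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.utils import to_categorical

data = load_wine()
X = MinMaxScaler().fit_transform(data.data)   # scale features to [0, 1]
y = to_categorical(data.target)               # one-hot targets for categorical_crossentropy

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.25, random_state=0)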