# Module-level imports used below; plu/pu (plotting helpers) and ut (misc
# utilities) are project-local modules and are left as in the original.
import os
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
import keras

def ident_plant(_input, _output, expe_name, epochs=50, force_train=False, display_training_history=False):
    filename = "/tmp/plant_id__planar_mip.h5"
    if force_train or not os.path.isfile(filename):
        plant_i = keras.layers.Input((5,), name="plant_i")  # x1_k, x2_k, x3_k, x4_k, u_k
        if 1:  # manual switch: single linear, bias-free layer, i.e. x_{k+1} = [A|B].(x_k, u_k)
            plant_l = keras.layers.Dense(4, activation='linear', kernel_initializer='uniform',
                                         use_bias=False, name="plant")
            plant_o = plant_l(plant_i)
        else:  # deeper nonlinear alternative
            plant_l1 = keras.layers.Dense(8, activation='relu', kernel_initializer='uniform', use_bias=True, name="plant1")
            plant_l2 = keras.layers.Dense(12, activation='relu', kernel_initializer='uniform', use_bias=True, name="plant2")
            plant_l3 = keras.layers.Dense(4, activation='linear', kernel_initializer='uniform', use_bias=True, name="plant3")
            plant_o = plant_l3(plant_l2(plant_l1(plant_i)))
        plant_ann = keras.models.Model(inputs=plant_i, outputs=plant_o)
        plant_ann.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
        early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', patience=8)
        history = plant_ann.fit(_input, _output, epochs=epochs, batch_size=32, verbose=1,
                                shuffle=True, validation_split=0.1, callbacks=[early_stopping])
        if display_training_history:
            margins = (0.04, 0.07, 0.98, 0.93, 0.27, 0.2)
            figure = plu.prepare_fig(figsize=(20.48, 7.68), margins=margins)
            ax = plt.subplot(1, 2, 1); plu.decorate(ax, title='loss'); plt.plot(history.history['loss'])
            # recent Keras logs this metric as 'accuracy' ('acc' in older versions)
            ax = plt.subplot(1, 2, 2); plu.decorate(ax, title='accuracy'); plt.plot(history.history['accuracy'])
            #plu.save_if('../docs/plots/plant_id__mip_simple__{}_training_history.png'.format(expe_name))
        plant_ann.save(filename)
    else:
        plant_ann = keras.models.load_model(filename)
    return plant_ann
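# Sketch (assumption, not in the original file): for the linear, bias-free
# variant above, the identified discrete-time model x_{k+1} = A x_k + B u_k can
# be read back from the trained layer. Keras Dense computes output = input @ kernel,
# so the (5, 4) kernel stacks A^T over B^T. '_input', '_output' and the
# experiment name are illustrative placeholders.
def example_extract_linear_model(_input, _output):
    plant_ann = ident_plant(_input, _output, 'mip_example')
    W = plant_ann.get_layer('plant').get_weights()[0]  # kernel, shape (5, 4)
    A_id, B_id = W[:4].T, W[4:].T                      # (4, 4) and (4, 1)
    return A_id, B_id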
def plot_training_dataset(_ann_in, _ann_out):
    fig = pu.prepare_fig(window_title='Training dataset')
    names = '$y_k$', '$y_{k-1}$', '$u_{k-1}$', '$u_k$', '$y_{k+1}$'
    for i in range(4):  # histogram of each network input
        ax = plt.subplot(1, 5, i + 1)
        plt.hist(_ann_in[:, i])
        pu.decorate(ax, title=names[i])
    ax = plt.subplot(1, 5, 5)  # histogram of the output
    plt.hist(_ann_out)
    pu.decorate(ax, title=names[4])
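# Sketch (assumption): how the io regression arrays fed to plot_training_dataset
# might be assembled from logged scalar sequences y and u; row k holds
# (y_k, y_{k-1}, u_{k-1}, u_k) and the target is y_{k+1}.
def example_make_io_dataset(y, u):
    _ann_in = np.vstack([y[1:-1], y[:-2], u[:-2], u[1:-1]]).T
    _ann_out = y[2:]
    return _ann_in, _ann_out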
def plot_dataset(time, X, U, exp_name):
    margins = (0.04, 0.07, 0.98, 0.93, 0.27, 0.2)
    figure = plu.prepare_fig(figsize=(20.48, 7.68), margins=margins)
    plots = [('$x$', 'm', X[:, 0]),
             ('$\\theta$', 'deg', np.rad2deg(X[:, 1])),
             ('$\\dot{x}$', 'm/s', X[:, 2]),
             ('$\\dot{\\theta}$', 'deg/s', np.rad2deg(X[:, 3])),
             ('$\\tau$', 'N', U[:, 0])]
    for i, (_ti, _un, _d) in enumerate(plots):
        ax = plt.subplot(1, 5, i + 1)
        plt.hist(_d, bins=100)
        plu.decorate(ax, title=_ti, xlab=_un)
def simulate_plant_and_ann(plant, ann_plant):
    time = np.arange(0., 25.05, plant.dt)
    sp = scipy.signal.square(0.25 * time)  # square-wave setpoint
    yp, ya = np.zeros(len(time)), np.zeros(len(time))
    for k in range(1, len(time) - 1):
        yp[k + 1] = plant.io_dyn(yp[k], yp[k - 1], sp[k], sp[k - 1])
        # free-run: the network is fed its own past outputs, as a batch of one
        # (y_k, y_{k-1}, u_{k-1}, u_k) input vector
        ya[k + 1] = ann_plant.predict(np.array([[ya[k], ya[k - 1], sp[k - 1], sp[k]]]))[0, 0]
    fig = pu.prepare_fig(window_title='Time simulation')
    ax = plt.gca()
    plt.plot(time, sp, label='sp')
    plt.plot(time, yp, label='plant')
    plt.plot(time, ya, label='ann')
    pu.decorate(ax, title='$y$', xlab='time in s', ylab='m', legend=True)
    plt.savefig(ut.cs_asset('docs/plots/so_lti__ident__io__keras.png'))
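# Minimal stand-in (assumption, illustrative only) for the plant interface used
# by simulate_plant_and_ann: a dt attribute and a second-order difference
# equation y_{k+1} = -a1 y_k - a0 y_{k-1} + b1 u_k + b0 u_{k-1}.
class ExampleIoPlant:
    def __init__(self, a0, a1, b0, b1, dt=0.01):
        self.a0, self.a1, self.b0, self.b1, self.dt = a0, a1, b0, b1, dt

    def io_dyn(self, y_k, y_km1, u_k, u_km1):
        return -self.a1 * y_k - self.a0 * y_km1 + self.b1 * u_k + self.b0 * u_km1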
def plot_training(ann):
    fig = pu.prepare_fig(window_title='Training history')
    _h = ann.history.history
    plt.plot(_h['loss'], label='loss')
    plt.plot(_h['val_loss'], label='val_loss')
    pu.decorate(plt.gca(), 'loss', xlab='epochs', legend=True)
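# Note: Keras only sets model.history when fit has run in the current process,
# so plot_training works on a freshly trained model but not on one restored
# with keras.models.load_model. Example usage (names illustrative):
#   plant_ann = ident_plant(_input, _output, 'mip_example', force_train=True)
#   plot_training(plant_ann)
#   plt.show()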