import os

import numpy as np
import matplotlib.pyplot as plt


def custom_fit(model, callbacks, x_train, y_train, x_test, y_test, epochs, batch_size,
               dir_name, compare_title, draw_step=10, verbose=1):
    np.random.seed(42)
    epochs_step = int(epochs / draw_step)
    if dir_name is not None:
        os.mkdir(dir_name)
    full_loss_history = np.empty(0)
    for init_epoch in np.arange(0, epochs, step=epochs_step):
        save = dir_name is not None
        save_path = dir_name + "/" + "val_loss.png" if save else None
        history = model.fit(x=x_train, y=y_train, batch_size=batch_size,
                            epochs=init_epoch + epochs_step, verbose=verbose,
                            callbacks=callbacks, validation_data=(x_test, y_test),
                            initial_epoch=init_epoch)
        full_loss_history = np.append(full_loss_history, history.history['val_loss'])
        # compare the target function with the current approximation
        plt.plot(np.transpose(x_test)[0], y_test, '.')
        plt.plot(np.transpose(x_test)[0], model.predict(x_test), '.')
        plt.legend(('function', 'approximation'), loc='upper left', shadow=True)
        plt.title(compare_title + "\nval_loss = %.4f\nepoch = %d"
                  % (history.history["val_loss"][-1], init_epoch + epochs_step))
        if dir_name is not None:
            plt.savefig(dir_name + "/" + "%d_compare_%.4f.png"
                        % (init_epoch + epochs_step, history.history["val_loss"][-1]),
                        dpi=200)
        plt.show()
        plt.close()
        # a callback stopped training before this chunk ran its full epochs_step epochs
        if len(history.epoch) != epochs_step:
            epochs = init_epoch + len(history.epoch)
            break
    # gr is the project's local plotting-helpers module (imported elsewhere)
    gr.plot_graphic(x=np.arange(1, epochs + 1), y=full_loss_history,
                    x_label='epochs', y_label='val_loss',
                    title="val_loss" + ' history',
                    save_path=save_path, save=save, show=True)
    return model
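# A minimal usage sketch for custom_fit: it fits a small regressor to sin(3x) samples
# and lets custom_fit redraw the comparison plot every epochs / draw_step epochs.
# The model, the synthetic data, and all hyperparameters below are illustrative
# assumptions, not from the original project; it also assumes the gr plotting
# helpers (sketched further below) are importable.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

x = np.random.uniform(-1.0, 1.0, size=(1000, 1))   # 2-D inputs: custom_fit plots column 0 of x_test
y = np.sin(3 * x[:, 0])
x_train, x_test = x[:800], x[800:]
y_train, y_test = y[:800], y[800:]

demo_model = Sequential()
demo_model.add(Dense(32, input_dim=1, activation='relu'))
demo_model.add(Dense(1, activation='linear'))
demo_model.compile(optimizer='adam', loss='mse')

demo_model = custom_fit(demo_model, callbacks=[], x_train=x_train, y_train=y_train,
                        x_test=x_test, y_test=y_test, epochs=100, batch_size=32,
                        dir_name=None, compare_title='sin(3x) approximation')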
# <neur_number> neurons with 1024 inputs, weights initialized from a normal distribution
model.add(Dense(neur_number, input_dim=in_image_size[0] * in_image_size[1],
                kernel_initializer='normal', activation='relu'))
model.add(Dense(1, kernel_initializer='normal', activation='hard_sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.0008), metrics=['accuracy'])
# batch_size controls how many samples are averaged per weight update,
# and therefore how fast (and how noisily) the network learns
history = model.fit(x_train, y_train, batch_size=1, epochs=5, verbose=1)
score = model.evaluate(x_test, y_test, verbose=1)
print("accuracy on testing data %.2f%%" % (score[1] * 100))
gr.plot_history_separte(history, save_path_acc="ACC.png", save_path_loss="LOSS.png",
                        save=False, show=True)
# model.save('CZ_REC_200.h5')
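# The Dense input layer above expects each image flattened into
# in_image_size[0] * in_image_size[1] features. A data-preparation sketch consistent
# with that layer; in_image_size, the raw arrays, and the label source are assumptions,
# not taken from the original project.
import numpy as np

in_image_size = (32, 32)                            # assumed: 32 * 32 = 1024 inputs
raw_images = np.random.rand(200, *in_image_size)    # placeholder for the real image set
y_train = np.random.randint(0, 2, size=200)         # binary targets for binary_crossentropy

x_train = raw_images.reshape(len(raw_images), -1).astype('float32')  # flatten to (N, 1024)
x_train /= x_train.max()                                             # scale pixels into [0, 1]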
# 3. set up the stopper
# built-in alternative: callbacks.EarlyStopping(monitor='acc', min_delta=0, patience=5, mode='max')
callbacks = [EarlyStoppingByLossVal(monitor='val_loss', value=goal_loss, verbose=1)]

# 4. fit the model
model.compile(optimizer=optimizer, loss='mse', metrics=['mse'])
history = model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=epochs,
                    verbose=verbose, callbacks=callbacks,
                    validation_data=(x_test, y_test))

# save information about the training run and the network itself
dir_name = "E_" + opt_name + "_" + str(len(history.epoch)) + "_" + str(lr)
# os.mkdir(dir_name)
gr.plot_graphic(x=history.epoch, y=np.array(history.history["val_loss"]),
                x_label='epochs', y_label='val_loss',
                title="val_loss" + ' history',
                save_path=dir_name + "/" + "val_loss.png", save=False, show=True)

plt_x_zero = np.empty(0)
plt_y_zero = np.empty(0)
plt_x_one = np.empty(0)
plt_y_one = np.empty(0)
plt_x_two = np.empty(0)
plt_y_two = np.empty(0)
plt_x_three = np.empty(0)
plt_y_three = np.empty(0)
plt_x_four = np.empty(0)
plt_y_four = np.empty(0)
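# EarlyStoppingByLossVal above is a project-local callback that is not defined in this
# section. A minimal sketch of one, assuming the standard Keras Callback interface
# (stop training once the monitored metric falls below a target value):
import warnings
from keras.callbacks import Callback

class EarlyStoppingByLossVal(Callback):
    def __init__(self, monitor='val_loss', value=0.0001, verbose=0):
        super(EarlyStoppingByLossVal, self).__init__()
        self.monitor = monitor
        self.value = value
        self.verbose = verbose

    def on_epoch_end(self, epoch, logs=None):
        current = (logs or {}).get(self.monitor)
        if current is None:
            warnings.warn("EarlyStoppingByLossVal requires %s in logs" % self.monitor,
                          RuntimeWarning)
            return
        if current < self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping, %s < %s" % (epoch, self.monitor, self.value))
            self.model.stop_training = True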
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=verbose)
score = model.evaluate(x_test, y_test, verbose=verbose)
print("\nabsolute_error on train data\t %.2f%%" % (history.history['mean_absolute_error'][-1] * 100))
print("\nabsolute_error on testing data %.2f%%" % (score[1] * 100))
print("loss on train data %.2f%%" % (history.history['loss'][-1] * 100))
gr.plot_graphic(x=history.epoch, y=np.array(history.history['loss']),
                x_label='epochs', y_label='loss',
                title='mean_squared_error history', show=True)
gr.plot_graphic(x=history.epoch, y=np.array(history.history['mean_absolute_error']),
                x_label='epochs', y_label='mean_absolute_error',
                title='mean_absolute_error history', show=True)
plt.plot(np.append(x_train, x_test), model.predict(np.append(x_train, x_test)), '.')
plt.plot(np.append(x_train, x_test), np.append(y_train, y_test), '.')
plt.legend(('approximation', 'function'), loc='upper left', shadow=True)
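# gr is the project's local plotting module and is not shown in this section.
# A sketch of gr.plot_graphic matching the call signature used above; the body
# is an assumption about what the helper does, not the project's actual code.
import matplotlib.pyplot as plt

def plot_graphic(x, y, x_label='', y_label='', title='',
                 save_path=None, save=False, show=True):
    plt.plot(x, y)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.title(title)
    if save and save_path is not None:
        plt.savefig(save_path, dpi=200)
    if show:
        plt.show()
    plt.close()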
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=verbose)
score = model.evaluate(x_test, y_test, verbose=verbose)
print("\nabsolute_error on train data\t %.2f%%" % (history.history['mean_absolute_error'][-1] * 100))
print("\nabsolute_error on testing data %.2f%%" % (score[1] * 100))
print("loss on train data %.2f%%" % (history.history['loss'][-1] * 100))
gr.plot_history_separte(history=history, acc='mean_absolute_error',
                        save_path_acc="ACC.png", save_path_loss="LOSS.png",
                        save=True, show=True)
plt.plot(np.append(x_train, x_test), model.predict(np.append(x_train, x_test)), '.')
plt.plot(np.append(x_train, x_test), np.append(y_train, y_test), '.')
plt.legend(('approximation', 'function'), loc='upper left', shadow=True)
plt.show()
plt.close()

h = 0.05
count = 0
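# gr.plot_history_separte (name kept exactly as in the source) is likewise not defined
# in this section. A sketch assuming it renders the chosen accuracy-like metric and the
# loss as two separate figures:
import matplotlib.pyplot as plt

def plot_history_separte(history, acc='acc', save_path_acc=None,
                         save_path_loss=None, save=False, show=True):
    for key, path in ((acc, save_path_acc), ('loss', save_path_loss)):
        plt.plot(history.epoch, history.history[key])
        plt.xlabel('epochs')
        plt.ylabel(key)
        plt.title(key + ' history')
        if save and path is not None:
            plt.savefig(path, dpi=200)
        if show:
            plt.show()
        plt.close()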