Example #1
0
            # Hidden layer: <neur_number> fully connected neurons over the
            # flattened input image (in_image_size[0] * in_image_size[1] inputs),
            # weights initialized from a normal distribution.
            # FIX: Keras 2 renamed 'init' -> 'kernel_initializer' and fit()'s
            # 'nb_epoch' -> 'epochs'; the old Keras 1 names raise TypeError on
            # current Keras (the rest of this file already uses 'epochs=').
            model.add(
                Dense(neur_number,
                      input_dim=in_image_size[0] * in_image_size[1],
                      kernel_initializer='normal',
                      activation='relu'))
            # Single output unit with hard_sigmoid -> score in [0, 1] for the
            # binary decision, matching the binary_crossentropy loss below.
            model.add(Dense(1, kernel_initializer='normal', activation='hard_sigmoid'))

            # 'lr' kept (not 'learning_rate') for compatibility with older
            # Keras 2.x releases that predate the rename.
            model.compile(loss='binary_crossentropy',
                          optimizer=SGD(lr=0.0008),
                          metrics=['accuracy'])

            # batch_size controls how many samples are consumed per weight
            # update; batch_size=1 is pure stochastic gradient descent
            # (slow wall-clock, fine-grained updates).
            history = model.fit(x_train,
                                y_train,
                                batch_size=1,
                                epochs=5,
                                verbose=1)

            # evaluate() returns [loss, accuracy]; report accuracy as a
            # whole-number percentage (%.f == %.0f).
            score = model.evaluate(x_test, y_test, verbose=1)
            print("accuracy on testing data %.f%%" % (score[1] * 100))

            # Project-local plotting helper ('separte' is the helper's own
            # misspelled name); display the curves without saving them.
            gr.plot_history_separte(history,
                                    save_path_acc="ACC.png",
                                    save_path_loss="LOSS.png",
                                    save=False,
                                    show=True)

            # model.save('CZ_REC_200.h5')
Example #2
0
    # Train the model; batch_size, epochs and verbose come from the enclosing
    # scope (function parameters or earlier code) — not visible in this chunk.
    history = model.fit(x_train,
                        y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=verbose)

    # evaluate() returns [loss, metric]; score[1] is presumably the
    # mean_absolute_error metric (the compile() call is outside this view —
    # confirm the metrics list there).
    score = model.evaluate(x_test, y_test, verbose=verbose)

    # Report final-epoch (index epochs - 1) training metrics and the test
    # metric as whole-number percentages (%.f == %.0f).
    print("\nabsolute_error on train data\t %.f%%" %
          (history.history['mean_absolute_error'][epochs - 1] * 100))
    print("\nabsolute_error on testing data %.f%%" % (score[1] * 100))
    print("loss on train data %.f%%" %
          (history.history['loss'][epochs - 1] * 100))
    # Project-local plotting helper ('separte' is the helper's own misspelled
    # name); plot the mean_absolute_error curve, save PNGs and show them.
    gr.plot_history_separte(history=history,
                            acc='mean_absolute_error',
                            save_path_acc="ACC.png",
                            save_path_loss="LOSS.png",
                            save=True,
                            show=True)

    # Visual fit check: scatter the model's predictions over the combined
    # train+test inputs against the true target values.
    plt.plot(np.append(x_train, x_test),
             model.predict(np.append(x_train, x_test)), '.')
    plt.plot(np.append(x_train, x_test), np.append(y_train, y_test), '.')

    plt.legend(('approximation', 'function'), loc='upper left', shadow=True)

    plt.show()
    plt.close()

    # Step size and counter for the code that follows this chunk
    # (continuation not visible here).
    h = 0.05
    count = 0