Example 1
from NumPyNet.metrics import mean_accuracy_score  # imports assumed from the NumPyNet package
from NumPyNet.utils import from_categorical

def accuracy(y_true, y_pred):
    '''
    Temporary metric to work around the missing "from_categorical" step
    in the standard metrics.
    '''
    # convert the one-hot encoded arrays back to integer class labels
    truth = from_categorical(y_true)
    predicted = from_categorical(y_pred)
    return mean_accuracy_score(truth, predicted)
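For context, `from_categorical` is used here to invert the one-hot encoding of the labels before the score is computed; a minimal pure-NumPy sketch of the same idea (the function name below is illustrative, not the library API):

import numpy as np

def from_categorical_sketch(one_hot):
    # hypothetical equivalent: recover integer labels from one-hot rows
    return np.argmax(one_hot, axis=-1)

y_true = np.array([[0., 1., 0.], [1., 0., 0.]])   # one-hot encoded labels
print(from_categorical_sketch(y_true))            # -> [1 0]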
Example 2
    def test_mean_accuracy_score(self, size):
        # random binary ground-truth and prediction vectors of length `size`
        y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
        y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))

        # reference metric from tf.keras
        metric = tf.keras.metrics.Accuracy()

        # NumPyNet implementation under test
        res_py = mean_accuracy_score(y_true, y_pred)

        metric.update_state(y_true, y_pred)
        res_tf = metric.result().numpy()

        # the two results must agree within floating-point tolerance
        np.testing.assert_allclose(res_tf, res_py, atol=1e-8, rtol=1e-5)
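The quantity under test is plain classification accuracy, which is why the comparison against tf.keras.metrics.Accuracy is meaningful; the `size` argument is presumably injected by the test framework's parametrization. A minimal sketch of an equivalent computation, assuming mean_accuracy_score is the fraction of matching labels:

import numpy as np

def mean_accuracy_sketch(y_true, y_pred):
    # fraction of positions where the prediction equals the ground truth
    return np.mean(np.asarray(y_true) == np.asarray(y_pred))

rng = np.random.default_rng(42)
y_true = rng.choice([0., 1.], size=100)
y_pred = rng.choice([0., 1.], size=100)
print(mean_accuracy_sketch(y_true, y_pred))   # a value in [0, 1]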
Example 3
    # model.add(Cost_layer(cost_type=cost_type.mse))

    # model.compile(optimizer=SGD(lr=0.01, decay=0., lr_min=0., lr_max=np.inf))
    model.compile(optimizer=Adam(), metrics=[accuracy])

    print('*************************************')
    print('\n Total input dimension: {}'.format(X_train.shape), '\n')
    print('**************MODEL SUMMARY***********')

    model.summary()

    print('\n***********START TRAINING***********\n')

    # Fit the model on the training set
    model.fit(X=X_train, y=y_train, max_iter=10, verbose=True)

    print('\n***********START TESTING**************\n')

    # Test the prediction with timing
    loss, out = model.evaluate(X=X_test, truth=y_test, verbose=True)

    # decode the one-hot outputs and compute the classification accuracy
    truth = from_categorical(y_test)
    predicted = from_categorical(out)
    accuracy = mean_accuracy_score(truth, predicted)

    print('\nLoss Score: {:.3f}'.format(loss))
    print('Accuracy Score: {:.3f}'.format(accuracy))
    # SGD      : best score obtained was 94% with 10 epochs and lr=0.01
    # Momentum : best score obtained was 93% with 10 epochs
    # Adam     : best score obtained was 95% with 10 epochs
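The evaluation step above assumes that both `y_test` and the network output `out` are one-hot encoded. A hypothetical data-preparation sketch consistent with that assumption, using scikit-learn's digits dataset (the `to_categorical` helper below is written out by hand; names and dataset choice are assumptions, not taken from the snippet):

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

def to_categorical(labels, num_classes=None):
    # hand-rolled one-hot encoder for integer class labels
    num_classes = num_classes or int(np.max(labels)) + 1
    one_hot = np.zeros((len(labels), num_classes), dtype=float)
    one_hot[np.arange(len(labels)), labels] = 1.
    return one_hot

digits = datasets.load_digits()
X, y = digits.images, digits.target

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.33, random_state=42)

# one-hot encode the labels so that from_categorical / mean_accuracy_score
# in the snippet above can decode and compare integer classes
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)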