Example 1
    def test_fit_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()])

        model.fit_generator(custom_generator_threads(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            workers=4,
                            use_multiprocessing=True)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False,
                            validation_data=custom_generator(),
                            validation_steps=10)
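        # workers=0 below consumes the generator directly on the main
        # thread, without spawning worker threads.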
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            validation_data=custom_generator(),
                            validation_steps=1,
                            workers=0)
Example 2
    def test_generator_methods(self):
        model = testing_utils.get_small_mlp(10, 4, 3)
        optimizer = rmsprop.RMSprop(learning_rate=0.001)
        model.compile(optimizer,
                      loss='mse',
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()],
                      run_eagerly=True)

        x = np.random.random((10, 3))
        y = np.random.random((10, 4))

        def numpy_iterator():
            while True:
                yield x, y

        model.fit_generator(numpy_iterator(), steps_per_epoch=3, epochs=1)
        model.evaluate_generator(numpy_iterator(), steps=3)

        def inference_numpy_iterator():
            while True:
                yield x

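        # 3 steps × a 10-sample batch = 30 predictions, each with the
        # model's 4 output units.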
        out = model.predict_generator(inference_numpy_iterator(), steps=3)
        self.assertEqual(out.shape, (30, 4))
Example 3
    def test_evaluate_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly())

        model.evaluate_generator(custom_generator_threads(),
                                 steps=5,
                                 max_queue_size=10,
                                 workers=2,
                                 verbose=1,
                                 use_multiprocessing=True)
        model.evaluate_generator(custom_generator(),
                                 steps=5,
                                 max_queue_size=10,
                                 use_multiprocessing=False)
        model.evaluate_generator(custom_generator(),
                                 steps=5,
                                 max_queue_size=10,
                                 use_multiprocessing=False,
                                 workers=0)
Example 4
    def DISABLED_test_function_model_feature_layer_input(self):
        col_a = tf.feature_column.numeric_column("a")
        col_b = tf.feature_column.numeric_column("b")

        feature_layer = df.DenseFeatures([col_a, col_b], name="fc")
        dense = keras.layers.Dense(4)

        # This seems problematic.... We probably need something for DenseFeatures
        # the way Input is for InputLayer.
        output = dense(feature_layer)

        model = keras.models.Model([feature_layer], [output])

        optimizer = "rmsprop"
        loss = "mse"
        loss_weights = [1.0, 0.5]
        model.compile(
            optimizer,
            loss,
            metrics=[metrics_module.CategoricalAccuracy(), "mae"],
            loss_weights=loss_weights,
        )

        data = ({"a": np.arange(10), "b": np.arange(10)}, np.arange(10, 20))
        model.fit(*data, epochs=1)
Example 5
    def test_generator_methods_with_sample_weights(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly())

        model.fit_generator(custom_generator(mode=3),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False)
        model.fit_generator(custom_generator(mode=3),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False,
                            validation_data=custom_generator(mode=3),
                            validation_steps=10)
        model.predict_generator(custom_generator(mode=3),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.evaluate_generator(custom_generator(mode=3),
                                 steps=5,
                                 max_queue_size=10,
                                 use_multiprocessing=False)
Example 6
    def test_generator_input_to_fit_eval_predict(self):
        val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)

        def ones_generator():
            while True:
                yield np.ones([10, 10], np.float32), np.ones([10, 1],
                                                             np.float32)

        model = testing_utils.get_small_mlp(num_hidden=10,
                                            num_classes=1,
                                            input_dim=10)

        model.compile(rmsprop.RMSprop(0.001),
                      'binary_crossentropy',
                      run_eagerly=testing_utils.should_run_eagerly())
        model.fit(ones_generator(),
                  steps_per_epoch=2,
                  validation_data=val_data,
                  epochs=2)
        model.evaluate(ones_generator(), steps=2)
        model.predict(ones_generator(), steps=2)

        # Test with a changing batch size
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()])
        model.fit_generator(custom_generator_changing_batch_size(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False)
        model.fit_generator(
            custom_generator_changing_batch_size(),
            steps_per_epoch=5,
            epochs=1,
            verbose=1,
            max_queue_size=10,
            use_multiprocessing=False,
            validation_data=custom_generator_changing_batch_size(),
            validation_steps=10)

        model.fit(custom_generator_changing_batch_size(),
                  steps_per_epoch=5,
                  validation_data=custom_generator_changing_batch_size(),
                  validation_steps=10,
                  epochs=2)
        model.evaluate(custom_generator_changing_batch_size(), steps=5)
        model.predict(custom_generator_changing_batch_size(), steps=5)
Example 7
def initializeNN():

    from keras.models import Sequential
    from keras.layers import Dense
    from keras.layers import LeakyReLU
    from keras.layers import Dropout
    from keras import regularizers
    from keras import metrics
    #import tensorflow_addons as tfa

    ### Define metrics
    metric_list = [
        metrics.CategoricalAccuracy(name="accuracy"),
        metrics.FalseNegatives(name="fn"),
        metrics.FalsePositives(name="fp"),
        metrics.TrueNegatives(name="tn"),
        metrics.TruePositives(name="tp"),
        metrics.Precision(name="precision"),
        metrics.Recall(name="recall"),
        metrics.AUC(name='auc')  #,
        #tfa.metrics.CohenKappa(name='kappa')
    ]

    # define the keras model
    nn = Sequential()
    nn.add(Dense(256, input_dim=102,
                 kernel_regularizer='l1'))  #, activation='relu'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(128))  #, activation='relu'))#,kernel_regularizer='l1'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(64))  #, activation='relu'))#,kernel_regularizer='l1'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(64))  #, activation='relu'))#,kernel_regularizer='l1'))
    nn.add(LeakyReLU(alpha=0.1))
    nn.add(Dropout(0.1))

    nn.add(Dense(31, activation='softmax'))

    nn.compile(loss='categorical_crossentropy',
               optimizer='Adamax',
               metrics=metric_list)

    return nn
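A minimal usage sketch for the function above (`X` and `y` are hypothetical placeholders with 102 feature columns and 31 one-hot classes, matching the layer sizes):

import numpy as np

nn = initializeNN()
X = np.random.random((200, 102))               # hypothetical feature matrix
y = np.eye(31)[np.random.randint(0, 31, 200)]  # hypothetical one-hot labels
nn.fit(X, y, epochs=5, batch_size=32, validation_split=0.2)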
Example 8
    def test_dataset_with_sample_weights(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = 'rmsprop'
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        sample_weights = np.ones((10), np.float32)
        dataset = tf.data.Dataset.from_tensor_slices(
            (inputs, targets, sample_weights))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
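        # Keras unpacks a three-element dataset as
        # (inputs, targets, sample_weights).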

        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(dataset, steps=2, verbose=1)
        model.predict(dataset, steps=2)
Example 9
    def test_categorical_accuracy(self):
        acc_obj = metrics.CategoricalAccuracy(name='my_acc')

        # check config
        assert acc_obj.name == 'my_acc'
        assert acc_obj.stateful
        assert len(acc_obj.weights) == 2
        assert acc_obj.dtype == 'float32'

        # verify that the correct value is returned
        result_t = acc_obj([[0, 0, 1], [0, 1, 0]],
                           [[0.1, 0.1, 0.8], [0.05, 0.95, 0]])
        result = K.eval(result_t)
        assert result == 1  # 2/2

        # check with sample_weight: sample 1 (weight 0.5) is classified
        # correctly, sample 2 (weight 0.2) is not, so the running accuracy
        # becomes (2 + 0.5) / (2 + 0.7) = 2.5 / 2.7.
        result_t = acc_obj([[0, 0, 1], [0, 1, 0]],
                           [[0.1, 0.1, 0.8], [0.05, 0, 0.95]],
                           [[0.5], [0.2]])
        result = K.eval(result_t)
        assert np.isclose(result, 2.5 / 2.7, atol=1e-3)  # 2.5/2.7
Example 10
    def test_model_methods_with_eager_tensors_single_io(self):
        if not tf.executing_eagerly():
            # Only test V2 Function and V2 Eager modes, as V1 Graph mode with
            # symbolic tensors has different requirements.
            return

        model = test_utils.get_small_mlp(10, 4, 3)

        optimizer = rmsprop.RMSprop(learning_rate=0.001)
        loss = "mse"
        metrics = ["mae", metrics_module.CategoricalAccuracy()]
        model.compile(
            optimizer,
            loss,
            metrics=metrics,
            run_eagerly=test_utils.should_run_eagerly(),
        )

        inputs = tf.zeros(shape=(10, 3))
        targets = tf.zeros(shape=(10, 4))

        model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
        model.fit(inputs,
                  targets,
                  epochs=1,
                  batch_size=3,
                  verbose=0,
                  shuffle=False)
        model.fit(
            inputs,
            targets,
            epochs=1,
            batch_size=4,
            verbose=0,
            validation_data=(inputs, targets),
        )
        model.evaluate(inputs, targets, batch_size=2, verbose=0)
        model.predict(inputs, batch_size=2)
        model.train_on_batch(inputs, targets)
        model.test_on_batch(inputs, targets)
Example 11
    def DISABLED_test_function_model_multiple_feature_layer_inputs(self):
        col_a = tf.feature_column.numeric_column("a")
        col_b = tf.feature_column.numeric_column("b")
        col_c = tf.feature_column.numeric_column("c")

        fc1 = df.DenseFeatures([col_a, col_b], name="fc1")
        fc2 = df.DenseFeatures([col_b, col_c], name="fc2")
        dense = keras.layers.Dense(4)

        # This seems problematic.... We probably need something for DenseFeatures
        # the way Input is for InputLayer.
        output = dense(fc1) + dense(fc2)

        model = keras.models.Model([fc1, fc2], [output])

        optimizer = "rmsprop"
        loss = "mse"
        loss_weights = [1.0, 0.5]
        model.compile(
            optimizer,
            loss,
            metrics=[metrics_module.CategoricalAccuracy(), "mae"],
            loss_weights=loss_weights,
        )

        data_list = (
            [
                {
                    "a": np.arange(10),
                    "b": np.arange(10)
                },
                {
                    "b": np.arange(10),
                    "c": np.arange(10)
                },
            ],
            np.arange(10, 100),
        )
        model.fit(*data_list, epochs=1)

        data_bloated_list = (
            [
                {
                    "a": np.arange(10),
                    "b": np.arange(10),
                    "c": np.arange(10)
                },
                {
                    "a": np.arange(10),
                    "b": np.arange(10),
                    "c": np.arange(10)
                },
            ],
            np.arange(10, 100),
        )
        model.fit(*data_bloated_list, epochs=1)

        data_dict = (
            {
                "fc1": {
                    "a": np.arange(10),
                    "b": np.arange(10)
                },
                "fc2": {
                    "b": np.arange(10),
                    "c": np.arange(10)
                },
            },
            np.arange(10, 100),
        )
        model.fit(*data_dict, epochs=1)

        data_bloated_dict = (
            {
                "fc1": {
                    "a": np.arange(10),
                    "b": np.arange(10),
                    "c": np.arange(10),
                },
                "fc2": {
                    "a": np.arange(10),
                    "b": np.arange(10),
                    "c": np.arange(10),
                },
            },
            np.arange(10, 100),
        )
        model.fit(*data_bloated_dict, epochs=1)
Example 12
    def DISABLED_test_function_model_multiple_feature_layer_inputs(self):
        col_a = tf.feature_column.numeric_column('a')
        col_b = tf.feature_column.numeric_column('b')
        col_c = tf.feature_column.numeric_column('c')

        fc1 = df.DenseFeatures([col_a, col_b], name='fc1')
        fc2 = df.DenseFeatures([col_b, col_c], name='fc2')
        dense = keras.layers.Dense(4)

        # This seems problematic.... We probably need something for DenseFeatures
        # the way Input is for InputLayer.
        output = dense(fc1) + dense(fc2)

        model = keras.models.Model([fc1, fc2], [output])

        optimizer = 'rmsprop'
        loss = 'mse'
        loss_weights = [1., 0.5]
        model.compile(optimizer,
                      loss,
                      metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
                      loss_weights=loss_weights)

        data_list = ([{
            'a': np.arange(10),
            'b': np.arange(10)
        }, {
            'b': np.arange(10),
            'c': np.arange(10)
        }], np.arange(10, 100))
        model.fit(*data_list, epochs=1)

        data_bloated_list = ([{
            'a': np.arange(10),
            'b': np.arange(10),
            'c': np.arange(10)
        }, {
            'a': np.arange(10),
            'b': np.arange(10),
            'c': np.arange(10)
        }], np.arange(10, 100))
        model.fit(*data_bloated_list, epochs=1)

        data_dict = ({
            'fc1': {
                'a': np.arange(10),
                'b': np.arange(10)
            },
            'fc2': {
                'b': np.arange(10),
                'c': np.arange(10)
            }
        }, np.arange(10, 100))
        model.fit(*data_dict, epochs=1)

        data_bloated_dict = ({
            'fc1': {
                'a': np.arange(10),
                'b': np.arange(10),
                'c': np.arange(10)
            },
            'fc2': {
                'a': np.arange(10),
                'b': np.arange(10),
                'c': np.arange(10)
            }
        }, np.arange(10, 100))
        model.fit(*data_bloated_dict, epochs=1)
Example 13
    def test_model_methods_with_eager_tensors_multi_io(self):
        if not tf.executing_eagerly():
            # Only test V2 Function and V2 Eager modes, as V1 Graph mode with
            # symbolic tensors has different requirements.
            return

        input_a = keras.layers.Input(shape=(3, ), name='input_a')
        input_b = keras.layers.Input(shape=(3, ), name='input_b')

        dense = keras.layers.Dense(4, name='dense')
        dropout = keras.layers.Dropout(0.5, name='dropout')

        model = testing_utils.get_multi_io_model([input_a, dense],
                                                 [input_b, dense, dropout])

        optimizer = rmsprop.RMSprop(learning_rate=0.001)
        loss = 'mse'
        loss_weights = [1., 0.5]
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      loss_weights=loss_weights,
                      run_eagerly=testing_utils.should_run_eagerly(),
                      sample_weight_mode=None)

        input_a = tf.zeros(shape=(10, 3))
        input_b = tf.zeros(shape=(10, 3))
        target_a = tf.zeros(shape=(10, 4))
        target_b = tf.zeros(shape=(10, 4))

        model.fit([input_a, input_b], [target_a, target_b],
                  epochs=1,
                  batch_size=5,
                  verbose=0)
        # Test: no shuffle.
        model.fit([input_a, input_b], [target_a, target_b],
                  epochs=1,
                  batch_size=5,
                  verbose=0,
                  shuffle=False)
        # Test: validation data.
        model.fit([input_a, input_b], [target_a, target_b],
                  epochs=1,
                  batch_size=2,
                  verbose=0,
                  validation_data=([input_a, input_b], [target_a, target_b]))
        model.train_on_batch([input_a, input_b], [target_a, target_b])
        model.predict([input_a, input_b], batch_size=5)
        model.evaluate([input_a, input_b], [target_a, target_b],
                       batch_size=2,
                       verbose=0)
        model.test_on_batch([input_a, input_b], [target_a, target_b])

        # Test: mix np and tensors.
        input_b = np.zeros(shape=(10, 3)).astype('float32')
        target_b = np.zeros(shape=(10, 4)).astype('float32')
        model.fit([input_a, input_b], [target_a, target_b],
                  epochs=1,
                  batch_size=5,
                  verbose=0)
        model.fit([input_a, input_b], [target_a, target_b],
                  epochs=1,
                  batch_size=2,
                  verbose=0,
                  validation_data=([input_a, input_b], [target_a, target_b]))
        model.fit([input_a, input_b], [target_a, target_b],
                  epochs=1,
                  batch_size=5,
                  verbose=0,
                  shuffle=False)
        model.train_on_batch([input_a, input_b], [target_a, target_b])
        model.predict([input_a, input_b], batch_size=5)
        model.evaluate([input_a, input_b], [target_a, target_b],
                       batch_size=2,
                       verbose=0)
        model.test_on_batch([input_a, input_b], [target_a, target_b])
Example 14

# Assumed imports for this standalone snippet:
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers, losses, metrics, optimizers

model = keras.Sequential()

# Add a densely connected layer with 64 units to the model:
# `input_shape` gives the number of input features and `activation`
# sets the layer's activation function.
model.add(layers.Dense(64, activation='relu', input_shape=(10, )))

# Add another:
model.add(layers.Dense(64, activation='relu'))

# Add a softmax layer with 10 output nodes:
model.add(layers.Dense(10, activation='softmax'))

model.compile(optimizer=optimizers.RMSprop(0.01),
              loss=losses.CategoricalCrossentropy(),
              metrics=[metrics.CategoricalAccuracy()])

# The number of columns in `data` must match the input shape of the
# first layer, and the number of columns in `labels` must match the
# number of output nodes. The number of rows in both is the size of
# the training set.
data = np.random.random((1000, 10))
labels = np.random.random((1000, 10))
# Real data could be loaded in place of the random arrays above, e.g.:
# data = pd.read_csv('data.csv')
# labels = pd.read_csv('labels.csv')

print(data)

model.fit(data, labels, epochs=10, batch_size=100)
Example 15
    def test_training_and_eval_methods_on_dataset(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = 'rmsprop'
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = tf.data.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat()  # Infinite dataset.
        dataset = dataset.batch(10)

        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(dataset, steps=2, verbose=1)
        model.predict(dataset, steps=2)

        # Test with validation data
        model.fit(dataset,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=dataset,
                  validation_steps=2)

        # Test with validation split
        with self.assertRaises(ValueError):
            model.fit(dataset,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      validation_split=0.5,
                      validation_steps=2)

        # Test with sample weight.
        sample_weight = np.random.random((10, ))
        with self.assertRaisesRegex(
                ValueError,
                r'`sample_weight` argument is not supported .+dataset'):
            model.fit(dataset,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      sample_weight=sample_weight)

        with self.assertRaisesRegex(
                ValueError, '(you should not specify a target)|'
                '(`y` argument is not supported when using dataset as input.)'
        ):
            model.fit(dataset, dataset, epochs=1, steps_per_epoch=2, verbose=0)

        # With an infinite dataset, `steps_per_epoch`/`steps` argument is required.
        with self.assertRaises(ValueError):
            model.fit(dataset, epochs=1, verbose=0)
        with self.assertRaises(ValueError):
            model.evaluate(dataset, verbose=0)
        with self.assertRaises(ValueError):
            model.predict(dataset, verbose=0)
Example 16

# Assumed imports; `model` and `filename` are defined earlier in the
# original script.
import pandas as pd
from tensorflow import keras
from tensorflow.keras import layers, losses, metrics, optimizers

model.summary()



# read the input data
data = pd.read_csv(filename, header=None, index_col=0)

weights = model.layers[0].get_weights()



# build the new network, into which the given model's weights will be loaded
new_model = keras.Sequential()
new_model.add(layers.Dense(64, activation='relu', input_shape=(128,)))  # add the layer

new_model.compile(optimizer=optimizers.RMSprop(0.01),
                  loss=losses.CategoricalCrossentropy(),
                  metrics=[metrics.CategoricalAccuracy()])

new_model.summary()


new_model.layers[0].set_weights(weights)  # load the given model's weights

interm_output = new_model.predict(data, batch_size=32)


outname='new_representations2.csv'
with open(outname, 'w') as filehandle:
    for i in range(len(data.index)):
        y_str = str(data.index[i])
        for j in range(len(interm_output[i])):
            y_str = y_str + ',' + str(interm_output[i][j])
        filehandle.write(y_str + '\n')  # write one CSV row per sample
Example 17

shear_range                    = 0.1
brightness_range               = [0.8,1.2]
zoom_range                     = 0.1
horizontal_flip                = False
fill_mode                      = 'nearest'
print_sample_input             = False
hidden_activation_function     = ['relu']
hidden_layers_neurons          = [128]
hidden_layers_L1_coeffs        = [0.00]
hidden_layers_L2_coeffs        = [0.00]
hidden_layers_dropout          = [0.00]
final_activation_function      = 'softmax'
final_layer_neurons            = 4
model_optimizer                = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
loss_function                  = 'categorical_crossentropy'
metrics                        = [metrics.CategoricalAccuracy(name='categorical_accuracy'),
                                  metrics.AUC(multi_label = True, name='multiclass_AUC')]
n_epochs                       = 40
batch_size                     = 40
validation_steps               = 50
vgg_include_top                = False
vgg_hidden_activation_function = ['relu']
vgg_hidden_layers_neurons      = [128]
vgg_hidden_layers_L1_coeffs    = [0.00]
vgg_hidden_layers_L2_coeffs    = [0.00]
vgg_hidden_layers_dropout      = [0.00]
vgg_final_activation_function  = 'softmax'
vgg_final_layer_neurons        = 4
vgg_model_optimizer            = optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
vgg_n_epochs                   = 40
vgg_validation_steps           = 50
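A hedged sketch of how this configuration block would presumably be consumed when compiling the model (`model` itself is hypothetical and not shown here):

model.compile(optimizer=model_optimizer,
              loss=loss_function,
              metrics=metrics)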
Example 18
## Compile the network
###############################################################################
print('Compiling the network.')
#for LEARNING_RATE in ( [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1]):
#for LEARNING_RATE in ( [ 0.00001, 0.001 ]):
print('lr:', LEARNING_RATE)
from keras.optimizers import RMSprop
from keras.optimizers import Adam
from keras import metrics

model.compile(
    loss='categorical_crossentropy',
    optimizer=Adam(lr=LEARNING_RATE),
    metrics=[
        'accuracy',
        metrics.CategoricalAccuracy(),
        #metrics.Recall (),
        #metrics.Precision ()
    ])
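# Note: with categorical_crossentropy and one-hot targets, the string
# metric 'accuracy' resolves to categorical accuracy, so it duplicates
# the explicit CategoricalAccuracy() entry above.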

###############################################################################
## Fit the network
###############################################################################
print('Fitting the network.')
history = model.fit(X_train,
                    y_train,
                    batch_size=BATCH_SIZE,
                    epochs=NUMBER_OF_EPOCHS,
                    verbose=1,
                    validation_split=1 / 10)
Example 19
model.add(Dense(units_9, activation=activation_9))
model.add(Dropout(dropout_9))

model.add(Dense(units_10, activation=activation_10))
model.add(Dropout(dropout_10))

model.add(Dense(units_11, activation=activation_11))
model.add(Dropout(dropout_11))

model.add(Dense(units_12, activation=activation_12))
model.add(Dropout(dropout_12))

model.add(Dense(nb_classes, activation='softmax'))

METRICS = [
    metrics.CategoricalAccuracy(name='ACCURACY'),
    metrics.Precision(name='PRECISION'),
    metrics.Recall(name='RECALL'),
    metrics.AUC(name='AUC'),
    metrics.TruePositives(name='TP'),
    metrics.TrueNegatives(name='TN'),
    metrics.FalsePositives(name='FP'),
    metrics.FalseNegatives(name='FN')
]

model.compile(loss='categorical_crossentropy',
              optimizer=compile_optimizer,
              metrics=METRICS)

# GENERATORS
train_datagen = ImageDataGenerator(rescale=1. / 255,