import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import losses


def conf_loss(y_true, y_pred):
    """Combined box-regression and confidence loss.

    The first four channels hold box coordinates, the fifth the objectness
    confidence. Box regression is masked by the true confidence so that
    cells without an object contribute no localization error.
    """
    y_true = K.cast(y_true, dtype=tf.float32)
    y_pred = K.cast(y_pred, dtype=tf.float32)
    bboxes_true = y_true[..., :4]
    bboxes_pred = y_pred[..., :4]
    conf_true = y_true[..., 4]
    conf_pred = y_pred[..., 4]

    # MSE on the box coordinates, masked by the ground-truth confidence.
    mse_func = losses.MeanSquaredError(reduction='sum_over_batch_size')
    mse = mse_func(bboxes_true * K.expand_dims(conf_true),
                   bboxes_pred * K.expand_dims(conf_true))

    # Binary cross-entropy on the confidence channel.
    bce_func = losses.BinaryCrossentropy(reduction='sum_over_batch_size')
    bce = bce_func(conf_true, conf_pred)

    # Localization is weighted 10x relative to confidence.
    return 10 * mse + bce
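
A minimal usage sketch for conf_loss, assuming a hypothetical detection head that emits five channels per grid cell (four box coordinates plus one confidence); the architecture is illustrative only:

# Toy model: any network ending in 5 sigmoid channels per cell works.
inputs = tf.keras.Input(shape=(7, 7, 16))
outputs = tf.keras.layers.Conv2D(5, 1, activation='sigmoid')(inputs)
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer='adam', loss=conf_loss)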
Example #2
    # Requires `import tensorflow as tf` and, for SGDW,
    # `import tensorflow_addons as tfa`.
    def Train(self, pre_weights, activation, learning_rate, momentum,
              weight_decay, batch_size, epochs, nclasses, early_stop,
              save_model):

        full_model = self.model

        if weight_decay is None:
            opt = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                          momentum=momentum,
                                          nesterov=True)
        else:
            # SGDW applies decoupled weight decay (tensorflow_addons).
            opt = tfa.optimizers.SGDW(weight_decay=weight_decay,
                                      learning_rate=learning_rate,
                                      momentum=momentum,
                                      nesterov=True)

        if nclasses == 2:
            loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)
        else:
            loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)

        callbacks = self.SetCallbacks(early_stop, save_model)

        full_model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])

        # Inverse-frequency class weights so that rarer classes count more.
        # Fall back to uniform weighting (None) for any other class count,
        # which also avoids an unbound `class_weights` below.
        class_weights = None
        if nclasses == 2:
            # Weight each class by the share of the *other* class.
            class_weights = {
                0: len(self.y_train[self.y_train[:, 1] == 1]) / len(self.y_train),
                1: len(self.y_train[self.y_train[:, 0] == 1]) / len(self.y_train),
            }
        elif nclasses == 4:
            N1 = len(self.y_train[self.y_train[:, 0] == 1])
            N2 = len(self.y_train[self.y_train[:, 1] == 1])
            N3 = len(self.y_train[self.y_train[:, 2] == 1])
            N4 = len(self.y_train[self.y_train[:, 3] == 1])
            # Harmonic normalizer: the four weights NT / Ni sum to 1.
            NT = 1 / (1 / N1 + 1 / N2 + 1 / N3 + 1 / N4)

            class_weights = {
                0: NT / N1,
                1: NT / N2,
                2: NT / N3,
                3: NT / N4,
            }

        # Remaining fit() arguments are left at their defaults.
        history = full_model.fit(x=self.x_train,
                                 y=self.y_train,
                                 batch_size=batch_size,
                                 epochs=epochs,
                                 verbose=1,
                                 callbacks=callbacks,
                                 validation_data=(self.x_valid, self.y_valid),
                                 shuffle=True,
                                 class_weight=class_weights)

        return full_model, history
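
A hedged usage sketch: the enclosing class is not shown in this snippet, so trainer below stands for any instance exposing model, x_train/y_train, x_valid/y_valid, and SetCallbacks. Note that pre_weights and activation are accepted but unused in the body above; all hyperparameter values are illustrative:

model, history = trainer.Train(pre_weights=None, activation='relu',
                               learning_rate=1e-3, momentum=0.9,
                               weight_decay=1e-4, batch_size=32, epochs=50,
                               nclasses=4, early_stop=True, save_model=False)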
Example #3
# Likely imports for this test module (tensorflow_privacy BoltOn tests):
from absl.testing import parameterized
from tensorflow.keras import losses
from tensorflow.python.keras import keras_parameterized
from tensorflow_privacy.privacy.bolt_on import models


class InitTests(keras_parameterized.TestCase):
    """Tests for Keras model initialization."""
    @parameterized.named_parameters([
        {
            'testcase_name': 'normal',
            'n_outputs': 1,
        },
        {
            'testcase_name': 'many outputs',
            'n_outputs': 100,
        },
    ])
    def test_init_params(self, n_outputs):
        """Test initialization of BoltOnModel.

    Args:
        n_outputs: number of output neurons
    """
        # test valid domains for each variable
        clf = models.BoltOnModel(n_outputs)
        self.assertIsInstance(clf, models.BoltOnModel)

    @parameterized.named_parameters([
        {
            'testcase_name': 'invalid n_outputs',
            'n_outputs': -1,
        },
    ])
    def test_bad_init_params(self, n_outputs):
        """test bad initializations of BoltOnModel that should raise errors.

    Args:
        n_outputs: number of output neurons
    """
        # test invalid domains for each variable, especially noise
        with self.assertRaises(ValueError):
            models.BoltOnModel(n_outputs)

    @parameterized.named_parameters([
        {
            'testcase_name': 'string compile',
            'n_outputs': 1,
            'loss': TestLoss(1, 1, 1),
            'optimizer': 'adam',
        },
        {
            'testcase_name': 'test compile',
            'n_outputs': 100,
            'loss': TestLoss(1, 1, 1),
            'optimizer': TestOptimizer(),
        },
    ])
    def test_compile(self, n_outputs, loss, optimizer):
        """Test compilation of BoltOnModel.

    Args:
      n_outputs: number of output neurons
      loss: instantiated TestLoss instance
      optimizer: instantiated TestOptimizer instance
    """
        # test compilation of valid tf.optimizer and tf.loss
        with self.cached_session():
            clf = models.BoltOnModel(n_outputs)
            clf.compile(optimizer, loss)
            self.assertEqual(clf.loss, loss)

    @parameterized.named_parameters([
        {
            'testcase_name': 'Not strong loss',
            'n_outputs': 1,
            'loss': losses.BinaryCrossentropy(),
            'optimizer': 'adam',
        },
        {
            'testcase_name': 'Not valid optimizer',
            'n_outputs': 1,
            'loss': TestLoss(1, 1, 1),
            'optimizer': 'ada',
        },
    ])
    def test_bad_compile(self, n_outputs, loss, optimizer):
        """test bad compilations of BoltOnModel that should raise errors.

    Args:
      n_outputs: number of output neurons
      loss: instantiated TestLoss instance
      optimizer: instantiated TestOptimizer instance
    """
        # test compilaton of invalid tf.optimizer and non instantiated loss.
        with self.cached_session():
            with self.assertRaises((ValueError, AttributeError)):
                clf = models.BoltOnModel(n_outputs)
                clf.compile(optimizer, loss)
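
The tests above reference TestLoss and TestOptimizer, which are defined elsewhere in the test module. A plausible sketch of such stubs is below; the base classes and the meaning of the three TestLoss constructor arguments are assumptions inferred from usage (the real stubs in TF Privacy also implement the StrongConvexMixin interface that BoltOnModel expects):

import tensorflow as tf


class TestLoss(losses.Loss):
    """Stub loss; the three constructor arguments mirror TestLoss(1, 1, 1)."""

    def __init__(self, reg_lambda, c_arg, radius_constant, name='test'):
        super().__init__(name=name)
        self.reg_lambda = reg_lambda
        self.c_arg = c_arg
        self.radius_constant = radius_constant

    def call(self, y_true, y_pred):
        return tf.reduce_mean(tf.math.squared_difference(y_true, y_pred))


class TestOptimizer(tf.keras.optimizers.SGD):
    """Stub optimizer; any concrete Keras optimizer works for compile()."""

    def __init__(self):
        super().__init__(learning_rate=0.01)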
Example #4
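downsample is a helper defined elsewhere in the source. A plausible sketch under assumptions: the first argument is the block width, the second the incoming width, and the flag toggles batch normalization; the imports below cover the whole example.

import tensorflow as tf
from tensorflow.keras import Model, layers, losses


def downsample(units, input_dim, apply_batchnorm=True):
    # Dense -> (BatchNorm) -> LeakyReLU block, a fully connected analogue
    # of the conv downsample blocks in the image pix2pix discriminator.
    initializer = tf.random_normal_initializer(0., 0.02)
    block = tf.keras.Sequential()
    block.add(layers.Dense(units, kernel_initializer=initializer,
                           use_bias=False, input_shape=(input_dim,)))
    if apply_batchnorm:
        block.add(layers.BatchNormalization())
    block.add(layers.LeakyReLU())
    return block
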
def pix2pix_discriminator():
    """Discriminator over flat 99-feature vectors, pix2pix style."""
    initializer = tf.random_normal_initializer(0., 0.02)

    inputs = layers.Input(shape=[99], name='cp')

    # Stack of fully connected downsample blocks; batch norm is skipped
    # on the first block, as in the original pix2pix discriminator.
    x = downsample(64, 99, False)(inputs)
    x = downsample(128, 64)(x)
    x = downsample(256, 128)(x)
    x = downsample(512, 256)(x)
    # Single logit output (paired with from_logits=True below).
    last = layers.Dense(1, kernel_initializer=initializer)(x)

    return Model(inputs=inputs, outputs=last)


# from_logits=True matches the discriminator's linear output layer.
loss_object = losses.BinaryCrossentropy(from_logits=True)


def discriminator_loss(real_output, fake_output):
    # Real samples should be scored as 1, generated samples as 0.
    real_loss = loss_object(tf.ones_like(real_output), real_output)
    fake_loss = loss_object(tf.zeros_like(fake_output), fake_output)

    return real_loss + fake_loss


def generator_loss(fake_output):
    # The generator is rewarded when the discriminator scores fakes as real.
    gan_loss = loss_object(tf.ones_like(fake_output), fake_output)

    return gan_loss
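
A minimal sketch of how these losses plug into a GAN training step, assuming a hypothetical generator alongside the discriminator; the generator architecture and optimizer settings are illustrative only:

# Hypothetical generator: a tiny MLP mapping noise to 99-feature samples.
generator = tf.keras.Sequential([
    layers.Dense(128, activation='relu', input_shape=(32,)),
    layers.Dense(99),
])
discriminator = pix2pix_discriminator()
gen_opt = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
disc_opt = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)


@tf.function
def train_step(noise, real_batch):
    # Tape both networks so each can be updated from its own loss.
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        fake_batch = generator(noise, training=True)
        real_output = discriminator(real_batch, training=True)
        fake_output = discriminator(fake_batch, training=True)
        g_loss = generator_loss(fake_output)
        d_loss = discriminator_loss(real_output, fake_output)
    gen_opt.apply_gradients(zip(
        gen_tape.gradient(g_loss, generator.trainable_variables),
        generator.trainable_variables))
    disc_opt.apply_gradients(zip(
        disc_tape.gradient(d_loss, discriminator.trainable_variables),
        discriminator.trainable_variables))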