def get_main_loss(main_loss_str):
    """
    Get the main loss function using a string.
    The search is structured in 3 steps:
        1. Search for the loss in the custom losses
        2. Search for the loss in the standard keras losses
        3. Fall back to categorical crossentropy if the loss does not exist

    Arguments
    ----
        main_loss_str:  String
            Name of the loss function you are looking
            for.

    """
    if isinstance(main_loss_str, list):
        main_loss_str = main_loss_str[0]

    custom_loss_dict = get_custom_loss_dict()
    loss = custom_loss_dict.get(main_loss_str, 'not_custom')

    if loss == 'not_custom':
        try:
            loss = losses.get(main_loss_str)
        except ValueError:
            print("We could not find your custom loss: " + str(main_loss_str))
            print("Fall back to categorical crossentropy")
            loss = losses.get('categorical_crossentropy')

    return loss
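A minimal usage sketch of the three-step lookup, assuming `losses` comes from `tensorflow.keras` and `get_custom_loss_dict` returns a name-to-callable registry (the `focal_loss` entry below is purely illustrative):

from tensorflow.keras import losses

def focal_loss(y_true, y_pred):  # hypothetical custom loss
    ...

def get_custom_loss_dict():
    return {'focal_loss': focal_loss}

get_main_loss('focal_loss')          # step 1: found in the custom dict
get_main_loss('mean_squared_error')  # step 2: resolved by keras losses.get
get_main_loss('no_such_loss')        # step 3: falls back to categorical crossentropy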
Example #2
def test_regression():
    (x_train, y_train), (x_test, y_test) = boston_housing.load_data()

    supervision_metric = 'mae'
    ivis_boston = Ivis(k=15,
                       batch_size=16,
                       epochs=2,
                       supervision_metric=supervision_metric)
    ivis_boston.fit(x_train, y_train)

    # transform and score_samples should run without error after fitting
    embeddings = ivis_boston.transform(x_train)
    y_pred = ivis_boston.score_samples(x_train)

    loss_name = ivis_boston.model_.loss['supervised'].__name__
    assert losses.get(loss_name).__name__ == losses.get(
        supervision_metric).__name__
    assert ivis_boston.model_.layers[-1].activation.__name__ == 'linear'
    assert ivis_boston.model_.layers[-1].output_shape[-1] == 1
Example #3
    def compile_model(self,
                      optimizer,
                      optimizer_kwargs,
                      loss,
                      metrics,
                      target_tensors=None,
                      **kwargs):
        # Initialize optimizer
        optimizer = optimizers.__dict__[optimizer]
        optimizer = optimizer(**optimizer_kwargs)

        # Make sure all metric and loss names are given as sparse variants
        metrics = ensure_list_or_tuple(metrics)
        loss = ensure_list_or_tuple(loss)
        for m in metrics + loss:
            if "sparse" not in m:
                raise_non_sparse_metric_or_loss_error()

        # Initialize loss(es)
        import inspect
        loss_list = []
        for l in loss:
            if l in losses.__dict__:
                loss_list.append(losses.get(l))
            else:
                l = loss_functions.__dict__[l]
                if inspect.isclass(l):
                    loss_list.append(l(logger=self.logger, **kwargs))
                else:
                    loss_list.append(l)
        loss = loss_list

        # Find metrics in both the standard keras.metrics module and our own
        # custom metrics module
        init_metrics = []
        for m in metrics:
            if m in TF_metrics.__dict__:
                init_metrics.append(TF_metrics.get(m))
            else:
                metric = custom_metrics.__dict__[m]
                if inspect.isclass(metric):
                    metric = metric(logger=self.logger, **kwargs)
                init_metrics.append(metric)

        # Compile the model
        self.model.compile(optimizer=optimizer,
                           loss=loss,
                           metrics=init_metrics,
                           target_tensors=target_tensors)
        self.logger("Optimizer:   %s" % optimizer)
        self.logger("Loss funcs:  %s" % loss)
        self.logger("Metrics:     %s" % init_metrics)
        if target_tensors is not None:
            self.target_tensor = True
        return self
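A hedged usage sketch, assuming `trainer` is an instance of the surrounding class; every loss and metric name must contain "sparse", otherwise `raise_non_sparse_metric_or_loss_error` fires:

trainer.compile_model(optimizer='Adam',
                      optimizer_kwargs={'learning_rate': 1e-4},
                      loss='sparse_categorical_crossentropy',
                      metrics=['sparse_categorical_accuracy'])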
Example #4
    def compile_model(self,
                      optimizer,
                      optimizer_kwargs,
                      loss,
                      metrics,
                      sparse=False,
                      mem_logging=False,
                      **kwargs):
        # Initialize optimizer
        optimizer = optimizers.__dict__[optimizer]
        optimizer = optimizer(**optimizer_kwargs)

        # Initialize loss
        import inspect
        if loss in losses.__dict__:
            loss = losses.get(loss)
        else:
            loss = loss_functions.__dict__[loss]
            if inspect.isclass(loss):
                loss = loss(logger=self.logger, **kwargs)

        if sparse:
            # Make sure sparse metrics are specified
            for i, m in enumerate(metrics):
                if "sparse" not in m:
                    new = "sparse_" + m
                    self.logger("Note: changing %s --> "
                                "%s (sparse=True passed)" % (m, new))
                    metrics[i] = new

        # Find metrics in both the standard keras.metrics module and our own
        # custom metrics module
        init_metrics = []
        for m in metrics:
            if m in TF_metrics.__dict__:
                init_metrics.append(TF_metrics.get(m))
            else:
                metric = custom_metrics.__dict__[m]
                if inspect.isclass(metric):
                    metric = metric(logger=self.logger, **kwargs)
                init_metrics.append(metric)

        # Compile the model
        self.model.compile(optimizer=optimizer,
                           loss=loss,
                           metrics=init_metrics)

        self.logger("Optimizer:   %s" % optimizer)
        self.logger("Loss:        %s" % loss)
        self.logger("Targets:     %s" % ("Integer" if sparse else "One-Hot"))
        self.logger("Metrics:     %s" % init_metrics)

        return self
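Unlike the variant above, this one repairs rather than rejects non-sparse names when `sparse=True`; a hypothetical call such as:

trainer.compile_model(optimizer='Adam',
                      optimizer_kwargs={'learning_rate': 1e-4},
                      loss='sparse_categorical_crossentropy',
                      metrics=['categorical_accuracy'],
                      sparse=True)

would log the change and look up 'sparse_categorical_accuracy' instead.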
Example #5
def get_model(input_shape=(256, 256, 3)):
    inputs = layers.Input(shape=input_shape)  # 256x256 RGB input
    decoder0 = u_net_block(inputs)
    outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0)

    model = models.Model(inputs=[inputs], outputs=[outputs])

    model.compile(
        optimizer=optimizers.get(OPTIMIZER),
        loss=losses.get(LOSS),
        metrics=[metrics.get(metric) for metric in METRICS])

    return model
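OPTIMIZER, LOSS, and METRICS are module-level constants not shown in this excerpt; a plausible, purely illustrative definition that matches the sigmoid output:

OPTIMIZER = 'adam'
LOSS = 'binary_crossentropy'
METRICS = ['accuracy']

model = get_model(input_shape=(256, 256, 3))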
Example #6
def test_svm_score_samples():
    iris = datasets.load_iris()
    x = iris.data
    y = iris.target

    supervision_metric = 'categorical_hinge'
    ivis_iris = Ivis(k=15,
                     batch_size=16,
                     epochs=2,
                     supervision_metric=supervision_metric)

    # Correctly formatted one-hot labels train successfully
    y = to_categorical(y)
    embeddings = ivis_iris.fit_transform(x, y)

    y_pred = ivis_iris.score_samples(x)

    loss_name = ivis_iris.model_.loss['supervised'].__name__
    assert losses.get(loss_name).__name__ == losses.get(
        supervision_metric).__name__
    assert ivis_iris.model_.layers[-1].activation.__name__ == 'linear'
    assert ivis_iris.model_.layers[-1].kernel_regularizer is not None
    assert ivis_iris.model_.layers[-1].output_shape[-1] == y.shape[-1]
Example #7
def get(identifier: Optional[Union[Callable, str]]) -> Optional[Callable]:
    """Returns a loss function.
    Arguments:
        identifier: Function, string, or None.
    Returns:
        The function corresponding to the input string, the input function
        itself, or None if the identifier is None.
    """
    if identifier is None:
        return None
    elif callable(identifier):
        return identifier
    elif identifier in LOSSES:
        return LOSSES[identifier]
    else:
        return losses.get(identifier)
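The branches resolve in order: None passes through, callables pass through, names in the custom LOSSES registry win, and everything else is delegated to keras. Assuming LOSSES holds a hypothetical 'dice' entry and `my_loss_fn` is any callable:

get(None)                   # -> None
get(my_loss_fn)             # -> my_loss_fn, unchanged
get('dice')                 # -> LOSSES['dice'], the custom registry entry
get('binary_crossentropy')  # -> keras losses.get fallback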
Example #8
def test_score_samples():
    iris = datasets.load_iris()
    x = iris.data
    y = iris.target

    supervision_metric = 'sparse_categorical_crossentropy'
    ivis_iris = Ivis(k=15,
                     batch_size=16,
                     epochs=2,
                     supervision_metric=supervision_metric)

    embeddings = ivis_iris.fit_transform(x, y)
    y_pred = ivis_iris.score_samples(x)

    # Softmax probabilities sum to one; output has the correct shape
    assert np.sum(y_pred, axis=-1) == pytest.approx(1, 0.01)
    assert y_pred.shape[0] == x.shape[0]
    assert y_pred.shape[1] == len(np.unique(y))

    # Check that loss function and activation are correct
    loss_name = ivis_iris.model_.loss['supervised'].__name__
    assert losses.get(loss_name).__name__ == losses.get(
        supervision_metric).__name__
    assert ivis_iris.model_.layers[-1].activation.__name__ == 'softmax'
Example #9
def get_siamese_model(input_shape=(256, 256, 3)):
    inputs = layers.Input(shape=input_shape)  # 256x256 RGB input
    block0 = u_net_block(inputs)
    block1 = u_net_block(inputs)
    decoder_siamese = layers.concatenate([block0, block1], axis=-1)
    outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder_siamese)

    model = models.Model(inputs=[inputs], outputs=[outputs])

    model.compile(
        optimizer=optimizers.get(OPTIMIZER),
        loss=losses.get(LOSS),
        metrics=[metrics.get(metric) for metric in METRICS])

    return model
Example #10
File: cae.py  Project: wrijupan/ngdlm
        def contractive_loss(y_true, y_pred):

            # Get the base loss. (Keras calls loss functions as
            # loss(y_true, y_pred), so the parameters are named accordingly.)
            if isinstance(self.loss, str):
                base_loss = losses.get(self.loss)(y_true, y_pred)
            else:
                base_loss = self.loss(y_true, y_pred)

            # Get the contractive penalty. `lam` is captured from the
            # enclosing scope.
            encoder_output = self.encoder.layers[-1]
            weights = K.variable(value=encoder_output.get_weights()[0])
            weights = K.transpose(weights)  # N_hidden x N
            h = encoder_output.output
            dh = h * (1 - h)  # derivative of the sigmoid activation
            contractive = lam * K.sum(dh**2 * K.sum(weights**2, axis=1), axis=1)

            return base_loss + contractive
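Since dh = h * (1 - h) is the sigmoid derivative, the penalty equals lam * ||J_f(x)||_F^2, the squared Frobenius norm of the encoder Jacobian, for a single sigmoid Dense layer. A minimal standalone sketch of the same computation (names are illustrative, not part of the original project):

import tensorflow as tf

def contractive_penalty(h, kernel, lam=1e-4):
    # h: sigmoid activations of shape (batch, n_hidden)
    # kernel: Dense kernel of shape (n_in, n_hidden)
    dh = h * (1.0 - h)                               # sigmoid derivative
    w_sq = tf.reduce_sum(tf.square(kernel), axis=0)  # per-unit sum of squared weights, (n_hidden,)
    return lam * tf.reduce_sum(tf.square(dh) * w_sq, axis=1)  # one penalty per sample, (batch,)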
Example #11
        def vae_loss(loss_inputs, loss_outputs):
            # `z_mean`, `z_log_var`, and `inputs_dim` are captured from the
            # enclosing scope.

            # Flatten both tensors so inputs of any dimensionality are accepted.
            loss_inputs = K.flatten(loss_inputs)
            loss_outputs = K.flatten(loss_outputs)

            # Reconstruction loss.
            if isinstance(self.loss, str):
                r_loss = losses.get(self.loss)(loss_inputs, loss_outputs)
            else:
                r_loss = self.loss(loss_inputs, loss_outputs)

            r_loss *= inputs_dim

            # KL divergence loss: closed form of KL(N(mu, sigma^2) || N(0, 1)).
            kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
            kl_loss = K.sum(kl_loss, axis=-1)
            kl_loss *= -0.5

            # VAE loss.
            vae_loss = K.mean(r_loss + kl_loss)
            vae_loss /= inputs_dim
            return vae_loss
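The KL term uses the closed form KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2). A small standalone check of that identity (illustrative, not part of the project):

import numpy as np

def kl_standard_normal(z_mean, z_log_var):
    return -0.5 * np.sum(1 + z_log_var - np.square(z_mean) - np.exp(z_log_var), axis=-1)

kl_standard_normal(np.zeros((1, 2)), np.zeros((1, 2)))  # -> [0.]: posterior equals prior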
Example #12
    def __init__(self, loss, fast=False, **kwargs):
        self.supports_masking = True
        self.loss = losses.get(loss)
        self.fast = fast

        super(GradientNormLayer, self).__init__(**kwargs)
Example #13
    def __init__(self, loss, **kwargs):
        self.supports_masking = True
        self.loss = losses.get(loss)

        super(LossLayer, self).__init__(**kwargs)
Example #14
def get_loss_class_function_or_string(
        loss: str) -> Union[Type[losses_mod.Loss], Callable]:
    got = losses_mod.get(loss)
    if isinstance(got, FunctionType):
        return got
    return type(got)  # a class, e.g. if loss="BinaryCrossentropy"
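Illustrative calls, assuming `losses_mod` is keras' losses module: string aliases of functions come back as functions, while class names come back as the class itself rather than the instance that `losses.get` would otherwise construct:

get_loss_class_function_or_string('mse')                 # -> losses.mean_squared_error (function)
get_loss_class_function_or_string('BinaryCrossentropy')  # -> losses.BinaryCrossentropy (class)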