Example No. 1
import tensorflow as tf
from tensorflow.keras import losses


def image_gradient_loss(y_true, y_pred):
    """
    Image-gradient loss: MSE on the vertical gradients plus MAE on the
    horizontal gradients of the true and predicted images.
    """
    dy_true, dx_true = tf.image.image_gradients(y_true)
    dy_pred, dx_pred = tf.image.image_gradients(y_pred)
    return losses.mse(dy_true, dy_pred) + losses.mae(dx_true, dx_pred)
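A minimal usage sketch for compiling a Keras image-to-image model with this loss; the toy model below is an assumption, not part of the original example:

from tensorflow.keras import layers, Model

inputs = layers.Input(shape=(64, 64, 1))               # hypothetical single-channel images
outputs = layers.Conv2D(1, 3, padding='same')(inputs)
model = Model(inputs, outputs)
model.compile(optimizer='adam', loss=image_gradient_loss)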
Example No. 2
def cep_loss(y_true, y_pred):
    """
    Cepstral loss: MAE on the windowed cepstra plus a small MSE term on the
    spectra. spectral.irfft and w.win are provided by the surrounding module.
    """
    # Cast to complex and take the inverse real FFT to obtain the cepstra
    y_pred = tf.cast(y_pred, tf.complex64)
    y_true = tf.cast(y_true, tf.complex64)
    y_pred_cep = spectral.irfft(y_pred)
    y_true_cep = spectral.irfft(y_true)
    # Back to real values for the spectral MSE term
    y_pred = tf.cast(y_pred, tf.float32)
    y_true = tf.cast(y_true, tf.float32)
    # Window the first 257 cepstral coefficients
    y_true_cep_win = w.win(y_true_cep[:, :257])
    y_pred_cep_win = w.win(y_pred_cep[:, :257])
    return 0.9967 * mae(y_true_cep_win, y_pred_cep_win) + 0.0033 * mse(y_true, y_pred)
Example No. 3
def write(op, y_true, y_pred):
    """Evaluate a suite of regression metrics and emit each one via the
    write-style callable op."""
    y_test = y_true.astype(np.float32)
    pred = y_pred.astype(np.float32)
    op('MAE {}\n'.format(K.eval(K.mean(mae(y_test, pred)))))
    op('MSE {}\n'.format(K.eval(K.mean(mse(y_test, pred)))))
    op('RMSE {}\n'.format(K.eval(K.sqrt(K.mean(mse(y_test, pred))))))
    op('NRMSE_a {}\n'.format(K.eval(K.sqrt(K.mean(nrmse_a(y_test, pred))))))
    op('NRMSE_b {}\n'.format(K.eval(K.sqrt(K.mean(nrmse_b(y_test, pred))))))
    op('MAPE {}\n'.format(
        K.eval(K.mean(mean_absolute_percentage_error(y_test, pred)))))
    op('NRMSD {}\n'.format(K.eval(K.mean(nrmsd(y_test, pred)))))
    op('SMAPE {}\n'.format(K.eval(K.mean(smape(y_test, pred)))))
    op('R2 {}\n'.format(K.eval(K.mean(r2(y_test, pred)))))
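op is any write-style callable; a minimal sketch of calling the function against stdout, assuming the metric helpers used above (nrmse_a, nrmse_b, nrmsd, smape, r2) are defined alongside it in the same module:

import sys
import numpy as np

y_true = np.random.rand(32, 10)
y_pred = np.random.rand(32, 10)
write(sys.stdout.write, y_true, y_pred)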
Example No. 4
 def total_recon_loss(y_true, y_pred):
     """Weighted sum of the reconstruction losses named in loss_func_list."""
     tot_recon_loss = 0
     for idx, loss_name in enumerate(loss_func_list):
         if loss_name.upper() in ('L1', 'MAE'):
             tot_recon_loss += weights[idx] * mae(y_true, y_pred)
         elif loss_name.upper() in ('L2', 'MSE'):
             tot_recon_loss += weights[idx] * mse(y_true, y_pred)
         elif loss_name.upper() in ('BINARY_CROSSENTROPY',
                                    'BINARY_CROSS_ENTROPY'):
             tot_recon_loss += weights[idx] * binary_crossentropy(
                 y_true, y_pred)
         else:
             raise NotImplementedError(
                 str(loss_name) + " has no implementation")
     return tot_recon_loss
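total_recon_loss reads loss_func_list and weights from its enclosing scope; a hedged sketch of that surrounding setup, treating them as module-level names and using a toy autoencoder (all values below are assumptions, not from the original):

from tensorflow.keras import layers, Model

loss_func_list = ['MAE', 'MSE']   # names understood by total_recon_loss
weights = [0.7, 0.3]              # per-loss weights, same order as loss_func_list

inputs = layers.Input(shape=(128,))
hidden = layers.Dense(32, activation='relu')(inputs)
outputs = layers.Dense(128, activation='sigmoid')(hidden)
model = Model(inputs, outputs)
model.compile(optimizer='adam', loss=total_recon_loss)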
Example No. 5
    def _VAElossFunc(inp, outp):
        """Axis-wise KL divergence plus a loss-of-predictor penalty."""
        import keras.backend as K
        import keras.losses as kL

        # Penalizes a non-normal distribution of the encodings in latent space.
        # Earlier closed-form attempt, kept for reference:
        # kl_loss = -0.5 * K.clip(K.sum(1 - K.square(enc_mean) + enc_logstdev - K.square(K.exp(enc_logstdev + K.epsilon())), axis=-1), -200000, -K.epsilon())
        kl_loss = axiswise_kld(enc_mean, enc_logstdev)

        # Penalizes loss of predictive information after compression, *not* an
        # incorrect prediction (that is penalized by the decoder loss)
        reconstruction_loss = (
            K.categorical_crossentropy(base_diagnosis, diagnosis) +
            0.5 * kL.mae(inp, outp))

        total_loss = K.mean(reconstruction_loss + kl_loss)
        return total_loss
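axiswise_kld is defined elsewhere in the module; for reference only, the textbook closed-form KL divergence of N(mu, sigma) from N(0, 1), summed over the latent axes, looks like the sketch below (a standard formula and a hypothetical function name, not necessarily the author's implementation):

import keras.backend as K

def reference_axiswise_kld(mean, log_stddev):
    # KL(N(mu, sigma) || N(0, 1)) per sample, summed over the latent axes;
    # log_stddev is log(sigma), so sigma^2 = exp(2 * log_stddev).
    return -0.5 * K.sum(
        1.0 + 2.0 * log_stddev - K.square(mean) - K.exp(2.0 * log_stddev),
        axis=-1)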
Example No. 6
 def __call__(self, y_true, y_pred):
     return (self.mse_fraction * losses.mse(y_true, y_pred) +
             (1 - self.mse_fraction) * losses.mae(y_true, y_pred))
Example No. 7
 def __call__(self, y_true, y_pred, sample_weight=None):
     # sample_weight is accepted for API compatibility but not used here
     return (self.mse_fraction * losses.mse(y_true, y_pred) +
             (1 - self.mse_fraction) * losses.mae(y_true, y_pred))
Example No. 8
        def c_loss(ytrue, ypred):
            # MAE plus a penalty that keeps the mean prediction near self.center
            loss = mae(ytrue, ypred)
            loss += self.loss_weight * K.square(K.mean(ypred) - self.center)

            return loss
Example No. 9
    def build(self, update: bool = False):
        """
        Function that constructs the resVAE architecture, including both the encoder and decoder parts.

        :return: returns the encoder, decoder, and complete resVAE keras models
        """
        if not update:
            assert not self.built, print(
                'Model is already built and update is set to False')
        input_shape, latent_dim = self.config['INPUT_SHAPE']
        encoder_shape = self.config['ENCODER_SHAPE']
        decoder_shape = self.config['DECODER_SHAPE']
        activ = self.config['ACTIVATION']
        last_activ = self.config['LAST_ACTIVATION']
        dropout = self.config['DROPOUT']
        latent_scale = self.config['LATENT_SCALE']
        latent_offset = self.config['LATENT_OFFSET']
        decoder_bias = self.config['DECODER_BIAS']
        decoder_regularizer = self.config['DECODER_REGULARIZER']
        decoder_regularizer_initial = self.config[
            'DECODER_REGULARIZER_INITIAL']
        base_loss = self.config['BASE_LOSS']
        decoder_bn = self.config['DECODER_BN']
        relu_thresh = self.config['DECODER_RELU_THRESH']
        assert activ in ['relu', 'elu'], 'invalid activation function'
        assert last_activ in ['sigmoid', 'softmax', 'relu',
                              None], 'invalid final activation function'
        assert decoder_bias in ['all', 'none', 'last']
        assert decoder_regularizer in [
            'var_l1', 'var_l2', 'var_l1_l2', 'l1', 'l2', 'l1_l2', 'dot',
            'dot_weights', 'none'
        ]
        assert base_loss in ['mse', 'mae']

        if decoder_regularizer == 'dot_weights':
            self.dot_weights = np.zeros(shape=(latent_scale * latent_dim,
                                               latent_scale * latent_dim))
            for s in range(latent_dim):
                self.dot_weights[s * latent_scale:s * latent_scale +
                                 latent_scale,
                                 s * latent_scale:s * latent_scale +
                                 latent_scale] = 1

        # L1 regularizer with the scaling factor updateable through the l_rate variable (callback)
        def variable_l1(weight_matrix):
            return self.l_rate * K.sum(K.abs(weight_matrix))

        # L2 regularizer with the scaling factor updateable through the l_rate variable (callback)
        def variable_l2(weight_matrix):
            return self.l_rate * K.sum(K.square(weight_matrix))

        # Mixed L1 and L2 regularizer, updateable scaling. TODO: Consider implementing different scaling factors for L1 and L2 part
        def variable_l1_l2(weight_matrix):
            return self.l_rate * (K.sum(K.abs(weight_matrix)) +
                                  K.sum(K.square(weight_matrix))) * 0.5

        # Dot product-based regularizer (the 0.000 factor disables the L1 term below)
        def dotprod_weights(weights_matrix):
            penalty_dot = self.l_rate * K.mean(
                K.square(
                    K.dot(weights_matrix, K.transpose(weights_matrix)) *
                    self.dot_weights))
            penalty_l1 = 0.000 * self.l_rate * K.sum(K.abs(weights_matrix))
            return penalty_dot + penalty_l1

        def dotprod(weights_matrix):
            penalty_dot = self.l_rate * K.mean(
                K.square(K.dot(weights_matrix, K.transpose(weights_matrix))))
            penalty_l1 = 0.000 * self.l_rate * K.sum(K.abs(weights_matrix))
            return penalty_dot + penalty_l1

        def dotprod_inverse(weights_matrix):
            penalty_dot = 0.1 * K.mean(
                K.square(
                    K.dot(K.transpose(weights_matrix), weights_matrix) *
                    self.dot_weights))
            penalty_l1 = 0.000 * self.l_rate * K.sum(K.abs(weights_matrix))
            return penalty_dot + penalty_l1

        def relu_advanced(x):
            return K.relu(x, threshold=relu_thresh)

        if activ == 'relu':
            activ = relu_advanced

        # assigns the chosen regularizer function based on the config setting. TODO: Look for more elegant method
        if decoder_regularizer == 'var_l1':
            reg = variable_l1
            reg1 = variable_l1
        elif decoder_regularizer == 'var_l2':
            reg = variable_l2
            reg1 = variable_l2
        elif decoder_regularizer == 'var_l1_l2':
            reg = variable_l1_l2
            reg1 = variable_l1_l2
        elif decoder_regularizer == 'l1':
            reg = regularizers.l1(decoder_regularizer_initial)
            reg1 = regularizers.l1(decoder_regularizer_initial)
        elif decoder_regularizer == 'l2':
            reg = regularizers.l2(decoder_regularizer_initial)
            reg1 = regularizers.l2(decoder_regularizer_initial)
        elif decoder_regularizer == 'l1_l2':
            reg = regularizers.l1_l2(l1=decoder_regularizer_initial,
                                     l2=decoder_regularizer_initial)
            reg1 = regularizers.l1_l2(l1=decoder_regularizer_initial,
                                      l2=decoder_regularizer_initial)
        elif decoder_regularizer == 'dot':
            reg = dotprod
            reg1 = dotprod
        elif decoder_regularizer == 'dot_weights':
            reg1 = dotprod_weights
            reg = dotprod
        else:
            reg = None
            reg1 = None
        resvae_inp = layers.Input(shape=(input_shape, ), name='Input')
        resvae_inp_cat = layers.Input(shape=(latent_dim, ),
                                      name='Category_input')
        x = layers.Dense(encoder_shape[0], activation=activ,
                         name='Dense1')(resvae_inp)
        x = layers.Dropout(dropout, name='Dropout1')(x)
        # add layers according to encoder shape input. TODO: Consider allowing different parameters for each layer besides size.
        if len(encoder_shape) > 1:
            for i in range(len(encoder_shape) - 1):
                x = layers.Dense(encoder_shape[i + 1],
                                 activation=activ,
                                 name='Dense' + str(i + 2))(x)
                x = layers.Dropout(dropout, name='Dropout' + str(i + 2))(x)
        resvae_z_mean = layers.Dense(latent_dim * latent_scale,
                                     name='z_mean',
                                     activity_regularizer=None)(x)
        resvae_z_log_var = layers.Dense(latent_dim * latent_scale,
                                        name='z_log_var')(x)
        resvae_repeat_cat = layers.RepeatVector(latent_scale)(resvae_inp_cat)
        resvae_repeat_flattened = layers.Flatten(
            data_format='channels_first', name='Flatten')(resvae_repeat_cat)
        resvae_z = layers.Lambda(_sampling_function,
                                 output_shape=(latent_dim * latent_scale, ),
                                 name='z')([
                                     resvae_z_mean, resvae_z_log_var,
                                     resvae_repeat_flattened
                                 ])
        resvae_encoder = Model([resvae_inp, resvae_inp_cat],
                               [resvae_z_mean, resvae_z_log_var, resvae_z],
                               name='encoder')
        resvae_latent_inputs = layers.Input(shape=(latent_dim *
                                                   latent_scale, ),
                                            name='z_sampling')
        d = layers.Dense(decoder_shape[0],
                         activation=activ,
                         name='Dense_D1',
                         activity_regularizer=reg1)(resvae_latent_inputs)
        if decoder_bn:
            d = layers.BatchNormalization()(d)
        # adds layers to the decoder. See encoder layers
        if len(decoder_shape) > 1:
            for i in range(len(decoder_shape) - 1):
                if decoder_bias == 'all':
                    d = layers.Dense(decoder_shape[i + 1],
                                     activation=activ,
                                     name='Dense_D' + str(i + 2),
                                     use_bias=True,
                                     activity_regularizer=reg)(d)
                else:
                    d = layers.Dense(decoder_shape[i + 1],
                                     activation=activ,
                                     name='Dense_D' + str(i + 2),
                                     use_bias=False,
                                     kernel_regularizer=reg)(d)
                if decoder_bn:
                    d = layers.BatchNormalization()(d)
        if decoder_bias == 'none':
            resvae_outputs = layers.Dense(input_shape,
                                          activation=last_activ,
                                          use_bias=False)(d)
        else:
            resvae_outputs = layers.Dense(input_shape,
                                          activation=last_activ)(d)
        resvae_decoder = Model(resvae_latent_inputs,
                               resvae_outputs,
                               name='decoder')
        outputs = resvae_decoder(
            resvae_encoder([resvae_inp, resvae_inp_cat])[2])
        resvae = Model([resvae_inp, resvae_inp_cat], outputs, name='resvae')
        # Add a loss that is a mixture of the mean-squared error and the KL-divergence from a Gaussian of the latent space
        if base_loss == 'mse':
            reconstruction_loss = losses.mse(resvae_inp, outputs)
        else:
            reconstruction_loss = losses.mae(resvae_inp, outputs)
        reconstruction_loss *= input_shape
        # Calculate Kullback-Leibler divergence for the latent space. This is modified by adding a variable shifting the mean from zero.
        kl_loss = (1 + resvae_z_log_var - K.square(latent_offset - resvae_z_mean) - K.exp(resvae_z_log_var)) * \
                  resvae_repeat_flattened
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5
        vae_loss = K.mean(reconstruction_loss + kl_loss)
        self.vae_loss = vae_loss
        resvae.add_loss(vae_loss)
        # set built to true to later avoid inadvertently overwriting a built model. TODO: implement this check
        self.built = True
        return resvae_encoder, resvae_decoder, resvae
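build() reads all of its hyperparameters from self.config; an illustrative dict covering the keys accessed above (every value here is an assumption, not a default from the original code):

config = {
    'INPUT_SHAPE': (10000, 20),        # (number of input features, latent_dim)
    'ENCODER_SHAPE': [512, 256],
    'DECODER_SHAPE': [256, 512],
    'ACTIVATION': 'relu',
    'LAST_ACTIVATION': 'sigmoid',
    'DROPOUT': 0.1,
    'LATENT_SCALE': 5,
    'LATENT_OFFSET': 0.0,
    'DECODER_BIAS': 'last',
    'DECODER_REGULARIZER': 'var_l1',
    'DECODER_REGULARIZER_INITIAL': 1e-4,
    'BASE_LOSS': 'mse',
    'DECODER_BN': False,
    'DECODER_RELU_THRESH': 0.0,
}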
Example No. 10
def combined_loss(y_true, y_pred):
    '''
    Combines mean squared error with an L1 penalty on the autoencoder output.
    '''
    # mae(0, y_pred) reduces to the mean absolute value of the output, i.e. an L1 penalty
    return mse(y_true, y_pred) + 0.01 * mae(0, y_pred)
Example No. 11
 def __call__(self, y_true, y_pred):
     return (self.mse_fraction * losses.mse(y_true, y_pred) +
             (1 - self.mse_fraction) * losses.mae(y_true, y_pred))
Example No. 12
 def mape(y_true, y_pred):
     # Note: despite the name, this returns the mean absolute error, not MAPE.
     return losses.mae(y_true, y_pred)
Example No. 13
vae_classifer = Model([mask, class_input],
                      [c_output, vae_outputs_mask, ps_vae_outputs_mask],
                      name='vae_classifer')
# vae_classifer.summary()
# get_flops_params()

# GaussVAE loss (commented-out alternative):
# reconstruction_loss = binary_crossentropy(K.flatten(input_img), K.flatten(vae_output))
# reconstruction_loss *= original_dim
# kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
# kl_loss = -0.5*( K.sum(kl_loss, axis=-1))
# vae_loss = K.mean(reconstruction_loss + kl_loss)
# vae.add_loss(vae_loss)

# G0VAE loss: MAE reconstruction term plus a weighted G0 divergence on the latent space
reconstruction_loss = mae(K.flatten(input_img), K.flatten(vae_output))
reconstruction_loss *= img_size
G0_loss = g0_loss(z_mean, z_log_var)
vae_loss = K.mean(reconstruction_loss + 0.2 * G0_loss)
vae.add_loss(vae_loss)

vae.compile(optimizer=Adam(lr=0.01, beta_1=0.5))

pp = []
for i in range(50):
    x = vae.fit(x_train,
                x_train,
                epochs=vae_epoch,
                batch_size=batch_size,
                shuffle=True)
    # Record the first-epoch loss of each outer fit call
    pp.append(x.history['loss'][0])
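pp ends up holding one loss value per outer iteration; a small sketch for inspecting the training curve (the matplotlib usage is an assumption, not part of the original):

import matplotlib.pyplot as plt

plt.plot(pp)                      # first-epoch loss of each of the 50 fit calls
plt.xlabel('outer iteration')
plt.ylabel('VAE loss')
plt.show()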
Example No. 14
def mae_with_false_negatives(Y_true, Y_pred):
    return mae(Y_true, Y_pred) + false_negatives(Y_true, Y_pred)
Example No. 15
def categorical_crossentropy_with_mae(y_true, y_pred):
    l1 = categorical_crossentropy(y_true, y_pred)
    l2 = mae(y_true, y_pred)
    return l1 + l2