Example #1
def content_feature_loss(content1,
                         features1,
                         content2,
                         features2,
                         feature_weight=1.0):
    content_mse = mean_squared_error(content1, content2)
    features_mse = mean_squared_error(features1, features2)
    return K.mean(content_mse) + feature_weight * K.mean(features_mse)
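Note: the listings on this page omit their imports. A typical setup they appear to assume (an assumption, not part of the originals):

# Assumed imports; most examples here use some subset of these.
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.losses import mean_squared_error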
Example #2
def asymmetric_outlier_mse(y_true, y_pred):
    """Loss function which asymmetrically penalizes over estimations of large values."""
    top_over = 40.0 * K.maximum(y_true - 2.0, 0.0) * K.maximum(
        y_true - y_pred, 0.0) * mean_squared_error(y_true, y_pred)
    top_over += 20.0 * K.maximum(y_true - 1.0, 0.0) * K.maximum(
        y_true - y_pred, 0.0) * mean_squared_error(y_true, y_pred)
    top_under = 5.0 * K.maximum(y_true - 1.0, 0.0) * K.maximum(
        y_pred - y_true, 0.0) * mean_squared_error(y_true, y_pred)
    return top_over + top_under + logcosh(y_true, y_pred)
Example #3
 def generalized_loss(y_true, y_pred):
     loss = 0
     loss += mean_squared_error(tf.gather(data_train, indices), y_pred)
     for i in range(batch_size):
         curr_idx = indices[i]
         sort_idx = tf.gather(sort_distance_idx, curr_idx)
         data_true = tf.gather(data_train, sort_idx[0, 0])
         for j in range(10):
             curr_data_train = tf.gather(data_train, sort_idx[0, j])
             s = tf.math.exp(-(tf.norm(data_true - curr_data_train)**2) /
                             200)
             loss += s * mean_squared_error(curr_data_train, y_pred[i, :])
     return loss
Example #4
 def _get_mse_for_action(self, target_and_action, current_prediction):
     targets, one_hot_action = tf.split(target_and_action, [1, 2], axis=1)
     active_q_value = tf.expand_dims(tf.reduce_sum(current_prediction *
                                                   one_hot_action,
                                                   axis=1),
                                     axis=-1)
     return kls.mean_squared_error(targets, active_q_value)
Example #5
 def learn_w(self, state, r):
     with tf.GradientTape() as tape:
         pred = self.branch_1_model(state)
         loss = mean_squared_error(r, pred)
     grads = tape.gradient(loss, self.branch_1_model.trainable_variables)
     self.opt.apply_gradients(
         zip(grads, self.branch_1_model.trainable_variables))
Example #6
def point_metrics(y_data, output):
    """Evaluate metrics comparing real and predicted output values."""
    # Reshaping in memory to avoid duplicate use of RAM
    original_real_shape = y_data.shape
    y_data.shape = (y_data.shape[2], y_data.shape[1], y_data.shape[0])
    original_output_shape = output.shape
    output.shape = (output.shape[2], output.shape[1], output.shape[0])
    mae = losses.mean_absolute_error(y_data, output)
    mse = losses.mean_squared_error(y_data, output)
    mape = losses.mean_absolute_percentage_error(y_data, output)
    try:
        keras_session = backend.get_session()
        mae = mae.eval(session=keras_session)
        mse = mse.eval(session=keras_session)
        mape = mape.eval(session=keras_session)
    except NotImplementedError:
        mae = mae.numpy()
        mse = mse.numpy()
        mape = mape.numpy()
    y_data.shape = original_real_shape
    output.shape = original_output_shape
    return [
        np.mean(mae),
        np.sqrt(np.mean(mse)),
        np.mean(mape), mae,
        np.sqrt(mse), mape
    ]
Example #7
    def call(self, inputs, training):
        if training is None:
            training = K.learning_phase()

        masked_inputs = inputs
        if training:
            if self.noise_std > 0:
                masked_inputs = GaussianNoise(self.noise_std)(masked_inputs)

            if self.swap_prob > 0:
                masked_inputs = SwapNoiseMasker(
                    probs=[self.swap_prob] * self.input_dim,
                    seed=[self.seed] * 2)(masked_inputs)

            if self.mask_prob > 0:
                masked_inputs = ZeroNoiseMasker(
                    probs=[self.mask_prob] * self.input_dim,
                    seed=[self.seed] * 2)(masked_inputs)

        encoded = self.encoder(masked_inputs)
        decoded = self.decoder(encoded)

        rec_loss = K.mean(mean_squared_error(inputs, decoded))
        self.add_loss(rec_loss)

        return encoded, decoded
Example #8
 def _validate_model_regression(self, X_test, y_test, learner):
     y_preds = learner.predict(X_test)  # predict target values for X_test
     # print('Predictions Overview: ', y_preds)
     mae = mean_absolute_error(y_test, y_preds)
     rmse = np.sqrt(mean_squared_error(y_test, y_preds))
     rsquared = r2_score(y_test, y_preds)
     return mae, rmse, rsquared
Example #9
def criterion(y_true, y_pred):  # Regression Loss

    regr_loss = mean_squared_error(y_true, y_pred)
    #     regr_loss = tf.keras.losses.Huber()(y_true, y_pred)
    loss = tf.reduce_mean(regr_loss)

    return loss
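Since criterion has the standard (y_true, y_pred) signature, it can be passed straight to compile; a minimal usage sketch with an assumed model name:

# Hypothetical regression model compiled with the custom criterion above.
model.compile(optimizer='adam', loss=criterion)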
Example #10
    def pi_loss(y_true, m_out):
        y_pred, advs, vf = m_out[0], m_out[1], m_out[2]

        y_true_action = y_true[0]
        vf_true = tf.cast(y_true[1], tf.float32)

        # First, one-hot encoding of true value y_true
        y_true_action = tf.expand_dims(tf.cast(y_true_action, tf.int32),
                                       axis=1)
        y_true_action = tf.one_hot(y_true_action, depth=action_size)

        # Execute categorical crossentropy
        neglogp = cat_crosentropy(
            y_true_action,  # True actions chosen
            y_pred,  # Logits from model
            # sample_weight=advs
        )
        policy_loss = tf.reduce_mean(advs * neglogp)

        entropy_loss = kls.categorical_crossentropy(y_pred,
                                                    y_pred,
                                                    from_logits=True)

        loss_vf = kls.mean_squared_error(vf, vf_true)

        return policy_loss - coeff_entropy * entropy_loss + coeff_vf * loss_vf
Example #11
def loss(y_true, y_pred):
    mask = np.zeros((1, 362))
    mask[0][-1] = 1
    value_loss = mean_squared_error(mask*y_true, mask*y_pred)
    mask = 1-mask
    policy_loss = K.sum(categorical_crossentropy(mask*y_true, mask*y_pred))
    return policy_loss+value_loss
Example #12
def VAE_test(models, source_number, mixture, epochs=10, **kwargs):
    model = models[0] if source_number == 1 else models[1]
    target = mixture.mag1 if source_number == 1 else mixture.mag2
    learning_rate = kwargs['learning_rate']
    if kwargs['optimizer'] == 'Adam':
        optimizer = optimizers.Adam(learning_rate=learning_rate,
                                    beta_1=0.9,
                                    beta_2=0.999)
    elif kwargs['optimizer'] == 'RMSprop':
        optimizer = optimizers.RMSprop(learning_rate=learning_rate)

    z = tf.Variable(tf.random.normal([len(target), model.z_dim]),
                    trainable=True)
    for epoch in range(epochs):
        with tf.GradientTape() as tape:
            out = model.decoder(z)
            rec_loss = losses.mean_squared_error(target, out)
            loss = tf.reduce_mean(rec_loss)
        grads = tape.gradient(loss, z)
        # print('grads',grads)
        optimizer.apply_gradients([(grads, z)])
        if epoch % 50 == 0:
            print('epoch:', epoch, 'loss:', float(loss))

    return None
Example #13
def vae_loss(y_true, y_pred, z_mean, z_log_var):
    """Variational autoencoder loss.

    This loss function calculates the sum of the reconstruction loss and
    the KL-divergence.

    Args:
        y_true (tf.Tensor): The true output.
        y_pred (tf.Tensor): The predicted output.
        z_mean (tf.Tensor): The mean of the latent distribution.
        z_log_var (tf.Tensor): The log-variance of the latent distribution.

    Returns:
        tf.Tensor: The output tensor of the loss function.
    """
    reconstruction_loss = mean_squared_error(y_true, y_pred)
    reconstruction_loss *= tf.cast(tf.shape(y_true)[-1], tf.float32)

    kl_loss = -0.5 * K.sum(1
                           + z_log_var
                           - K.square(z_mean)
                           - K.exp(z_log_var),
                           axis=-1)

    return K.mean(reconstruction_loss + kl_loss)
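A minimal sketch of wiring this four-argument loss into a model via add_loss (tensor and model names are assumed; Example #14 below uses the same pattern):

# Hypothetical wiring: `inputs`, `outputs`, `z_mean`, `z_log_var` are the symbolic
# tensors of an already-built encoder/decoder pair.
vae = Model(inputs, outputs, name='vae')
vae.add_loss(vae_loss(inputs, outputs, z_mean, z_log_var))
vae.compile(optimizer='adam')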
Example #14
    def __init__(self, latent_dim, input_shape, weights=None, debug=True):
        # set main class attributes
        self.input_shape = input_shape
        self.latent_dim = latent_dim
        self.debug = debug
        self.weightdir = 'weights/'
        self.callbacks = [tf.keras.callbacks.TensorBoard(log_dir='./logs')]  # writes logs that can be viewed with TensorBoard

        # create input tensors and instantiate the encoder and decoder models
        bar_input = Input(shape = self.input_shape, name='encoder_input')
        latent_input = Input(shape = self.latent_dim, name = 'latent_input')
        self.encoder = self.make_encoder(bar_input)
        self.decoder = self.make_decoder(latent_input)

        # create the output tensors
        encoder_output = self.encoder(bar_input)
        vae_output = self.decoder(encoder_output[2])

        # instantiate the VAE model
        self.VAE = Model(bar_input, vae_output, name='VAE_DNN')

        # calculate the loss terms and add them to the model
        z_mean, z_log_var, z = encoder_output
        kl_loss = -.5 * tf.math.reduce_sum(1 + z_log_var - tf.math.square(z_mean) - tf.math.exp(z_log_var), axis = -1)
        recon_loss = mean_squared_error(tf.reshape(bar_input, [-1]), tf.reshape(vae_output, [-1]))
        recon_loss *= np.prod(self.input_shape, dtype = float)
        vae_loss = tf.math.reduce_mean(0.1 * kl_loss + recon_loss)
        self.VAE.add_loss(vae_loss)
        self.VAE.add_metric(recon_loss, name = 'recon_loss', aggregation='mean') # add the reconstruction loss as an additional viewable metric for performance analysis

        # compile the model and load the weights if specified
        self.VAE.compile(optimizer='adam')
        if self.debug:
            self.VAE.summary()
        if weights: self.VAE.load_weights(weights)
Example #15
    def train(self, X_train, y_train, yc, epochs):
        data = X_train, y_train, yc
        train_hist = {}
        train_hist['D_losses'] = []
        train_hist['G_losses'] = []
        train_hist['per_epoch_times'] = []
        train_hist['total_ptime'] = []


        for epoch in range(epochs):
            start = time.time()

            real_price, fake_price, loss = self.train_step(data)

            G_losses = []
            D_losses = []

            Real_price = []
            Predicted_price = []

            D_losses.append(loss['d_loss'].numpy())
            G_losses.append(loss['g_loss'].numpy())

            Predicted_price.append(fake_price)
            Real_price.append(real_price)

            # Save the model every 15 epochs
            if (epoch + 1) % 15 == 0:
                tf.keras.models.save_model(generator, 'gen_GRU_model_%d.h5' % epoch)
                self.checkpoint.save(file_prefix=self.checkpoint_prefix)
                print('epoch', epoch+1, 'd_loss', loss['d_loss'].numpy(), 'g_loss', loss['g_loss'].numpy())

            # For printing loss
            epoch_end_time = time.time()
            per_epoch_ptime = epoch_end_time - start
            train_hist['D_losses'].append(D_losses)
            train_hist['G_losses'].append(G_losses)
            train_hist['per_epoch_times'].append(per_epoch_ptime)
            
        # Reshape the predicted result & real
        Predicted_price = np.array(Predicted_price)
        Predicted_price = Predicted_price.reshape(Predicted_price.shape[1], Predicted_price.shape[2])
        Real_price = np.array(Real_price)
        Real_price = Real_price.reshape(Real_price.shape[1], Real_price.shape[2])

        # Plot the loss
        plt.plot(train_hist['D_losses'], label='D_loss')
        plt.plot(train_hist['G_losses'], label='G_loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()
        plt.savefig('train_loss.png')  # save before show(), otherwise the figure is cleared
        plt.show()

        print("REAL", Real_price.shape)
        print(Real_price)
        print("PREDICTED", Predicted_price.shape)
        print(Predicted_price)

        return Predicted_price, Real_price, np.sqrt(mean_squared_error(Real_price, Predicted_price)) / np.mean(Real_price)
Example #16
    def call(self, inputs, training):
        if training is None:
            training = K.learning_phase()

        masked_inputs = inputs
        if training:
            if self.noise_std > 0:
                masked_inputs = GaussianNoise(self.noise_std)(masked_inputs)

            if self.swap_prob > 0:
                masked_inputs = SwapNoiseMasker(probs=[self.swap_prob] * self.input_dim,
                                                seed=[self.seed] * 2)(masked_inputs)

            if self.mask_prob > 0:
                masked_inputs = ZeroNoiseMasker(probs=[self.mask_prob] * self.input_dim,
                                                seed=[self.seed] * 2)(masked_inputs)

        x = masked_inputs
        encoded_list = []
        for encoder in self.encoders:
            x = encoder(x)
            encoded_list.append(x)

        encoded = Concatenate()(encoded_list) if len(encoded_list) > 1 else encoded_list[0]
        decoded = self.decoder(encoded)

        rec_loss = K.mean(mean_squared_error(inputs, decoded))
        self.add_loss(rec_loss)

        return encoded, decoded
Example #17
 def _critic_loss(self, returns, critic_logits):
     """The custom loss function for the critic.
     Keras always requires a function with two parameters, y_true (targets) and y_pred (outputs/logits).
     y_true is the reference value and y_pred is the model output.
     In this case, y_true is the returns and y_pred is the predicted value,
     which is given by the critic logits.
     """
     return self.VALUE_LOSS_FACTOR * kls.mean_squared_error(returns, critic_logits)
Example #18
def train_step(model, optimizer, images, labels):
    with tf.GradientTape() as tape:
        logits = model(images)
        loss = losses.mean_squared_error(y_true=labels, y_pred=logits)

    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
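A minimal driver-loop sketch for the step function above (dataset, model, and optimizer are assumed names, not part of the original snippet):

# Hypothetical loop: `dataset` yields (images, labels) batches.
for images, labels in dataset:
    loss = train_step(model, optimizer, images, labels)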
Example #19
    def predict(self, data):
        data = np.array([data])
        predicted_ecg = self.model.predict(data)
        loss = mean_squared_error(predicted_ecg, data).numpy()[0][0]
        verdict = 'Abnormal ☠' if (loss > self.threshold) else 'Normal ❤️'
        to_give = {'loss': loss, 'verdict': verdict}

        return to_give
Example #20
def custom_loss(y_true, y_pred, lambda_r=10):
    """
    Custom loss combining mean squared error (regression on column 0) and categorical cross-entropy (classification on the remaining columns).
    """

    reg_loss = lambda_r * mean_squared_error(y_true[:, :1], y_pred[:, :1])
    clf_loss = categorical_crossentropy(y_true[:, 1:], y_pred[:, 1:])
    return reg_loss + clf_loss
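A minimal sketch of the target layout this loss appears to assume, with column 0 carrying the regression target and the remaining columns one-hot class labels (all names below are illustrative):

# Hypothetical joint target of shape (batch, 1 + num_classes).
y_joint = tf.concat([reg_target[:, None], tf.one_hot(class_ids, depth=num_classes)], axis=1)
model.compile(optimizer='adam', loss=custom_loss)  # lambda_r keeps its default of 10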
Example #21
 def compute_layer_style_cost(self, layer):
     self.layer_model = Model(inputs=VGG.inputs,
                              outputs=VGG.get_layer(layer).output)
     self.a_S = self.layer_model(self.style_img)
     self.a_G = self.layer_model(self.generated_img)
     self.a_S = self.gram_matrix(self.a_S)
     self.a_G = self.gram_matrix(self.a_G)
     return mean_squared_error(self.a_S, self.a_G)
Example #22
def GAN_separation(generators, discriminators, mixture, epochs, **kwargs):
    learning_rate = kwargs['learning_rate']
    if kwargs['optimizer'] == 'Adam':
        optimizer = optimizers.Adam(learning_rate=learning_rate,
                                    beta_1=0.9,
                                    beta_2=0.999)
    elif kwargs['optimizer'] == 'RMSprop':
        optimizer = optimizers.RMSprop(learning_rate=learning_rate)
    result_folder = kwargs['result_folder']
    log_dir = result_folder + 'tensorboard/'
    summary_writer = tf.summary.create_file_writer(log_dir)
    generator_male, generator_female = generators
    discriminator_male, discriminator_female = discriminators
    generator_male.trainable = False
    generator_female.trainable = False
    discriminator_male.trainable = False
    discriminator_female.trainable = False
    target = mixture.mag_mix
    h1 = tf.Variable(tf.random.normal([len(target), 513]), trainable=True)
    h2 = tf.Variable(tf.random.normal([len(target), 513]), trainable=True)

    for epoch in range(epochs):
        with tf.GradientTape() as tape:
            out1 = generator_male(h1)
            out2 = generator_female(h2)
            score_m = discriminator_male(out1)
            score_f = discriminator_female(out2)
            rec_loss = losses.mean_squared_error(mixture.mag_mix, out1 + out2)
            rec_loss = tf.reduce_mean(rec_loss)
            score_loss = -(score_f + score_m)
            score_loss = tf.reduce_mean(score_loss)
            # tf.abs keeps the smoothness term differentiable inside the tape
            smooth_loss = tf.abs(out1[1:] - out1[:-1]) + tf.abs(out2[1:] - out2[:-1])
            smooth_loss = tf.reduce_mean(smooth_loss)

            loss = rec_loss + 0.1 * score_loss + 0.1 * smooth_loss

        grads = tape.gradient(loss, [h1, h2])
        optimizer.apply_gradients(zip(grads, [h1, h2]))
        with summary_writer.as_default():
            tf.summary.scalar('rec_loss', float(loss), step=epoch)
            tf.summary.scalar('score_loss', float(score_loss), step=epoch)
            tf.summary.scalar('smooth_loss', float(smooth_loss), step=epoch)
        if epoch % 50 == 0:
            print('epoch:', epoch, 'loss:', float(loss), 'rec_loss:',
                  float(rec_loss), 'score_loss:', float(score_loss),
                  'smooth_loss:', float(smooth_loss))

    out1 = out1.numpy()
    out2 = out2.numpy()
    eps = finfo(float32).eps
    estimated_source_male = out1 / (
        (out1 + out2) + eps) * mixture.mag_mix * np.exp(1j * mixture.phase_mix)
    estimated_source_female = out2 / (
        (out1 + out2) + eps) * mixture.mag_mix * np.exp(1j * mixture.phase_mix)
    s1 = librosa.istft(estimated_source_male.transpose(), hop_length=256)
    s2 = librosa.istft(estimated_source_female.transpose(), hop_length=256)
    return s1, s2
Example #23
 def compute_content_cost(self):
     self.content_model = Model(inputs=VGG.inputs,
                                outputs=VGG.get_layer(
                                    self.content_layer).output)
     self.a_C = self.content_model(self.content_img)
     self.a_G = self.content_model(self.generated_img)
     self.a_C = Flatten()(self.a_C)
     self.a_G = Flatten()(self.a_G)
     return mean_squared_error(self.a_C, self.a_G)
Example #24
def train_on_batch(model, optimizer, inputs):
    with tf.GradientTape() as tape:
        output = model(inputs)
        loss = mean_squared_error(output, inputs)

    grads = tape.gradient(target=loss, sources=model.trainable_variables)

    optimizer.apply_gradients(zip(grads, model.trainable_variables))

    return loss
Example #25
def mae_vgg(y_true, y_pred):
    y_true = K.permute_dimensions(y_true, (0, 3, 1, 2))
    y_pred = K.permute_dimensions(y_pred, (0, 3, 1, 2))

    y_true = K.reshape(y_true, (K.shape(y_true)[0], K.shape(y_true)[1],
                                K.shape(y_true)[2] * K.shape(y_true)[3]))
    y_pred = K.reshape(y_pred, (K.shape(y_pred)[0], K.shape(y_pred)[1],
                                K.shape(y_pred)[2] * K.shape(y_pred)[3]))

    return K.mean(mean_squared_error(y_true, y_pred))
Example #26
 def train_step(self, state_batch, mcts_probs, winner_batch):
     with tf.GradientTape() as tape:
         winner_batch = tf.cast(winner_batch, dtype=tf.float32)
         log_act_probs, values = self.model(state_batch)
         critic_loss = tf.reduce_sum(
             losses.mean_squared_error(values, winner_batch))
         actor_loss = -tf.reduce_mean(mcts_probs * log_act_probs)
         loss = critic_loss + actor_loss
     grads = tape.gradient(loss, self.model.trainable_variables)
     self.optimizer.apply_gradients(
         zip(grads, self.model.trainable_variables))
Example #27
    def _create_model(self, shape_X, shape_y):

        self.encoder_ = self.get_encoder(input_shape=shape_X,
                                         **self.enc_params)
        self.task_ = self.get_task(input_shape=self.encoder_.output_shape[1:],
                                   output_shape=shape_y,
                                   **self.task_params)

        input_src = Input(shape_X)
        input_tgt = Input(shape_X)
        input_task = Input(shape_X)
        output_src = Input(shape_y)
        input_ones = Input((1, ))

        encoded_src = self.encoder_(input_src)
        encoded_tgt = self.encoder_(input_tgt)
        encoded_task = self.encoder_(input_task)

        tasked = self.task_(encoded_task)

        compil_params = copy.deepcopy(self.compil_params)
        if "loss" in compil_params:
            task_loss = K.mean(self.compil_params["loss"](output_src, tasked))
            compil_params.pop('loss')
        else:
            task_loss = K.mean(losses.mean_squared_error(output_src, tasked))

        ones_dot_encoded_src = K.dot(K.transpose(input_ones), encoded_src)
        corr_src = (1 / (K.sum(input_ones) - 1)) * (
            K.dot(K.transpose(encoded_src), encoded_src) -
            (1 / K.sum(input_ones)) *
            K.dot(K.transpose(ones_dot_encoded_src), ones_dot_encoded_src))
        ones_dot_encoded_tgt = K.dot(K.transpose(input_ones), encoded_tgt)
        corr_tgt = (1 / (K.sum(input_ones) - 1)) * (
            K.dot(K.transpose(encoded_tgt), encoded_tgt) -
            (1 / K.sum(input_ones)) *
            K.dot(K.transpose(ones_dot_encoded_tgt), ones_dot_encoded_tgt))

        corr_loss = (1. / 4.) * K.mean(K.square(corr_src - corr_tgt))

        loss = task_loss + self.lambdap * corr_loss

        self.model_ = Model(
            [input_src, input_tgt, input_task, output_src, input_ones],
            [encoded_src, encoded_tgt, tasked],
            name="DeepCORAL")
        self.model_.add_loss(loss)

        if not "optimizer" in compil_params:
            compil_params["optimizer"] = "adam"

        self.model_.compile(**compil_params)

        return self
Example #28
 def ssim_l1_loss(gt, y_pred, max_val=1.0, l1_weight=1.0):
     """
     Computes SSIM loss combined with a weighted pixel-wise reconstruction term.
     @param gt: Ground truth image
     @param y_pred: Predicted image
     @param max_val: Dynamic range of the input images
     @param l1_weight: Weight of the reconstruction term
     @return: Combined SSIM and reconstruction loss
     """
     ssim_loss = 1 - tf.reduce_mean(
         tf.image.ssim(gt, y_pred, max_val=max_val))
     l1 = mean_squared_error(gt, y_pred)  # note: despite the name, this term is MSE
     return ssim_loss + tf.cast(l1 * l1_weight, tf.float32)
Example #29
    def _value_loss(self, acts_and_advs, returns):
        actions, advantages = tf.split(acts_and_advs, 2, axis=-1)
        # actions = tf.one_hot(actions, env.action_space.n)
        # toto = actions.numpy()
        actions = tf.cast(actions, tf.int32)
        actions = tf.one_hot(actions, self.model.num_actions)

        # advantages *= actions
        # returns *= actions
        advantages = returns - returns * actions + advantages * actions
        # value loss is typically MSE between value estimates and returns
        return self.params['value'] * kls.mean_squared_error(
            returns, advantages)
Example #30
    def __init__(self):
        self.encoder = self.create_encoder(repr_size)
        self.decoder = self.create_decoder(repr_size)

        inp = out = Input(shape=(*imgdims, 1))
        repr = self.encoder(out)
        qtz = Activation('softmax')(repr)
        # qtz  = Lambda(quantize_fn)(repr)		# Puts a 1 at only those positions with the highest value. The rest is 0 so when multiplied by the original will be blank.
        # qtz  = Lambda(lambda x: K.cast_to_floatx(K.argmax(x)), output_shape=(*imgdims, 1))(repr)
        out = self.decoder(qtz)
        out = Lambda(lambda x: x * 255)(out)
        self.model = Model(inp, [out, repr], name='vae')
        self.model.compile(Adam(),
                           loss=lambda true, out: mean_squared_error(
                               true, out[0]))  # out = (pred, repr)
Example #31
 def _value_loss(self, returns, value):
     # value loss is typically MSE between value estimates and returns
     return self.params['value'] * kls.mean_squared_error(returns, value)
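In the actor-critic code this snippet appears to come from, a value loss like this is usually attached to the value head at compile time; a hedged sketch (the companion _logits_loss name is an assumption):

# Hypothetical two-headed compile inside the same agent class:
# policy loss for the logits head, this weighted MSE for the value head.
self.model.compile(optimizer='rmsprop',
                   loss=[self._logits_loss, self._value_loss])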