Example #1
    def test_step(self, data):
        # The dataset is expected to yield (image, mask) tuples here;
        # the ground-truth mask drives the discriminative error below.
        if isinstance(data, tuple):
            mask = data[1]
            data = data[0]
        # features = self.srmConv2D(data)
        features = data  # SRM filtering disabled in this variant
        z_mean, z_log_var, z = self.encoder(features)
        reconstruction = self.decoder(z)

        # Per-pixel squared reconstruction error, averaged over channels.
        L2 = squared_difference(features, reconstruction)
        error = tf.reduce_mean(L2, axis=-1)

        mean_0 = dicriminative_error(error, mask)

        reconstruction_loss = mean_0
        reconstruction_loss = tf.reduce_mean(reconstruction_loss)

        # Standard VAE KL divergence between N(z_mean, exp(z_log_var)) and N(0, I).
        kl_loss = -0.5 * tf.reduce_mean(1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))

        total_loss = reconstruction_loss + kl_loss

        return {
            "loss": total_loss,
            "reconstruction_loss": reconstruction_loss,
            "kl_loss": kl_loss,
        }
Example #2
    def train_step(self, data):
        if isinstance(data, tuple):
            mask = data[1]
            data = data[0]
        with tf.GradientTape() as tape:
            # features = self.srmConv2D(data)
            features = data
            z_mean, z_log_var, z = self.encoder(features)
            reconstruction = self.decoder(z)

            L2 = squared_difference(features, reconstruction)
            error = tf.reduce_mean(L2, axis=-1)

            mean_0 = dicriminative_error(error, mask)

            reconstruction_loss = mean_0
            reconstruction_loss = tf.reduce_mean(reconstruction_loss)

            kl_loss = -0.5 * tf.reduce_mean(1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))

            total_loss = reconstruction_loss + kl_loss
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return {
            "loss": total_loss,
            "reconstruction_loss": reconstruction_loss,
            "kl_loss": kl_loss,
        }
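
The dicriminative_error helper (spelled as in the source) is not shown on this page. In Examples #1 and #2 it receives the ground-truth mask, so a plausible reading is "mean reconstruction error over the pristine (class-0) region", which would explain the mean_0 name. A minimal sketch under that assumption, with error of shape (batch, H, W) and a binary mask of the same shape; the body is an illustration, not the source's implementation:

import tensorflow as tf

def dicriminative_error(error, mask):
    # Hypothetical sketch: per-sample mean error over the pixels the
    # ground-truth mask labels as class 0 (pristine).
    weights_0 = tf.cast(tf.equal(mask, 0), error.dtype)
    sum_0 = tf.reduce_sum(error * weights_0, axis=[1, 2])
    # A small epsilon guards against masks with no class-0 pixels.
    count_0 = tf.reduce_sum(weights_0, axis=[1, 2]) + 1e-8
    return sum_0 / count_0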
Example #3
    def test_step(self, data):
        if isinstance(data, tuple):
            data = data[0]
        features = self.srmConv2D(data)
        z_mean, z_log_var, z = self.encoder(features)
        reconstruction = self.decoder(z)

        L2 = squared_difference(features, reconstruction)
        error = tf.reduce_mean(L2, axis=-1)

        threshold = otsu(error)

        sigma = reduce_variance(error, axis=[1, 2])
        mean_0, sigma_b = dicriminative_error(error, threshold)

        reconstruction_loss = mean_0 + 5 * (1 - sigma_b / sigma)
        reconstruction_loss = tf.reduce_mean(reconstruction_loss)

        kl_loss = -0.5 * tf.reduce_mean(1 + z_log_var - tf.square(z_mean) -
                                        tf.exp(z_log_var))

        total_loss = reconstruction_loss + kl_loss

        return {
            "loss": total_loss,
            "reconstruction_loss": reconstruction_loss,
            "kl_loss": kl_loss,
        }
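
The otsu helper is likewise external to these snippets. A sketch of a per-sample Otsu threshold in plain TensorFlow, assuming the same (batch, H, W) error map; the bin count and the exhaustive candidate search below are assumptions, not the source's implementation:

import tensorflow as tf

def otsu(error, nbins=128):
    # Hypothetical sketch: for each sample, try nbins thresholds between
    # the min and max error and keep the one maximizing the between-class
    # variance, as in Otsu's method. The broadcast builds a
    # (batch, H, W, nbins) tensor, so keep nbins modest.
    lo = tf.reduce_min(error, axis=[1, 2], keepdims=True)
    hi = tf.reduce_max(error, axis=[1, 2], keepdims=True)
    steps = tf.linspace(0.0, 1.0, nbins)
    cand = lo[..., tf.newaxis] + steps * (hi - lo)[..., tf.newaxis]
    e = error[..., tf.newaxis]
    w0 = tf.cast(e <= cand, error.dtype)
    n0 = tf.reduce_sum(w0, axis=[1, 2]) + 1e-8
    n1 = tf.reduce_sum(1.0 - w0, axis=[1, 2]) + 1e-8
    m0 = tf.reduce_sum(e * w0, axis=[1, 2]) / n0
    m1 = tf.reduce_sum(e * (1.0 - w0), axis=[1, 2]) / n1
    n = n0 + n1
    sigma_b = (n0 / n) * (n1 / n) * tf.square(m1 - m0)
    best = tf.argmax(sigma_b, axis=-1)
    return tf.gather(tf.reshape(cand, [-1, nbins]), best, batch_dims=1)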
Example #4
    def train_step(self, data):
        with tf.GradientTape() as tape:
            features = self.srmConv2D(data)
            z_mean, z_log_var, z = self.encoder(features)
            reconstruction = self.decoder(z)

            L2 = squared_difference(features, reconstruction)
            error = tf.reduce_mean(L2, axis=-1)

            with tape.stop_recording():
                # Otsu's threshold search is non-differentiable, so it is
                # computed off the gradient tape.
                threshold = otsu(error)

            sigma = reduce_variance(error, axis=[1, 2])
            mean_0, sigma_b = dicriminative_error(error, threshold)

            reconstruction_loss = mean_0 + 5 * (1 - sigma_b / sigma)
            reconstruction_loss = tf.reduce_mean(reconstruction_loss)

            kl_loss = -0.5 * tf.reduce_mean(1 + z_log_var - tf.square(z_mean) -
                                            tf.exp(z_log_var))

            total_loss = reconstruction_loss + kl_loss
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return {
            "loss": total_loss,
            "reconstruction_loss": reconstruction_loss,
            "kl_loss": kl_loss,
        }
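
Examples #3 and #4 call a two-output variant of dicriminative_error that returns both the low-class mean mean_0 and a between-class variance sigma_b, which the loss compares against the total variance sigma (the ratio sigma_b / sigma is exactly what Otsu's method maximizes). A hedged sketch under those assumptions, with a per-sample scalar threshold of shape (batch,):

import tensorflow as tf

def dicriminative_error(error, threshold):
    # Hypothetical sketch: split each (H, W) error map at the Otsu
    # threshold, then return the mean of the low-error class together
    # with the between-class variance of the split.
    t = tf.reshape(threshold, [-1, 1, 1])
    w0 = tf.cast(error <= t, error.dtype)   # low-error (class-0) pixels
    w1 = 1.0 - w0                           # high-error (class-1) pixels
    n0 = tf.reduce_sum(w0, axis=[1, 2]) + 1e-8
    n1 = tf.reduce_sum(w1, axis=[1, 2]) + 1e-8
    mean_0 = tf.reduce_sum(error * w0, axis=[1, 2]) / n0
    mean_1 = tf.reduce_sum(error * w1, axis=[1, 2]) / n1
    n = n0 + n1
    # Otsu between-class variance: p0 * p1 * (mu1 - mu0)^2.
    sigma_b = (n0 / n) * (n1 / n) * tf.square(mean_1 - mean_0)
    return mean_0, sigma_b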
Example #5
def _get_total_loss_tensor(activations):
  losses = []
  for activation in activations:
    losses.append(
        math_ops.reduce_mean(
            math_ops.reduce_sum(
                gen_math_ops.squared_difference(activation, 0), 1)))
  total_loss = array_ops.expand_dims_v2(sum(losses), 0)
  return total_loss
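
This helper relies on TensorFlow's internal op modules rather than the public API; math_ops.reduce_mean, math_ops.reduce_sum, gen_math_ops.squared_difference, and array_ops.expand_dims_v2 correspond to tf.reduce_mean, tf.reduce_sum, tf.math.squared_difference, and tf.expand_dims. A minimal usage sketch, assuming those imports:

import tensorflow as tf
from tensorflow.python.ops import array_ops, gen_math_ops, math_ops

# Two dummy activation tensors: the helper sums each activation's squared
# values over the feature axis, averages over the batch, adds the results,
# and returns the total as a shape-(1,) tensor.
activations = [tf.random.normal([8, 16]), tf.random.normal([8, 32])]
total_loss = _get_total_loss_tensor(activations)
print(total_loss.shape)  # (1,)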
Example #6
    def call(self, inputs):
        features = self.srmConv2D(inputs)
        z_mean, z_log_var, z = self.encoder(features)
        reconstruction = self.decoder(z)

        L2 = squared_difference(features, reconstruction)
        error = tf.reduce_mean(L2, axis=-1)

        threshold = otsu(error)

        mask = discriminative_labelling(error, threshold)
        return features, reconstruction, error, mask
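
The discriminative_labelling call above turns the thresholded error map into a binary anomaly mask. A one-line sketch, assuming a per-sample scalar threshold of shape (batch,):

import tensorflow as tf

def discriminative_labelling(error, threshold):
    # Hypothetical sketch: pixels whose error exceeds the per-sample
    # Otsu threshold are labelled 1 (anomalous), the rest 0.
    t = tf.reshape(threshold, [-1, 1, 1])
    return tf.cast(error > t, tf.float32)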