def cycle_consistency_loss(data_x,
                           reconstructed_data_x,
                           data_y,
                           reconstructed_data_y,
                           scope=None,
                           add_summaries=False):
  """Defines the cycle consistency loss.

  The CycleGAN model has two partial models, where the `model_x2y` generator F
  maps data set X to Y and the `model_y2x` generator G maps data set Y to X.
  For a `data_x` in data set X, we can reconstruct it by
  * reconstructed_data_x = G(F(data_x))
  Similarly
  * reconstructed_data_y = F(G(data_y))

  The cycle consistency loss measures the difference between the data and the
  reconstructed data, namely
  * loss_x2x = |data_x - G(F(data_x))| (L1-norm)
  * loss_y2y = |data_y - F(G(data_y))| (L1-norm)
  * loss = (loss_x2x + loss_y2y) / 2
  where `loss` is the final result.

  For the L1-norm, we follow the original implementation
  (https://github.com/junyanz/CycleGAN/blob/master/models/cycle_gan_model.lua)
  and use the L1-norm of the pixel-wise error normalized by the data size, so
  that `cycle_loss_weight` can be specified independently of the image size.

  See https://arxiv.org/abs/1703.10593 for more details.

  Args:
    data_x: A `Tensor` of data X.
    reconstructed_data_x: A `Tensor` of reconstructed data X.
    data_y: A `Tensor` of data Y.
    reconstructed_data_y: A `Tensor` of reconstructed data Y.
    scope: The scope for the operations performed in computing the loss.
      Defaults to None.
    add_summaries: Whether or not to add detailed summaries for the loss.
      Defaults to False.

  Returns:
    A scalar `Tensor` of cycle consistency loss.
  """
  with ops.name_scope(
      scope,
      'cycle_consistency_loss',
      values=[data_x, reconstructed_data_x, data_y, reconstructed_data_y]):
    loss_x2x = losses.absolute_difference(data_x, reconstructed_data_x)
    loss_y2y = losses.absolute_difference(data_y, reconstructed_data_y)
    loss = (loss_x2x + loss_y2y) / 2.0
    if add_summaries:
      summary.scalar('cycle_consistency_loss_x2x', loss_x2x)
      summary.scalar('cycle_consistency_loss_y2y', loss_y2y)
      summary.scalar('cycle_consistency_loss', loss)
  return loss
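# Usage sketch (hypothetical, not part of the original code; assumes
# TensorFlow 1.x with the `ops`, `losses`, and `summary` imports used by the
# function above already in scope). Identity mappings stand in for the
# composed generators G(F(.)) and F(G(.)), so the loss evaluates to exactly
# 0.0; with real CycleGAN generators the reconstructions would differ from
# the inputs and the loss would be positive.
import tensorflow as tf

data_x = tf.random_uniform([4, 32, 32, 3])
data_y = tf.random_uniform([4, 32, 32, 3])
reconstructed_x = tf.identity(data_x)  # Placeholder for G(F(data_x)).
reconstructed_y = tf.identity(data_y)  # Placeholder for F(G(data_y)).
loss = cycle_consistency_loss(data_x, reconstructed_x, data_y,
                              reconstructed_y)
with tf.Session() as sess:
  print(sess.run(loss))  # Prints 0.0 for perfect reconstructions.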
def model_fn(self, mode, features, labels, params):
  c = variable_scope.get_variable(
      'c',
      initializer=constant_op.constant(10, dtype=dtypes.float64),
      dtype=dtypes.float64)
  predictions = math_ops.multiply(features, c)
  loss = None
  if mode is not model_fn_lib.ModeKeys.PREDICT:
    loss = losses.absolute_difference(
        labels=labels,
        predictions=predictions,
        reduction=losses.Reduction.SUM)
    loss = math_ops.reduce_sum(loss)
  metrics = {
      'accuracy': metrics_lib.accuracy(labels, predictions),
      'auc': metrics_lib.auc(labels, predictions)
  }
  return model_fn_lib.EstimatorSpec(
      mode=mode,
      loss=loss,
      eval_metric_ops=metrics,
      predictions={'probabilities': predictions},
      train_op=control_flow_ops.no_op())  # This train_op isn't actually used.
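# Hypothetical smoke test (an assumption, not from the original code; assumes
# TF 1.x, the imports above, and the enclosing test class providing `self`):
# call the model_fn directly in EVAL mode and evaluate its loss. Note that
# tf.estimator.Estimator matches model_fn arguments by name, not position, so
# the (mode, features, labels, params) ordering above is also accepted there.
spec = self.model_fn(
    mode=model_fn_lib.ModeKeys.EVAL,
    features=constant_op.constant([1.0], dtype=dtypes.float64),
    labels=constant_op.constant([10.0], dtype=dtypes.float64),
    params=None)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(tf.local_variables_initializer())  # Metrics create local vars.
  print(sess.run(spec.loss))  # |1 * 10 - 10| summed == 0.0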
def model_fn(self, mode, features, labels, params):
  c = variable_scope.get_variable(
      'c',
      initializer=constant_op.constant(0.25, dtype=dtypes.float64),
      dtype=dtypes.float64)
  predictions = math_ops.add(np.array([0.1, 0.2, 0.3, features[0]]), c)
  labels = np.array([0.1, 0.2, 0.3, labels[0]])
  loss = losses.absolute_difference(
      labels=labels,
      predictions=predictions,
      reduction=losses.Reduction.SUM)
  return model_fn_lib.EstimatorSpec(mode=mode, loss=math_ops.reduce_sum(loss))
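# Worked example (hypothetical inputs, not from the original test): with
# c == 0.25, every element of `predictions` sits exactly 0.25 above the
# corresponding label whenever features[0] == labels[0], so the SUM-reduced
# absolute difference is 4 * 0.25 = 1.0 regardless of the shared value.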
def network(x, y, la, lr):
  del x  # Unused.
  with variable_scope.variable_scope("vs", use_resource=True):
    w = variable_scope.get_variable(
        "w",
        shape=[10, 200],
        dtype=np.float32,
        initializer=init_ops.constant_initializer(2.))
    # Look up one [200]-wide embedding row per id in `y`.
    g = nn.embedding_lookup(w, y)
    # L1 (absolute difference) loss against the targets, reduced to a scalar.
    ce = losses.absolute_difference(labels=la, predictions=g)
    loss = math_ops.reduce_mean(ce)
    optimizer = gradient_descent.GradientDescentOptimizer(lr)
    train = optimizer.minimize(loss)
    return loss, train
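# Hypothetical driver for network() above (assumes TensorFlow 1.x; the batch
# size, feeds, and learning rate are illustrative, not from the original
# test). `y` holds integer ids into the [10, 200] embedding table, so the
# targets `la` must have shape [batch, 200].
import numpy as np
import tensorflow as tf

y = tf.placeholder(tf.int32, shape=[8])
la = tf.placeholder(tf.float32, shape=[8, 200])
loss, train = network(None, y, la, lr=0.1)  # `x` is unused and deleted.
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  feed = {y: np.arange(8), la: np.zeros([8, 200], np.float32)}
  for _ in range(3):
    _, loss_val = sess.run([train, loss], feed)  # Loss decreases from 2.0.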
def model_fn(self, mode, features, labels, params):
  c = variable_scope.get_variable(
      'c',
      initializer=constant_op.constant(10, dtype=dtypes.float64),
      dtype=dtypes.float64)
  predictions = {'probabilities': math_ops.multiply(features, c)}
  loss = losses.absolute_difference(
      labels=labels,
      predictions=predictions['probabilities'],
      reduction=losses.Reduction.SUM)
  metrics = {
      'accuracy': metrics_lib.accuracy(labels, predictions['probabilities']),
      'auc': metrics_lib.auc(labels, predictions['probabilities'])
  }
  tensor_string_repr = str(features)
  classes = constant_op.constant(
      re.search('(split_inputs/split:[0-9])', tensor_string_repr).group(1),
      dtype=dtypes.string)
  export_outputs = {
      signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
          export_output.PredictOutput(predictions),
      'classification_output':
          export_output.ClassificationOutput(predictions['probabilities'],
                                             classes),
      'classification_scores':
          export_output.ClassificationOutput(
              scores=predictions['probabilities']),
      'classification_classes':
          export_output.ClassificationOutput(classes=classes),
      'regression_output':
          export_output.RegressionOutput(predictions['probabilities']),
  }
  return model_fn_lib.EstimatorSpec(
      mode=mode,
      loss=math_ops.reduce_sum(loss),
      eval_metric_ops=metrics,
      predictions=predictions,
      train_op=loss,  # This train_op isn't actually used.
      export_outputs=export_outputs)
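# Note (general Estimator behavior, not asserted by the original test): when
# an Estimator built from this model_fn exports a SavedModel, each key in
# `export_outputs` becomes its own SignatureDef, and the entry under
# signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY ("serving_default")
# is the signature used when a caller does not name one explicitly.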
def _loss(gan_model, features, labels, _):
  """Make sure that features and labels are passed in from input."""
  self.assertTrue(np.array_equal(features, true_features))
  self.assertTrue(np.array_equal(labels, true_labels))
  return losses.absolute_difference(expected_z_output,
                                    gan_model.generated_data)