Example #1

    def test_mse(self):
        y_a = np.random.random([2, 3, 4])
        y_b = np.random.random([2, 3, 4])
        kloss = objectives.mean_squared_error
        bloss = OptimConverter.to_bigdl_criterion(
            objectives.mean_squared_error)
        self.compare_loss(y_a, y_b, kloss, bloss)
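
The compare_loss helper used throughout these tests is not shown in this listing. A plausible reconstruction, mirroring the explicit comparison in Example #4 below (the tolerances here are assumptions):

    import numpy as np
    import keras.backend as K

    def compare_loss(y_a, y_b, kloss, bloss, rtol=1e-6, atol=1e-6):
        # Keras objectives take (y_true, y_pred); evaluate the symbolic
        # result and average it down to a scalar.
        keras_output = np.mean(K.eval(kloss(K.variable(y_b), K.variable(y_a))))
        # BigDL criterions compute the loss eagerly via forward().
        bigdl_output = bloss.forward(y_a, y_b)
        np.testing.assert_allclose(bigdl_output, keras_output,
                                   rtol=rtol, atol=atol)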
Example #2

    @classmethod
    def from_keras(cls, keras_model, dataset):
        import tensorflow.keras.backend as keras_backend

        loss = keras_model.total_loss
        inputs = keras_model.inputs + keras_model.targets + keras_model.sample_weights

        variables = keras_model._collected_trainable_weights
        keras_optimizer = keras_model.optimizer
        grads = keras_optimizer.get_gradients(loss, variables)
        sess = keras_backend.get_session()
        with sess.as_default():
            optim_method = OptimConverter.to_bigdl_optim_method(
                keras_optimizer)

        if keras_model.metrics:
            if isinstance(keras_model.metrics, dict):
                raise ValueError(
                    "different metrics for different outputs are not supported right now"
                )
            bigdl_val_methods = [
                to_bigdl_metric(m) for m in keras_model.metrics_names
            ]
            val_outputs = keras_model.outputs
            val_labels = keras_model.targets
        else:
            val_outputs = None
            val_labels = None
            bigdl_val_methods = None

        return cls(loss, optim_method, sess, dataset, inputs, grads, variables,
                   loss.graph, val_outputs, val_labels, bigdl_val_methods,
                   len(keras_model.sample_weights))
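
For context, a minimal usage sketch of this classmethod. The enclosing class name (TFOptimizer) and the dataset object are assumptions, since neither appears in the snippet; the model must be compiled first so that total_loss, targets, and sample_weights exist on it (this snippet targets the TF1-style Keras session API):

    import tensorflow as tf

    # Hypothetical driver code for from_keras.
    keras_model = tf.keras.Sequential([
        tf.keras.layers.Dense(1, input_shape=(10,))
    ])
    keras_model.compile(optimizer='sgd', loss='mse')

    # TFOptimizer is an assumed name for the class defining from_keras;
    # `dataset` is a placeholder for the training-data wrapper it expects.
    optimizer = TFOptimizer.from_keras(keras_model, dataset)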
Example #3

    def compile(self, optimizer, loss, metrics=None):
        """
        Configures the learning process. Must be called before fit.

        # Arguments
        optimizer: Optimization method to be used. One can alternatively pass in the corresponding
                   string representation, such as 'sgd'.
        loss: Criterion to be used. One can alternatively pass in the corresponding string
              representation, such as 'mse'.
        metrics: List of validation methods to be used. Default is None.
                 One can alternatively pass in the corresponding string
                 representations, such as ['accuracy'].
        """
        if isinstance(optimizer, six.string_types):
            optimizer = OptimConverter.to_bigdl_optim_method(optimizer)
        if isinstance(loss, six.string_types):
            loss = OptimConverter.to_bigdl_criterion(loss)
        # metrics defaults to None; guard before iterating over it.
        if metrics and all(isinstance(metric, six.string_types)
                           for metric in metrics):
            metrics = OptimConverter.to_bigdl_metrics(metrics)
        callBigDlFunc(self.bigdl_type, "compile", self.value, optimizer, loss,
                      metrics)
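
A short usage sketch of compile with the string shorthands the docstring mentions; `model` is a placeholder for an instance of the class defining compile:

    # Strings are converted by OptimConverter before being handed to the
    # JVM side via callBigDlFunc.
    model.compile(optimizer='sgd', loss='mse', metrics=['accuracy'])

    # Equivalent call passing pre-converted objects directly:
    model.compile(optimizer=OptimConverter.to_bigdl_optim_method('sgd'),
                  loss=OptimConverter.to_bigdl_criterion('mse'),
                  metrics=OptimConverter.to_bigdl_metrics(['accuracy']))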
Example #4

    def test_sparse_categorical_crossentropy(self):
        import keras.backend as K
        y_a = np.array([0.12, 0.22, 0.30, 0.17, 0.19])
        # class indices start from 1 in BigDL but from 0 in Keras
        y_b_bigdl = np.array([2])
        y_b_keras = np.array([1])
        kloss = objectives.sparse_categorical_crossentropy
        bloss = OptimConverter.to_bigdl_criterion(
            "sparse_categorical_crossentropy")
        bigdl_output = bloss.forward(y_a, y_b_bigdl)
        keras_output = np.mean(
            K.eval(kloss(K.variable(y_b_keras), K.variable(y_a))))
        np.testing.assert_allclose(bigdl_output, keras_output)
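
As a sanity check on the expected value: y_a sums to one, so it can be read as a probability vector, and sparse categorical crossentropy then reduces to the negative log-probability of the true class:

    import numpy as np

    p = np.array([0.12, 0.22, 0.30, 0.17, 0.19])  # sums to 1.0
    # True class is index 1 in Keras (0-based), i.e. index 2 in BigDL (1-based).
    expected = -np.log(p[1])  # ~1.5141, the value both backends should return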
Example #5

    def test_squared_hinge(self):
        y_a = np.random.random([2, 3, 4])
        y_b = np.random.random([2, 3, 4])
        kloss = objectives.squared_hinge
        bloss = OptimConverter.to_bigdl_criterion("squared_hinge")
        self.compare_loss(y_a, y_b, kloss, bloss)
Example #6

    def test_categorical_crossentropy(self):
        y_a = np.random.random([2, 3])
        y_b = np.array([[0, 1, 0], [0, 0, 1]])
        kloss = objectives.categorical_crossentropy
        bloss = OptimConverter.to_bigdl_criterion("categorical_crossentropy")
        self.compare_loss(y_a, y_b, kloss, bloss)
Example #7

    def test_binary_crossentropy(self):
        y_a = np.random.random([5, 6, 7])
        y_b = np.random.random([5, 6, 7])
        kloss = objectives.binary_crossentropy
        bloss = OptimConverter.to_bigdl_criterion("binary_crossentropy")
        self.compare_loss(y_a, y_b, kloss, bloss)
Example #8

    def test_mape(self):
        y_a = np.random.random([5, 6, 7])
        y_b = np.random.random([5, 6, 7])
        kloss = objectives.mean_absolute_percentage_error
        bloss = OptimConverter.to_bigdl_criterion("mape")
        self.compare_loss(y_a, y_b, kloss, bloss)
Example #9

    def test_kld(self):
        y_a = np.random.random([4, 5])
        y_b = np.random.random([4, 5])
        kloss = objectives.kullback_leibler_divergence
        bloss = OptimConverter.to_bigdl_criterion("kld")
        self.compare_loss(y_a, y_b, kloss, bloss)
Example #10

    def test_cosine_proximity(self):
        y_a = np.random.random([2, 3, 4])
        y_b = np.random.random([2, 3, 4])
        kloss = objectives.cosine_proximity
        bloss = OptimConverter.to_bigdl_criterion("cosine_proximity")
        self.compare_loss(y_a, y_b, kloss, bloss)
Example #11

    def test_poisson(self):
        y_a = np.random.random([2, 3, 4])
        y_b = np.random.random([2, 3, 4])
        kloss = objectives.poisson
        bloss = OptimConverter.to_bigdl_criterion("poisson")
        self.compare_loss(y_a, y_b, kloss, bloss)
Example #12

    def test_msle(self):
        y_a = np.random.random([2, 3, 4])
        y_b = np.random.random([2, 3, 4])
        kloss = objectives.mean_squared_logarithmic_error
        bloss = OptimConverter.to_bigdl_criterion("msle")
        self.compare_loss(y_a, y_b, kloss, bloss)
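
Taken together, the examples show that to_bigdl_criterion accepts either the Keras objective function itself (Example #1) or a string shorthand such as 'mape', 'kld', or 'msle'. A hypothetical check of that equivalence, assuming both forms resolve to the same criterion class:

    # Mirrors Example #1 (function form) and Example #12 (string form).
    by_string = OptimConverter.to_bigdl_criterion("msle")
    by_function = OptimConverter.to_bigdl_criterion(
        objectives.mean_squared_logarithmic_error)
    assert type(by_string) == type(by_function)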