Example #1
    def testL1Penalization(self):
        W0 = np.array([1., 1., -1.])
        W1 = np.array([-2., -2.])
        list_weight = [W0, W1]

        loss = CrossEntropy(penalization_type="L1")
        loss.weights = list_weight
        loss._compute_penalization()
        with self.test_session():
            # L1 penalty: sum of the absolute values of every weight entry.
            expected = (1 + 1 + 1) + (2 + 2)
            self.assertEqual(loss.penality.eval(), expected)
Example #2
    def testL2Penalization(self):
        W0 = np.array([1., 1., 1.])
        W1 = np.array([2., 2.])
        list_weight = [W0, W1]

        loss = CrossEntropy(penalization_type="L2")
        loss.weights = list_weight
        loss._compute_penalization()

        with self.test_session():
            # L2 penalty: half the sum of the squared weight entries.
            expected = (3 * (1 ** 2) + 2 * (2 ** 2)) / 2
            self.assertEqual(loss.penality.eval(), expected)
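For reference, both expected values can be reproduced directly in NumPy. A minimal sketch of the penalty conventions the two tests imply (L1 as the sum of absolute values, L2 as half the sum of squares):

import numpy as np

# Weights from testL1Penalization: |1| + |1| + |-1| + |-2| + |-2| = 7
l1_weights = [np.array([1., 1., -1.]), np.array([-2., -2.])]
assert sum(np.abs(w).sum() for w in l1_weights) == 7.0

# Weights from testL2Penalization: (1 + 1 + 1 + 4 + 4) / 2 = 5.5
l2_weights = [np.array([1., 1., 1.]), np.array([2., 2.])]
assert sum((w ** 2).sum() for w in l2_weights) / 2 == 5.5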
Example #3
    def testComputePredict(self):
        x_out = np.array([[1., 0.5],
                          [2., 0.],
                          [3.4, 10.]])
        loss = CrossEntropy()

        with self.test_session() as sess:
            expected_output = np.array([0, 0, 1])
            test_predict(self, sess, loss, x_out, expected_output)
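The expected labels follow from a row-wise argmax over x_out (softmax is monotonic, so it leaves the argmax unchanged). A quick NumPy check, independent of the test_predict helper:

import numpy as np

x_out = np.array([[1., 0.5],
                  [2., 0.],
                  [3.4, 10.]])

# Row-wise argmax reproduces the expected labels [0, 0, 1].
assert (x_out.argmax(axis=1) == np.array([0, 0, 1])).all()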
Example #4
    def _set_loss(self, weights: Sequence[tf.Variable]) -> None:
        """
        Use the cross entropy class to define the network loss function.

        Args
        ----

            weights : Sequence[tf.Variable]
List of weights on which regularization is applied.
        """

        self.l_loss = CrossEntropy(penalization_rate=self.penalization_rate,
                                   penalization_type=self.penalization_type,
                                   name=f"cross_entropy")

        self.loss_opt, self.y_pred = self.l_loss.build(y=self.y,
                                                       x_out=self.x_out,
                                                       weights=weights)

        self.loss = self.l_loss.loss

        self.optimizer = self._minimize(self.loss_opt, name="optimizer")
Example #5
    def testComputeLoss(self):
        x_out = np.array([[1., 0.5],
                          [2., 0.],
                          [3.4, 10.]])

        y = np.array([[1, 0],
                      [0, 1],
                      [1, 0]])

        loss = CrossEntropy()
        # Reference value: softmax over x_out, then the mean over
        # observations of the per-row cross entropy -sum(y * log(p)).
        np_loss = np.exp(x_out)
        np_loss = np_loss / np_loss.sum(1).reshape(-1, 1)
        np_loss = - y * np.log(np_loss)
        expected_loss = np_loss.sum(1).mean()

        with self.test_session() as sess:
            test_loss(self, sess, loss, x_out, expected_loss, y, rtol=1e-6)
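The reference computation above exponentiates x_out directly, which can overflow for large logits. A common equivalent variant (an aside, not part of the library under test) shifts each row by its maximum first:

import numpy as np

def stable_softmax_cross_entropy(x_out, y):
    # Softmax is invariant to a per-row shift, but subtracting the row
    # maximum keeps np.exp from overflowing.
    z = x_out - x_out.max(axis=1, keepdims=True)
    log_softmax = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    return (-y * log_softmax).sum(axis=1).mean()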
Example #6
class MlpClassifier(BaseMlp):
    """
    This class trains an MLP for classification tasks. The target array must be one-hot encoded, with a dimension
    equal to the number of labels to predict. In addition, the class provides a method to predict the probability
    of each label directly.

    Args
    ----

        name : str
            Name of the network.

        use_gpu: bool
            If True, train the network on a single GPU; otherwise use all CPUs. Parallelism settings may be
            improved in a future version.

    Attributes
    ----------

        x: tf.Tensor, None
            Input tensor of the network.

        y: tf.Tensor, None
Tensor containing the true target variables to predict.

        x_out: tf.Tensor, None
            Output of the network.

        loss: tf.Tensor, None
            Loss function optimized to train the MLP.

        y_pred: tf.Tensor, None
            Prediction tensor.

        l_fc: List[FullyConnected], None
            List containing all fully connected layer objects.

        l_output: FullyConnected, None
            Final layer for network output reduction.

        l_loss: AbstractLoss, None
            Loss layer object.
    """
    def __init__(self, name: str = 'MlpClassifier', use_gpu: bool = False):
        super().__init__(name, use_gpu)

    def _set_loss(self, weights: Sequence[tf.Variable]) -> None:
        """
        Use the cross entropy class to define the network loss function.

        Args
        ----

            weights : Sequence[tf.Variable]
List of weights on which regularization is applied.
        """

        self.l_loss = CrossEntropy(penalization_rate=self.penalization_rate,
                                   penalization_type=self.penalization_type,
                                   name=f"cross_entropy")

        self.loss_opt, self.y_pred = self.l_loss.build(y=self.y,
                                                       x_out=self.x_out,
                                                       weights=weights)

        self.loss = self.l_loss.loss

        self.optimizer = self._minimize(self.loss_opt, name="optimizer")

    def predict_proba(self,
                      x: np.ndarray,
                      batch_size: Optional[int] = None) -> np.ndarray:
        """
        Predict a vector of probabilities for each label. If ``batch_size`` is not None, predictions are computed
        mini-batch by mini-batch.

        Args
        ----

            x: array with shape (n_observations, n_inputs)
Array of inputs whose second dimension must equal ``input_dim``.

            batch_size: int
Number of observations to use at each prediction step. If None, predict all labels in a single step.

        Returns
        -------

array with shape (n_observations, n_labels)
                Array of predicted probabilities.
        """

        check_array(x, shape=(-1, self.input_dim))

        # Ceil division keeps every mini-batch at most ``batch_size`` rows and
        # avoids a zero split count when batch_size > len(x).
        n_split = 1 if batch_size is None else int(np.ceil(len(x) / batch_size))

        with self.graph.as_default():
            y_pred = []
            for x_batch in [x] if batch_size is None else np.array_split(
                    x, n_split, axis=0):
                feed_dict = self._get_feed_dict(is_training=False,
                                                keep_proba=1.)
                feed_dict.update({self.x: x_batch})
                y_pred.append(self.sess.run(self.x_out, feed_dict=feed_dict))

            # Convert the network outputs to probabilities with a softmax.
            y_pred = np.exp(np.concatenate(y_pred, 0))

            return y_pred / y_pred.sum(1).reshape(-1, 1)
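As a sanity check on the softmax normalization at the end of predict_proba, the same two lines can be exercised on made-up logits (hypothetical values, not real model outputs); every returned row sums to one:

import numpy as np

# Pretend network outputs for 3 observations and 2 labels.
y_pred = np.exp(np.array([[1., 0.5],
                          [2., 0.],
                          [3.4, 10.]]))
proba = y_pred / y_pred.sum(1).reshape(-1, 1)

# Each row is a probability distribution over the labels.
assert np.allclose(proba.sum(axis=1), 1.0)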
Example #7
    def testRestore(self):
        loss = CrossEntropy()
        test_restore(self, loss, [100, 10], [100, 10],
                     tensors=["loss", "loss_opt", "y", "x_out", "y_pred"])