Code Example #1
    def predict(self,
                x: np.ndarray,
                batch_size: Optional[int] = None) -> np.ndarray:
        """
        Make predictions using the ``x`` array. If ``batch_size`` is not None predictions are predicted by mini-batch.

        Args
        ----

            x : array with shape (n_observation, input_dim)
                Array of input which must have a dimension equal to input_dim.

            batch_size : int, None
                Number of observations to used for each prediction step. If None predict all label using a single step.

        Returns
        -------
            array with shape (n_observation,)
                Array of predictions
         """

        # Validate the input shape before running the graph.
        check_array(x, shape=(-1, ) + tuple(self.input_dim))

        # Number of mini-batches; guard against a batch_size larger than the
        # number of observations, which would make len(x) // batch_size zero.
        n_split = 1 if batch_size is None else max(1, len(x) // batch_size)

        with self.graph.as_default():
            y_predict = []
            for x_batch in [x] if batch_size is None else np.array_split(
                    x, n_split, axis=0):
                # Run the graph in inference mode on each mini-batch.
                feed_dict = self._get_feed_dict(is_training=False)
                feed_dict.update({self.x: x_batch})
                y_predict.append(
                    self.sess.run(self.y_pred, feed_dict=feed_dict))

            return np.concatenate(y_predict, 0)
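
A minimal usage sketch (here ``model`` stands for an already trained instance and the 10-column input is illustrative; neither comes from the source):

    import numpy as np

    x_new = np.random.rand(100, 10).astype(np.float32)

    y_all = model.predict(x_new)                     # one pass over all rows
    y_batched = model.predict(x_new, batch_size=25)  # four mini-batches of 25

    # Inference runs with is_training=False, so both paths should agree.
    assert np.allclose(y_all, y_batched)

``np.array_split`` tolerates uneven splits, so ``batch_size`` does not have to divide ``len(x)``: 100 rows with ``batch_size=30`` yield ``max(1, 100 // 30) = 3`` chunks of 34, 33 and 33 rows.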
Code Example #2
File: MLP.py Project: nderemacle/deep_learning
    def predict_proba(self,
                      x: np.ndarray,
                      batch_size: Optional[int] = None) -> np.ndarray:
        """
        Predict a vector of probability for each label. If ``batch_size`` is not None predictions are predicted by
        mini-batch

        Args
        ----

            x: array with shape (n_observations, n_inputs)
                Array of input which must have a dimension equal to input_dim.

            batch_size: int
                Number of observation to used for each prediction step. If None predict all label using a single step.

        Returns
        -------

            array with shape (n_observation, n_labels)
                Array of predicted probabilities.
        """

        # Validate the input shape before running the graph.
        check_array(x, shape=(-1, self.input_dim))

        # Number of mini-batches; guard against a batch_size larger than the
        # number of observations, which would make len(x) // batch_size zero.
        n_split = 1 if batch_size is None else max(1, len(x) // batch_size)

        with self.graph.as_default():
            y_pred = []
            for x_batch in [x] if batch_size is None else np.array_split(
                    x, n_split, axis=0):
                # Inference mode: keep_proba=1. disables dropout.
                feed_dict = self._get_feed_dict(is_training=False,
                                                keep_proba=1.)
                feed_dict.update({self.x: x_batch})
                y_pred.append(self.sess.run(self.x_out, feed_dict=feed_dict))

            # Softmax over the raw outputs: exponentiate, then normalize rows.
            y_pred = np.exp(np.concatenate(y_pred, 0))

            return y_pred / y_pred.sum(1).reshape(-1, 1)
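
The last two lines apply a softmax to the raw network outputs: exponentiate, then divide each row by its sum. A standalone sketch of just that normalization (``logits`` is a hypothetical stand-in for the concatenated ``self.x_out`` values):

    import numpy as np

    logits = np.array([[2.0, 1.0, 0.1],
                       [0.5, 0.5, 0.5]])

    proba = np.exp(logits)
    proba = proba / proba.sum(1).reshape(-1, 1)

    # Each row is now a probability distribution over the labels.
    assert np.allclose(proba.sum(1), 1.0)

Exponentiating raw outputs can overflow for large values; a numerically safer variant subtracts ``logits.max(axis=1, keepdims=True)`` before calling ``np.exp``.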
Code Example #3
File: MLP.py Project: nderemacle/deep_learning
    def fit(self,
            x: np.ndarray,
            y: np.ndarray,
            n_epoch: int = 1,
            batch_size: int = 10,
            learning_rate: float = 0.001,
            keep_proba: float = 1.,
            rmax: float = 3.,
            rmin: float = 0.33,
            dmax: float = 5.,
            verbose: bool = True) -> None:
        """ Fit the MLP ``n_epoch`` using the ``x`` and ``y`` array of observations.

        Args
        ----

            x: array with shape (n_observation, input_dim)
                Array of input which must have a dimension equal to input_dim.

            y: array with shape (n_observation, output_dim)
                Array of target which must have a dimension equal to output_dim.

            n_epoch: int
                Number of epochs to train the neural network.

            batch_size: int
                Number of observations to used for each backpropagation step.

            learning_rate: float
                Learning rate use for gradient descent methodologies.

            keep_proba: float
                Probability to keep a neurone activate during training.

            rmin: float
                Minimum ratio used to clip the standard deviation ratio when batch renormalization is applied.

            rmax: float
                Maximum ratio used to clip the standard deviation ratio when batch renormalization is applied.

            dmax: float
                When batch renormalization is used the scaled mu differences is clipped between (-dmax, dmax).

            verbose: bool
                If True print the value of the loss function after each epoch.

        """

        # Validate input and target shapes before training.
        check_array(x, shape=(-1, self.input_dim))
        check_array(y, shape=(-1, self.output_dim))

        sample_index = np.arange(len(x))
        # Guard against a batch_size larger than the number of observations.
        n_split = max(1, len(x) // batch_size)

        with self.graph.as_default():
            for epoch in range(n_epoch):
                # Reshuffle the observations at the start of every epoch.
                np.random.shuffle(sample_index)
                for batch_index in np.array_split(sample_index, n_split):
                    feed_dict = self._get_feed_dict(True, learning_rate,
                                                    keep_proba, rmin, rmax,
                                                    dmax)
                    feed_dict.update({
                        self.x: x[batch_index, :],
                        self.y: y[batch_index, :]
                    })
                    # One backpropagation step; record the loss to build the
                    # learning curve.
                    _, loss = self.sess.run([self.optimizer, self.loss],
                                            feed_dict=feed_dict)

                    self.learning_curve.append(loss)

                if verbose:
                    print(f'Epoch {epoch}: {self.learning_curve[-1]}')
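
A minimal end-to-end training sketch (``model`` is assumed to be an already constructed MLP with ``input_dim=4`` and ``output_dim=3``; the construction step is omitted because the constructor is not shown here, and the data below are synthetic):

    import numpy as np

    # Toy one-hot classification data matching input_dim=4, output_dim=3.
    x_train = np.random.rand(300, 4).astype(np.float32)
    y_train = np.eye(3)[np.random.randint(0, 3, size=300)]

    model.fit(x_train, y_train,
              n_epoch=5,
              batch_size=32,
              learning_rate=0.001,
              keep_proba=0.8)  # keep 80% of neurons active (dropout)

    proba = model.predict_proba(x_train[:10])  # rows sum to 1
    labels = model.predict(x_train[:10])       # point predictions

The ``rmin``, ``rmax`` and ``dmax`` arguments keep their defaults here; they only take effect when the network applies batch renormalization.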