def testComputePredict(self):
    x_out = np.array([[1., 0.5], [2., 0.], [3.4, 10.]]).astype(np.float32)
    loss = MeanSquareError()
    with self.test_session() as sess:
        expected_output = x_out
        test_predict(self, sess, loss, x_out, expected_output)
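# A minimal sketch of the property exercised above, assuming (as the
# _set_loss methods later in this section suggest) that
# MeanSquareError.build(y, x_out, weights) returns (loss_opt, y_pred) and
# that an empty weights list disables regularization. For a regression
# loss, y_pred is expected to be the identity on the network output.
import numpy as np
import tensorflow as tf

x_np = np.array([[1., 0.5], [2., 0.], [3.4, 10.]], dtype=np.float32)
x = tf.constant(x_np)
_, y_pred = MeanSquareError().build(y=x, x_out=x, weights=[])
with tf.Session() as sess:
    np.testing.assert_allclose(sess.run(y_pred), x_np)  # identity prediction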
def testComputeLoss(self):
    x_out = np.array([[1., 0.5], [2., 0.], [3.4, 10.]])
    y = np.array([[1, 0], [0, 1], [1, 0]])
    loss = MeanSquareError()
    expected_loss = np.mean(np.sum(np.power(np.subtract(x_out, y), 2), 1))
    with self.test_session() as sess:
        test_loss(self, sess, loss, x_out, expected_loss, y, rtol=1e-6)
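# Worked check of the expected value above: a pure-NumPy sketch of the same
# formula (mean over samples of the row-wise sum of squared errors), with
# the intermediate numbers spelled out. No TF session needed.
import numpy as np

x_out = np.array([[1., 0.5], [2., 0.], [3.4, 10.]])
y = np.array([[1, 0], [0, 1], [1, 0]])
per_sample = np.sum((x_out - y) ** 2, axis=1)  # [0.25, 5.0, 105.76]
print(per_sample.mean())                       # ~37.00333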
class MlpRegressor(BaseMlp):
    """This class allows training an MLP for regression tasks.

    The target array must be a 2-D matrix with one or more objective
    variables to learn.

    Args
    ----
    name : str
        Name of the network.
    use_gpu : bool
        If True, train the network on a single GPU; otherwise use all CPUs.
        Parallelism settings may be improved in a future version.

    Attributes
    ----------
    x : tf.Tensor, None
        Input tensor of the network.
    y : tf.Tensor, None
        Tensor containing the true target variables to predict.
    x_out : tf.Tensor, None
        Output of the network.
    loss : tf.Tensor, None
        Loss function optimized to train the MLP.
    y_pred : tf.Tensor, None
        Prediction tensor.
    l_fc : List[FullyConnected], None
        List containing all fully connected layer objects.
    l_output : FullyConnected, None
        Final layer for network output reduction.
    l_loss : AbstractLoss, None
        Loss layer object.
    """

    def __init__(self, name: str = 'MlpRegressor', use_gpu: bool = False):
        super().__init__(name, use_gpu)

    def _set_loss(self, weights: Sequence[tf.Variable]) -> None:
        """Use the MeanSquareError class to define the network loss function.

        Args
        ----
        weights : Sequence[tf.Variable]
            List of weights to apply regularization to.
        """
        self.l_loss = MeanSquareError(penalization_rate=self.penalization_rate,
                                      penalization_type=self.penalization_type,
                                      name="mean_square_error")
        self.loss_opt, self.y_pred = self.l_loss.build(y=self.y, x_out=self.x_out,
                                                       weights=weights)
        self.loss = self.l_loss.loss
        self.optimizer = self._minimize(self.loss_opt, name="optimizer")
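# Hypothetical usage sketch: MlpRegressor and its constructor come from the
# class above, but the fit/predict entry points (presumably inherited from
# BaseMlp) and their signatures are assumptions, hence left commented out.
import numpy as np

X = np.random.randn(256, 20).astype(np.float32)  # 256 samples, 20 features
Y = np.random.randn(256, 2).astype(np.float32)   # two regression targets

net = MlpRegressor(name="demo_mlp", use_gpu=False)
# net.fit(X, Y)           # assumed BaseMlp training entry point
# y_hat = net.predict(X)  # assumed prediction entry point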
def testRestore(self):
    loss = MeanSquareError()
    test_restore(self, loss, [100, 10], [100, 10],
                 tensors=["loss", "loss_opt", "y", "x_out", "y_pred"])
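# Hedged sketch of the idea behind testRestore: after a saved graph is
# restored, each tensor listed above should be recoverable by name. The
# "<scope>/<name>:0" naming convention is an assumption about how the loss
# layer registers its tensors, not the project's documented behavior.
import tensorflow as tf

def fetch_by_name(graph: tf.Graph, scope: str, names):
    # Raises KeyError if any tensor is missing from the restored graph.
    return [graph.get_tensor_by_name(f"{scope}/{name}:0") for name in names]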
class ConvNetRegressor(BaseConvNet):
    """This class allows training a convolutional network for regression problems.

    The ConvNet takes as input a 3-D tensor of input data, which is filtered
    through a series of convolution, pooling and inception steps. The result
    then feeds a series of fully connected layers that solve the prediction
    task.

    Args
    ----
    name : str
        Name of the network.
    use_gpu : bool
        If True, train the network on a single GPU; otherwise use all CPUs.
        Parallelism settings may be improved in a future version.

    Attributes
    ----------
    x : tf.Tensor, None
        Input tensor of the network.
    y : tf.Tensor, None
        Tensor containing the true target variables to predict.
    x_out : tf.Tensor, None
        Output of the network.
    loss : tf.Tensor, None
        Loss function optimized to train the network.
    y_pred : tf.Tensor, None
        Prediction tensor.
    l_fc : List[FullyConnected], None
        List containing all fully connected layer objects.
    l_output : FullyConnected, None
        Final layer for network output reduction.
    l_loss : AbstractLoss, None
        Loss layer object.
    """

    def __init__(self, name: str = 'ConvNetRegressor', use_gpu: bool = False):
        super().__init__(name, use_gpu)

    def _set_loss(self, weights: Sequence[tf.Variable]) -> None:
        """Use the MeanSquareError class to define the network loss function.

        Args
        ----
        weights : Sequence[tf.Variable]
            List of weights to apply regularization to.
        """
        self.l_loss = MeanSquareError(penalization_rate=self.penalization_rate,
                                      penalization_type=self.penalization_type,
                                      name="mean_square_error")
        self.loss_opt, self.y_pred = self.l_loss.build(y=self.y, x_out=self.x_out,
                                                       weights=weights)
        self.loss = self.l_loss.loss
        self.optimizer = self._minimize(self.loss_opt, name="optimizer")
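# Hypothetical usage sketch: per the docstring, each sample is a 3-D tensor,
# so a batch is 4-D (e.g. images). As with MlpRegressor, the fit/predict
# entry points (presumably from BaseConvNet) are assumptions, hence left
# commented out.
import numpy as np

X = np.random.randn(128, 32, 32, 3).astype(np.float32)  # batch of 32x32 RGB
Y = np.random.randn(128, 1).astype(np.float32)          # one regression target

net = ConvNetRegressor(name="demo_convnet", use_gpu=False)
# net.fit(X, Y)           # assumed BaseConvNet training entry point
# y_hat = net.predict(X)  # assumed prediction entry point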