def fit(self, X, y, batch_size=32, epochs=1, verbose=1,
        validation_split=0.0, validation_data=None, shuffle=True):
    """Trains the model for a fixed number of epochs.

    Arguments:
        X: tensor or array-like
            Input data.
        y: tensor or array-like
            Target data.
        batch_size: integer, Default: 32
            Number of samples per gradient update.
        epochs: integer, Default: 1
            Number of epochs to train the model.
        verbose: integer, Default: 1
            Verbosity mode. 0 = silent, 1 = one line per epoch.
        validation_split: float, Default: 0.0
            Fraction of the training data to be used as validation data.
            The validation data is selected from the last samples in the
            `X` and `y` data, before shuffling.
        validation_data: tuple `(X_val, y_val)`, Default: None
            Data on which to evaluate the loss and any model metrics at
            the end of each epoch. `validation_data` overrides
            `validation_split`.
        shuffle: boolean, Default: True
            Whether to shuffle the data before each epoch.
    """
    if validation_data is None:
        # Hold out the last `validation_split` fraction, before shuffling.
        X, X_val, y, y_val = train_test_split(X, y, validation_split,
                                              shuffle=False)
    else:
        X_val, y_val = validation_data
    progbar = Progbar(epochs, X.shape[0], batch_size)
    for epoch in range(epochs):
        if shuffle:
            X, y = shuffle_data(X, y)
        losses = []
        metrics = []
        for X_batch, y_batch in batch_iterator(X, y, batch_size):
            loss, metric = self.train_on_batch(X_batch, y_batch)
            losses.append(loss)
            metrics.append(metric)
            if verbose >= 1:
                progbar.update(epoch, loss, metric)
        loss = M.mean(losses)
        metric = M.mean(metrics)
        val_loss, val_metric = self.evaluate(X_val, y_val, batch_size)
        self.losses['training'].append(loss)
        self.metrics['training'].append(metric)
        self.losses['validation'].append(val_loss)
        self.metrics['validation'].append(val_metric)
        if verbose >= 1:
            progbar.update(epoch, loss, metric, val_loss, val_metric)
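# A minimal usage sketch for `fit`. `Dense`/`ReLU` and the model
# constructor are hypothetical stand-ins (not defined in this file):
#
#   model = Model([Dense(64), ReLU(), Dense(1)])
#   model.fit(X_train, y_train, batch_size=32, epochs=10,
#             validation_split=0.2)     # holds out the last 20%
#   print(model.losses['training'], model.losses['validation'])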
def evaluate(self, X=None, y=None, batch_size=32, verbose=0):
    """Returns the loss value & metrics values for the model in test mode.

    Arguments:
        X: array-like, Default: None
            Input data.
        y: array-like, Default: None
            Target data.
        batch_size: integer, Default: 32
            Number of samples per evaluation batch.
        verbose: integer, Default: 0
            Verbosity mode. 0 = silent, 1 = one line per batch.

    Returns:
        A tuple of the mean test loss and the mean metric value.
    """
    losses = []
    metrics = []
    for X_batch, y_batch in batch_iterator(X, y, batch_size):
        loss, metric, output = self.test_on_batch(X_batch, y_batch)
        losses.append(loss)
        metrics.append(metric)
        if verbose >= 1:
            print(f"-> {y_batch} - raw {M.round(output, decimals=3)}")
    return M.mean(losses), M.mean(metrics)
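# Evaluation sketch on held-out data, with the same hypothetical `model`
# as above; `verbose=1` prints targets and rounded raw outputs per batch:
#
#   test_loss, test_metric = model.evaluate(X_test, y_test, batch_size=32)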
def call(self, y_true, y_pred):
    """Computes the binary cross-entropy loss."""
    target = y_true
    output = y_pred
    if self.from_logits:
        # Squash raw logits into probabilities with a sigmoid.
        output = 1 / (1 + M.exp(-output))
    # Clip away exact 0 and 1 so the logs below stay finite.
    output = M.clip(output, M.epsilon(), 1.0 - M.epsilon())
    bce = -target * M.log(output) - (1.0 - target) * M.log(1.0 - output)
    return M.mean(bce, axis=-1)
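# Standalone numeric check of the binary cross-entropy above, assuming the
# `M` backend wraps numpy and `M.epsilon()` is a small constant like 1e-7:
import numpy as np

eps = 1e-7
y_true = np.array([1.0, 0.0, 1.0])
y_pred = np.array([0.9, 0.1, 0.8])    # already probabilities (no logits)
p = np.clip(y_pred, eps, 1.0 - eps)
bce = np.mean(-y_true * np.log(p) - (1.0 - y_true) * np.log(1.0 - p))
print(bce)  # ~0.145: mean of -log(0.9), -log(0.9), -log(0.8)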
def fit(self, X):
    """Compute the mean and std to be used for later scaling.

    Arguments:
        X: array-like
            The data used to compute the mean and standard deviation
            used for later scaling along the features axis.
    """
    # Vectorized over the features axis; equivalent to appending
    # per-column statistics one at a time.
    self.mean = M.mean(X, axis=0)
    self.std = M.std(X, axis=0)
    return self
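# A matching `transform` is assumed to exist alongside `fit`; a sketch of
# what it could look like (the epsilon guard against zero-variance
# features is an assumption, not taken from the source):
def transform(self, X):
    # Standardize each feature using the fitted statistics.
    return (X - self.mean) / (self.std + M.epsilon())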
def test_on_batch(self, X, y):
    """Test the model on a single batch of samples.

    Arguments:
        X: array-like
            Input data.
        y: array-like
            Target data.

    Returns:
        A tuple of the scalar test loss, the metric value and the raw
        model output for the batch.
    """
    output = X
    for layer in self.layers:
        # Run the forward pass in inference mode: temporarily mark the
        # layer as non-trainable, then restore its original flag.
        trainable = layer.trainable
        layer.trainable = False
        output = layer(output)
        layer.trainable = trainable
    loss = M.mean(self.loss(y, output))
    metric = self.compile_metrics(y, output)
    return loss, metric, output
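# Batch-level evaluation sketch (hypothetical `model` as above); note the
# third return value is the raw forward-pass output:
#
#   loss, metric, output = model.test_on_batch(X_batch, y_batch)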
def train_on_batch(self, X, y):
    """Runs a single gradient update on a single batch of data.

    Arguments:
        X: array-like
            Input data.
        y: array-like
            Target data.

    Returns:
        A tuple of the scalar training loss and the metric value.
    """
    # Forward pass through every layer.
    output = X
    for layer in self.layers:
        output = layer(output)
    loss = M.mean(self.loss(y, output))
    metric = self.compile_metrics(y, output)
    # Backward pass: propagate the loss gradient through the layers in
    # reverse order; each layer updates its own parameters.
    grads = self.loss.gradient(y, output)
    for layer in reversed(self.layers):
        grads = layer.backward(grads)
    return loss, metric
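# A hand-rolled training loop built on `train_on_batch`, equivalent to one
# epoch of `fit` without the bookkeeping (hypothetical `model` as above):
#
#   for X_batch, y_batch in batch_iterator(X_train, y_train, 32):
#       loss, metric = model.train_on_batch(X_batch, y_batch)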
def call(self, y_true, y_pred):
    """Computes the mean squared error between labels and predictions."""
    return M.mean(M.square(y_pred - y_true), axis=-1)
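# Quick numeric check of the mean squared error above, again assuming `M`
# wraps numpy:
import numpy as np

y_true = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.5, 2.0, 2.0])
print(np.mean(np.square(y_pred - y_true)))  # (0.25 + 0 + 1) / 3 ~ 0.417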
def binary_accuracy(self, y_true, y_pred, threshold=0.5):
    """Fraction of predictions matching the binary labels."""
    if threshold != 0.5:
        # Threshold manually and cast back so the comparison below is
        # against 0/1 values rather than booleans.
        y_pred = (y_pred > threshold).astype(y_pred.dtype)
        return M.mean(M.equal(y_true, y_pred), axis=-1)
    # The default threshold of 0.5 is equivalent to simple rounding.
    return M.mean(M.equal(y_true, M.round(y_pred)), axis=-1)
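# Numeric check of `binary_accuracy` at the default threshold, assuming
# `M` wraps numpy:
import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 1.0])
y_pred = np.array([0.6, 0.4, 0.2, 0.9])
print(np.mean(np.equal(y_true, np.round(y_pred))))  # 3 of 4 match -> 0.75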