def fit(self, inputs, targets, verbose=False):
    """Fit a linear model by full-batch gradient descent.

    A bias column of ones is prepended to ``inputs`` so the intercept is
    learned as the first weight. Returns a dict of per-epoch training
    loss/accuracy ('valid_*' lists are kept for interface symmetry but
    stay empty here).
    """
    fit_stats = {"train_loss": [], "train_acc": [], "valid_loss": [], "valid_acc": []}
    # Augment the design matrix with a leading ones column for the intercept.
    design = np.column_stack((np.ones(inputs.shape[0]), inputs))
    self.weights = self.init_method.initialize_weights((design.shape[1], ))
    for epoch in range(self.epochs):
        preds = design.dot(self.weights)
        # Total objective: data loss plus the regularization penalty.
        data_loss = self.loss.forward(np.expand_dims(preds, axis=1),
                                      np.expand_dims(targets, axis=1))
        penalty = self.regularization.regulate(self.weights)
        mse = np.sum((data_loss, penalty))
        acc = self.loss.accuracy(preds, targets)
        fit_stats["train_loss"].append(np.mean(mse))
        fit_stats["train_acc"].append(np.mean(acc))
        # Gradient of the objective w.r.t. the weights.
        grad = self.loss.backward(preds, targets).dot(design)
        grad = grad + self.regularization.derivative(self.weights)
        self.weights = self.optimizer.update(self.weights, grad, epoch, 1, 1)
        if verbose:
            print('TRAINING: Epoch-{} loss: {:2.4f} acc: {:2.4f}'.format(
                epoch + 1, mse, acc))
        else:
            computebar(self.epochs, epoch)
    return fit_stats
def fit(self, train_data, train_label, batch_size, epochs, validation_data = (), shuffle_data = True, verbose = False):
    """Train on mini-batches for ``epochs`` passes over the data.

    Per epoch, batch losses/accuracies are averaged into 'train_loss' /
    'train_acc'. When ``validation_data`` is a non-empty (data, labels)
    pair, a validation pass is recorded each epoch as well. With
    ``verbose`` a line is printed per batch (and per validation pass);
    otherwise a progress bar is shown.
    """
    fit_stats = {'train_loss': [], 'train_acc': [], 'valid_loss': [], 'valid_acc': []}
    for epoch in np.arange(epochs):
        epoch_losses, epoch_accs = [], []
        for batch_data, batch_label in minibatches(train_data, train_label, batch_size, shuffle_data):
            loss, acc = self.train_on_batch(batch_data, batch_label)
            epoch_losses.append(loss)
            epoch_accs.append(acc)
            if verbose:
                print('TRAINING: Epoch-{} loss: {:2.4f} accuracy: {:2.4f}'.format(
                    epoch + 1, loss, acc))
        fit_stats['train_loss'].append(np.mean(epoch_losses))
        fit_stats['train_acc'].append(np.mean(epoch_accs))
        if validation_data:
            val_loss, val_acc = self.test_on_batch(validation_data[0], validation_data[1])
            fit_stats['valid_loss'].append(val_loss)
            fit_stats['valid_acc'].append(val_acc)
            if verbose:
                print('VALIDATION: Epoch-{} loss: {:2.4f} accuracy: {:2.4f}'.format(
                    epoch + 1, val_loss, val_acc))
        if not verbose:
            computebar(epochs, epoch)
    return fit_stats
def fit(self, inputs, targets, verbose = False):
    """Train weights and bias by full-batch gradient descent.

    Parameters
    ----------
    inputs : ndarray, shape (n_samples, n_features)
    targets : ndarray, shape (n_samples, n_outputs)
    verbose : bool
        Print per-epoch loss/accuracy instead of a progress bar.

    Returns
    -------
    dict with per-epoch 'train_loss' and 'train_acc' lists
    ('valid_*' lists are kept for interface symmetry but stay empty).
    """
    fit_stats = {"train_loss": [], "train_acc": [], "valid_loss": [], "valid_acc": []}
    self.weights = self.init_method.initialize_weights((inputs.shape[1], targets.shape[1]))
    self.bias = np.zeros((1, targets.shape[1]))
    # FIX: build each optimizer ONCE, outside the epoch loop. The original
    # called optimize(self.optimizer) on every epoch for each parameter,
    # re-instantiating the optimizer and discarding any accumulated state
    # (momentum / Adam moments), which silently degrades stateful
    # optimizers to plain per-step updates.
    weight_optimizer = optimize(self.optimizer)
    bias_optimizer = optimize(self.optimizer)
    for i in range(self.epochs):
        linear_predictions = inputs.dot(self.weights) + self.bias
        predictions = self.activate.forward(linear_predictions)
        # Objective = data loss + regularization penalty on the weights.
        loss = self.loss.forward(predictions, targets) + self.regularization.regulate(self.weights)
        acc = self.loss.accuracy(predictions, targets)
        fit_stats["train_loss"].append(np.mean(loss))
        fit_stats["train_acc"].append(np.mean(acc))
        # Chain rule: dL/dz = dL/dy * dy/dz through the activation.
        grad = self.loss.backward(predictions, targets) * self.activate.backward(linear_predictions)
        d_weights = inputs.T.dot(grad) + self.regularization.derivative(self.weights)
        d_bias = np.sum(grad, axis = 0, keepdims = True) + self.regularization.derivative(self.bias)
        self.weights = weight_optimizer.update(self.weights, d_weights, i, 1, 1)
        self.bias = bias_optimizer.update(self.bias, d_bias, i, 1, 1)
        if verbose:
            # NOTE(review): '{:2.4f}' assumes loss/acc are scalars here —
            # confirm loss.forward reduces to a scalar.
            print('TRAINING: Epoch-{} loss: {:2.4f} acc: {:2.4f}'.format(i+1, loss, acc))
        else:
            computebar(self.epochs, i)
    return fit_stats
def evaluate(self, test_data, test_label, batch_size=128, shuffle_data=True, verbose=False):
    """Evaluate the model on test data in mini-batches.

    Returns eval_stats with 'valid_batches' (batch count) and per-batch
    'valid_loss' / 'valid_acc' lists.
    """
    eval_stats = {'valid_batches': 0, 'valid_loss': [], 'valid_acc': []}
    # NOTE(review): len() requires minibatches to return a sized sequence,
    # not a generator — confirm against its definition.
    batches = minibatches(test_data, test_label, batch_size, shuffle_data)
    eval_stats['valid_batches'] = len(batches)
    for idx, (test_batch_data, test_batch_label) in enumerate(batches):
        loss, acc = self.test_on_batch(test_batch_data, test_batch_label)
        eval_stats['valid_loss'].append(np.mean(loss))
        eval_stats['valid_acc'].append(np.mean(acc))
        if verbose:
            # FIX: the original formatted the whole eval_stats lists with
            # '{:2.4f}', which raises TypeError on the first verbose batch;
            # report the current batch's scalar loss/accuracy instead.
            print('VALIDATION: loss: {:2.4f} accuracy: {:2.4f}'.format(
                eval_stats['valid_loss'][-1], eval_stats['valid_acc'][-1]))
        else:
            computebar(eval_stats['valid_batches'], idx)
    return eval_stats
def fit_NR(self, inputs, targets, verbose=False):
    ''' Newton-Raphson Method '''
    # Second-order fit: each epoch applies one IRLS-style update
    #   W += pinv(X^T R X + reg) . X^T R . (targets - predictions)
    # where R is the diagonal matrix of per-sample activation derivatives.
    fit_stats = {
        "train_loss": [],
        "train_acc": [],
        "valid_loss": [],
        "valid_acc": []
    }
    self.weights = self.init_method.initialize_weights((inputs.shape[1], ))
    for i in range(self.epochs):
        predictions = self.activate.forward(inputs.dot(self.weights))
        # Total cost: data loss plus the regularization penalty, summed.
        # NOTE(review): np.sum over a (loss, penalty) tuple only works when
        # the two elements have compatible shapes (e.g. both scalars) —
        # confirm what loss.forward returns.
        cost = np.sum(
            (self.loss.forward(np.expand_dims(predictions, axis=1),
                               np.expand_dims(targets, axis=1)),
             self.regularization.regulate(self.weights)))
        acc = self.loss.accuracy(predictions, targets)
        fit_stats["train_loss"].append(np.mean(cost))
        fit_stats["train_acc"].append(np.mean(acc))
        # R: diagonal matrix built from the activation gradient per sample.
        diag_grad = np.diag(
            self.activate.backward(inputs.dot(self.weights)))
        # pinv (pseudo-inverse) tolerates a singular Hessian approximation.
        # NOTE(review): regularization.derivative(self.weights) is added to
        # an (n_features x n_features) matrix — presumably it broadcasts or
        # returns a matching shape; confirm against the regularizer.
        self.weights += np.linalg.pinv(
            inputs.T.dot(diag_grad).dot(inputs) +
            self.regularization.derivative(self.weights)).dot(
                inputs.T.dot(diag_grad)).dot((targets - predictions))
        if verbose:
            print('TRAINING: Epoch-{} loss: {:2.4f} acc: {:2.4f}'.format(
                i + 1, cost, acc))
        else:
            computebar(self.epochs, i)
    return fit_stats