def fit(self, X, y=None):
    """Contrastive Divergence training procedure"""
    self._initialize_weights(X)
    self.training_errors = []
    self.training_reconstructions = []
    for _ in self.progressbar(range(self.n_iterations)):
        epoch_errors = []
        for visible in batch_iterator(X, batch_size=self.batch_size):
            # Positive phase: drive the hidden units from the data.
            pos_hidden_probs = sigmoid(visible.dot(self.W) + self.h0)
            pos_hidden_states = self._sample(pos_hidden_probs)
            pos_assoc = visible.T.dot(pos_hidden_probs)

            # Negative phase: one Gibbs step back to the visible layer
            # and up again (CD-1).
            recon_probs = sigmoid(pos_hidden_states.dot(self.W.T) + self.v0)
            recon = self._sample(recon_probs)
            neg_hidden_probs = sigmoid(recon.dot(self.W) + self.h0)
            neg_assoc = recon.T.dot(neg_hidden_probs)

            # Gradient ascent on the CD-1 approximation of the
            # log-likelihood gradient.
            self.W += self.lr * (pos_assoc - neg_assoc)
            self.h0 += self.lr * (pos_hidden_probs.sum(axis=0)
                                  - neg_hidden_probs.sum(axis=0))
            self.v0 += self.lr * (visible.sum(axis=0) - recon.sum(axis=0))

            epoch_errors.append(np.mean((visible - recon) ** 2))

        self.training_errors.append(np.mean(epoch_errors))
        # Reconstruct a batch of images from the training set
        idx = np.random.choice(range(X.shape[0]), self.batch_size)
        self.training_reconstructions.append(self.reconstruct(X[idx]))
def fit(self, X, y, n_epochs, batch_size):
    """ Trains the model for a fixed number of epochs """
    for _ in self.progressbar(range(n_epochs)):
        epoch_losses = []
        for X_batch, y_batch in batch_iterator(X, y, batch_size=batch_size):
            batch_loss, _ = self.train_on_batch(X_batch, y_batch)
            epoch_losses.append(batch_loss)
        self.errors["training"].append(np.mean(epoch_losses))

        if self.val_set is not None:
            # Evaluate on the held-out set once per epoch.
            val_loss, _ = self.test_on_batch(self.val_set["X"],
                                             self.val_set["y"])
            self.errors["validation"].append(val_loss)

    return self.errors["training"], self.errors["validation"]
def fit(self, X, y, n_epochs, batch_size):
    """Train the network for a fixed number of epochs.

    Parameters:
    -----------
    X: ndarray of shape (n_samples, n_features)
        Training inputs.
    y: ndarray
        Training targets.
    n_epochs: int
        Number of full passes over the training set.
    batch_size: int
        Number of samples per mini-batch.

    Returns:
    --------
    (training_errors, validation_errors): the per-epoch error histories.
    """
    bar = progressbar.ProgressBar(widgets=bar_widgets)
    for _ in bar(range(n_epochs)):
        batch_losses = []
        for X_batch, y_batch in batch_iterator(X, y, batch_size=batch_size):
            loss, _ = self.train_on_batch(X_batch, y_batch)
            batch_losses.append(loss)
        # Fix: average over the batches actually yielded. The previous
        # `batch_error / int(n_samples / batch_size)` undercounted the
        # divisor whenever n_samples was not a multiple of batch_size
        # (a final partial batch inflated the reported epoch error).
        self.errors["training"].append(np.mean(batch_losses))

        if self.validation_set:
            # Determine validation error
            y_val_pred = self._forward_pass(self.X_val)
            validation_loss = np.mean(
                self.loss_function.loss(self.y_val, y_val_pred))
            self.errors["validation"].append(validation_loss)

    return self.errors["training"], self.errors["validation"]
def fit(self, X, y=None):
    """Contrastive Divergence training procedure"""
    self._initialize_weights(X)
    self.training_errors = []
    self.training_reconstructions = []
    n_samples = X.shape[0]
    for _ in self.progressbar(range(self.n_iterations)):
        errors_this_epoch = []
        for v_batch in batch_iterator(X, batch_size=self.batch_size):
            # Positive phase
            h_probs = sigmoid(v_batch.dot(self.W) + self.h0)
            h_states = self._sample(h_probs)
            grad_pos = v_batch.T.dot(h_probs)

            # Negative phase (a single Gibbs sampling step)
            v_probs = sigmoid(h_states.dot(self.W.T) + self.v0)
            v_sample = self._sample(v_probs)
            h_recon = sigmoid(v_sample.dot(self.W) + self.h0)
            grad_neg = v_sample.T.dot(h_recon)

            # CD-1 parameter updates for weights and both bias vectors
            self.W += self.lr * (grad_pos - grad_neg)
            self.h0 += self.lr * (h_probs.sum(axis=0) - h_recon.sum(axis=0))
            self.v0 += self.lr * (v_batch.sum(axis=0) - v_sample.sum(axis=0))

            errors_this_epoch.append(np.mean((v_batch - v_sample) ** 2))

        self.training_errors.append(np.mean(errors_this_epoch))
        # Reconstruct a batch of images from the training set
        sample_idx = np.random.choice(range(n_samples), self.batch_size)
        self.training_reconstructions.append(self.reconstruct(X[sample_idx]))