def fit(self, initial_theta, stochastic=False, adapt=False, batch_size=1, learning_rate=0.1, momentum=0.9, epoch=1000, threshold=1e-4, regularization_lambda=1.0):
    """Train a single binary classifier, storing the learned theta on self.

    Args:
        initial_theta: starting parameter vector for the optimizer.
        stochastic: if True, use mini-batch/stochastic gradient descent;
            otherwise full-batch gradient descent.
        adapt: adaptive-learning-rate flag forwarded to the stochastic trainer.
        batch_size: mini-batch size (stochastic path only).
        learning_rate: step size forwarded to whichever trainer runs.
        momentum: momentum term (full-batch path only).
        epoch: iteration budget (maxiter for full-batch, epochs for stochastic).
        threshold: convergence threshold (full-batch path only).
        regularization_lambda: L2 penalty (full-batch path only; the stochastic
            path deliberately passes l=0.0, matching fit_multi_class).

    Returns:
        (result, costs): the trainer's result dict (must contain 'theta')
        and the per-iteration cost history.
    """
    if stochastic:
        # Fixed: keyword was misspelled 'apapt' (TypeError at call time) and
        # learning_rate was hard-coded to 0.05, ignoring the parameter.
        (result, costs) = Trainer.batch_gradient_descent(
            learning_rate=learning_rate,
            adapt=adapt,
            batch_size=batch_size,
            epoch=epoch,
            costFunction=self.costFunctionReg,
            theta=initial_theta,
            X=self.X,
            Y=self.Y,
            l=0.0)
    else:
        # Fixed: learning_rate was hard-coded to 0.5, ignoring the parameter.
        (result, costs) = Trainer.gradient_descent(
            maxiter=epoch,
            learning_rate=learning_rate,
            momentum=momentum,
            threshold=threshold,
            costFunction=self.costFunctionReg,
            theta=initial_theta,
            X=self.X,
            Y=self.Y,
            l=regularization_lambda)
    self.trained_theta = result['theta']
    return result, costs
def fit_multi_class(self, initial_theta, stochastic=False, adapt=False, batch_size=1, learning_rate=0.1, momentum=0.0, epoch=1000, threshold=1e-4, regularization_lambda=1.0):
    """Train one-vs-all classifiers, one column of theta per label.

    For each label in self.labels a binary target vector is built
    (1.0 where self.Y equals the label, else 0.0) and an independent
    classifier is trained; its theta fills the matching column of the
    returned matrix, which is also stored on self.trained_theta.

    Returns:
        np.matrix of shape (len(initial_theta), len(self.labels)).
    """
    all_theta = np.matrix(np.zeros((len(initial_theta), len(self.labels))))
    for col, label in enumerate(self.labels):
        # One-vs-all binary targets for the current label.
        Yc = (self.Y == label).astype(float)
        if stochastic:
            # NOTE: stochastic path trains unregularized (l=0.0), as in the original.
            (result, cost) = Trainer.batch_gradient_descent(
                learning_rate=learning_rate, adapt=adapt,
                batch_size=batch_size, epoch=epoch,
                costFunction=self.costFunctionReg, theta=initial_theta,
                X=self.X, Y=Yc, l=0.0)
        else:
            (result, cost) = Trainer.gradient_descent(
                maxiter=epoch, learning_rate=learning_rate,
                momentum=momentum, threshold=threshold,
                costFunction=self.costFunctionReg, theta=initial_theta,
                X=self.X, Y=Yc, l=regularization_lambda)
        all_theta[:, col] = result['theta']
    self.trained_theta = all_theta
    return all_theta
def fit(self, initial_theta, stochastic=False, adapt=False, batch_size=1, learning_rate=0.1, momentum=0.9, epoch=1000, threshold=1e-4, regularization_lambda=1.0):
    """Train a single binary classifier, storing the learned theta on self.

    Args:
        initial_theta: starting parameter vector for the optimizer.
        stochastic: if True, use mini-batch/stochastic gradient descent;
            otherwise full-batch gradient descent.
        adapt: adaptive-learning-rate flag forwarded to the stochastic trainer.
        batch_size: mini-batch size (stochastic path only).
        learning_rate: step size forwarded to whichever trainer runs.
        momentum: momentum term (full-batch path only).
        epoch: iteration budget (maxiter for full-batch, epochs for stochastic).
        threshold: convergence threshold (full-batch path only).
        regularization_lambda: L2 penalty (full-batch path only; the stochastic
            path deliberately passes l=0.0, matching fit_multi_class).

    Returns:
        (result, costs): the trainer's result dict (must contain 'theta')
        and the per-iteration cost history.
    """
    if stochastic:
        # Fixed: keyword was misspelled 'apapt' (TypeError at call time) and
        # learning_rate was hard-coded to 0.05, ignoring the parameter.
        (result, costs) = Trainer.batch_gradient_descent(
            learning_rate=learning_rate,
            adapt=adapt,
            batch_size=batch_size,
            epoch=epoch,
            costFunction=self.costFunctionReg,
            theta=initial_theta,
            X=self.X,
            Y=self.Y,
            l=0.0)
    else:
        # Fixed: learning_rate was hard-coded to 0.5, ignoring the parameter.
        (result, costs) = Trainer.gradient_descent(
            maxiter=epoch,
            learning_rate=learning_rate,
            momentum=momentum,
            threshold=threshold,
            costFunction=self.costFunctionReg,
            theta=initial_theta,
            X=self.X,
            Y=self.Y,
            l=regularization_lambda)
    self.trained_theta = result['theta']
    return result, costs
def fit_multi_class(self, initial_theta, stochastic=False, adapt=False, batch_size=1, learning_rate=0.1, momentum=0.0, epoch=1000, threshold=1e-4, regularization_lambda=1.0):
    """Fit a one-vs-all model: one theta column per entry in self.labels.

    Each label gets its own binary training pass (targets are 1.0 where
    self.Y matches the label, 0.0 elsewhere). The trained vectors are
    collected column-wise into a matrix that is saved on
    self.trained_theta and returned.

    Returns:
        np.matrix of shape (len(initial_theta), len(self.labels)).
    """
    num_params = len(initial_theta)
    num_classes = len(self.labels)
    all_theta = np.matrix(np.zeros((num_params, num_classes)))

    column = 0
    for label in self.labels:
        # Binary one-vs-all target for this label.
        targets = (self.Y == label).astype(float)
        if stochastic:
            # Stochastic path runs without regularization (l=0.0), per original.
            result, cost = Trainer.batch_gradient_descent(
                learning_rate=learning_rate, adapt=adapt,
                batch_size=batch_size, epoch=epoch,
                costFunction=self.costFunctionReg, theta=initial_theta,
                X=self.X, Y=targets, l=0.0)
        else:
            result, cost = Trainer.gradient_descent(
                maxiter=epoch, learning_rate=learning_rate,
                momentum=momentum, threshold=threshold,
                costFunction=self.costFunctionReg, theta=initial_theta,
                X=self.X, Y=targets, l=regularization_lambda)
        all_theta[:, column] = result['theta']
        column += 1

    self.trained_theta = all_theta
    return all_theta