# Assumed module-level imports for these snippets (the ravop import path is an assumption):
import numpy as np
from ravop.core import Tensor, Scalar


def train(self, X, y, iter=10):
    # Linear regression: fit weights by gradient descent on the RavOp graph
    # Remove old ops and start from scratch
    self.clean()

    # Convert input values to RavOp tensors
    X = Tensor(X, name="X")
    y = Tensor(y, name="y")

    # Initialize params
    learning_rate = Scalar(self.learning_rate)
    size = X.shape[1]
    no_samples = Scalar(X.shape[0])
    weights = Tensor(np.random.uniform(0, 1, size).reshape((size, 1)), name="weights")

    # 1. Predict
    y_pred = X.matmul(weights, name="y_pred")

    # 2. Compute cost
    cost = self.__compute_cost(y, y_pred, no_samples)

    # 3. Gradient descent - update weight values
    for i in range(iter):
        y_pred = X.matmul(weights, name="y_pred{}".format(i))
        # Gradient of the cost: X^T (y_pred - y), scaled by learning_rate / no_samples
        c = X.trans().matmul(y_pred.sub(y))
        d = learning_rate.div(no_samples)
        weights = weights.sub(c.elemul(d), name="weights{}".format(i))
        cost = self.__compute_cost(y, y_pred, no_samples, name="cost{}".format(i))

    return cost, weights
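# A minimal NumPy-only sketch of the same update rule, useful for checking the
# RavOp graph locally. train_numpy and its defaults are illustrative
# assumptions, not part of the library.
import numpy as np

def train_numpy(X, y, learning_rate=0.01, iterations=10):
    n_samples, n_features = X.shape
    weights = np.random.uniform(0, 1, n_features).reshape((n_features, 1))
    for _ in range(iterations):
        y_pred = X @ weights                      # 1. predict
        grad = X.T @ (y_pred - y) / n_samples     # 2. gradient of the half-MSE cost
        weights = weights - learning_rate * grad  # 3. descent step
    cost = np.mean((X @ weights - y) ** 2) / 2    # final half-MSE cost
    return cost, weights

# Usage: cost, w = train_numpy(np.random.rand(100, 3), np.random.rand(100, 1))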
def train(self, X, y, iter=10):
    # Logistic regression: same loop, but predictions pass through a sigmoid
    # Remove old ops and start from scratch
    self.clean()

    # Convert input values to RavOp tensors
    X = Tensor(X, name="X")
    y = Tensor(y, name="y")

    # Initialize params
    learning_rate = Scalar(self._learning_rate)
    size = X.shape[1]
    no_samples = Scalar(X.shape[0])
    weights = Tensor(np.random.uniform(0, 1, size).reshape((size, 1)), name="weights")

    # 1. Predict - calculate y_pred
    y_pred = self.sigmoid(X.matmul(weights), name="y_pred")

    # 2. Compute cost
    cost = self.__compute_cost(y, y_pred, no_samples)

    # 3. Gradient descent - update weight values
    for i in range(iter):
        y_pred = self.sigmoid(X.matmul(weights), name="y_pred{}".format(i))
        # weights -= (learning_rate / no_samples) * X^T (y_pred - y)
        weights = weights.sub(
            learning_rate.div(no_samples).elemul(X.trans().matmul(y_pred.sub(y))),
            name="weights{}".format(i),
        )
        cost = self.__compute_cost(y=y, y_pred=y_pred, no_samples=no_samples, name="cost{}".format(i))

    return cost, weights
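# A minimal NumPy-only sketch of the logistic variant, again for local
# verification. sigmoid_np and train_logistic_numpy are illustrative
# assumptions, not library API.
import numpy as np

def sigmoid_np(z):
    return 1.0 / (1.0 + np.exp(-z))

def train_logistic_numpy(X, y, learning_rate=0.01, iterations=10):
    n_samples, n_features = X.shape
    weights = np.random.uniform(0, 1, n_features).reshape((n_features, 1))
    for _ in range(iterations):
        y_pred = sigmoid_np(X @ weights)          # predict probabilities
        grad = X.T @ (y_pred - y) / n_samples     # gradient of the log-loss
        weights = weights - learning_rate * grad  # descent step
    y_pred = sigmoid_np(X @ weights)
    eps = 1e-12                                   # guard against log(0)
    cost = -np.mean(y * np.log(y_pred + eps) + (1 - y) * np.log(1 - y_pred + eps))
    return cost, weights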