def log_likelihood(self):
    """Compute the penalized negative log-likelihood at the current weights.

    Reads self.features, self.labels, self.w, self.C and self.penalty.
    A tiny epsilon (1e-24) keeps log() away from zero.
    """
    # Predicted probability of the positive class for every sample.
    eps = 1e-24
    p = af.logistic(self.features.dot(self.w))
    # Per-sample Bernoulli log-likelihood.
    per_row = self.labels * np.log(p + eps) + (1 - self.labels) * np.log(1 - p + eps)
    # Negate the summed likelihood and add the regularization penalty.
    return -1 * per_row.sum() + self.C * pn.penalty[self.penalty](self.w)
def predict(self, X):
    """Predict hard class labels (0.0 or 1.0) for the samples in X.

    X is standardized with the mean/std stored at fit time before the
    logistic model is applied. Returns an array shaped like the
    probability output, thresholded at 0.5.
    """
    # Standardize using training statistics.
    features = (X - self.mean_x) / self.std_x
    probs = af.logistic(features.dot(self.w))
    # Vectorized threshold replaces the per-row Python loop. The loop
    # only thresholded p[0] of each row (assuming a single probability
    # column); this handles any output shape and avoids in-place
    # mutation of the probability array.
    return np.where(probs >= 0.5, 1.0, 0.0)
def log_likelihood(self):
    """Return the regularized negative log-likelihood of the model."""
    p = af.logistic(self.features.dot(self.w))
    # Split the two Bernoulli terms; 1e-24 guards the logs against log(0).
    pos_term = self.labels * np.log(p + 1e-24)
    neg_term = (1 - self.labels) * np.log(1 - p + 1e-24)
    penalty_term = self.C * pn.penalty[self.penalty](self.w)
    return -1 * (pos_term + neg_term).sum() + penalty_term
def log_likelihood_gradient(self):
    """Gradient of the data term minus a C/size-scaled penalty term.

    NOTE(review): pn.penalty[self.penalty] is the same callable used for
    the penalty *value* in log_likelihood — confirm it actually returns
    the penalty gradient when called here.
    """
    residual = self.labels - af.logistic(self.features.dot(self.w))
    # Sum the per-sample contributions and match the weight vector's shape.
    grad = (residual * self.features).sum(axis=0).reshape(self.w.shape)
    penalty_grad = (self.C / self.w.size) * pn.penalty[self.penalty](self.w)
    return grad - penalty_grad
def predict_proba(self, X):
    """Return the predicted probability of the positive class for each sample in X."""
    # Apply the same standardization used during training.
    standardized = (X - self.mean_x) / self.std_x
    return af.logistic(standardized.dot(self.w))
def log_likelihood_gradient(self):
    """Compute the optimizer's gradient: summed error term minus scaled penalty term."""
    p = af.logistic(self.features.dot(self.w))
    error = self.labels - p
    # Aggregate over samples, then reshape to the weight vector's shape.
    data_term = (error * self.features).sum(axis=0).reshape(self.w.shape)
    # Penalty contribution scaled by C over the number of weights.
    return data_term - (self.C / self.w.size) * pn.penalty[self.penalty](self.w)