def update_vmu(self):
    # XXX: The topics (vmus) must lie on the hypersphere, i.e. have unit L2 norm. I'm not sure if scipy has
    # an optimization method that can accommodate this type of constraint, so instead, I'm encoding it here
    # as a Lagrange multiplier. This should at least push the optimizer towards solutions close to the
    # L2 constraint.

    # Set the strength of the Lagrange multiplier to something much larger than the objective
    LAMBDA = 10.0 * self.l_vmu()

    def f():
        # Penalized objective: l_vmu minus a quadratic penalty on each topic's
        # squared-norm deviation from 1
        squared_norms = np.sum(self.vmu ** 2, axis=0)
        return self.l_vmu() - LAMBDA * np.sum((squared_norms - 1.0) ** 2)

    def g():
        # Gradient of the penalized objective; the penalty term contributes
        # -LAMBDA * 2 * (squared_norm - 1) * 2 * vmu per topic
        squared_norms = np.sum(self.vmu ** 2, axis=0)
        return self.tangent_grad_l_vmu() \
            - LAMBDA * 2.0 * (squared_norms - 1.0) * (2.0 * self.vmu)

    optimize.optimize_parameter(self, 'vmu', f, g, bounds=(-1.0, 1.0))
    self.vmu = l2_normalize(self.vmu)  # Renormalize to exactly unit norm
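
# A minimal, standalone sketch of the soft-constraint approach used in
# update_vmu above (illustrative only; the helper name and the toy objective
# <c, x> are assumptions, not part of this model). It maximizes the toy
# objective over near-unit vectors by subtracting the same quadratic penalty
# on the squared-norm deviation, then renormalizes. The exact maximizer on
# the unit sphere is c / ||c||.
def _sketch_penalized_sphere_opt():
    import numpy as np
    from scipy.optimize import minimize

    c = np.array([3.0, 4.0])
    lam = 50.0  # penalty strength, analogous to LAMBDA above

    def neg_penalized(x):
        # scipy minimizes, so negate the penalized objective
        return -(c @ x - lam * (np.sum(x ** 2) - 1.0) ** 2)

    def neg_penalized_grad(x):
        return -(c - lam * 2.0 * (np.sum(x ** 2) - 1.0) * (2.0 * x))

    result = minimize(neg_penalized, np.ones(2) / np.sqrt(2),
                      jac=neg_penalized_grad, method='L-BFGS-B',
                      bounds=[(-1.0, 1.0)] * 2)
    x = result.x / np.linalg.norm(result.x)  # renormalize, as update_vmu does
    return x  # approximately c / ||c|| = [0.6, 0.8]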
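
# For what it's worth, scipy can impose the unit-norm constraint exactly:
# scipy.optimize.minimize's SLSQP method accepts nonlinear equality
# constraints. A sketch of that alternative on the same toy objective
# (again an illustration, not wired into optimize.optimize_parameter):
def _sketch_slsqp_sphere_opt():
    import numpy as np
    from scipy.optimize import minimize

    c = np.array([3.0, 4.0])
    result = minimize(
        lambda x: -(c @ x),          # maximize <c, x> by minimizing its negative
        np.ones(2) / np.sqrt(2),     # feasible start on the unit sphere
        jac=lambda x: -c,
        method='SLSQP',
        constraints=[{'type': 'eq',  # hard constraint: ||x||^2 - 1 = 0
                      'fun': lambda x: np.sum(x ** 2) - 1.0,
                      'jac': lambda x: 2.0 * x}],
    )
    return result.x  # approximately c / ||c|| = [0.6, 0.8]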

def update_xi(self):
    optimize.optimize_parameter(self, 'xi', self.l_xi, self.grad_l_xi)

def update_alpha(self):
    optimize.optimize_parameter(self, 'alpha', self.l_alpha, self.grad_l_alpha)