def merge(self, follower, m):
    """Fold a follower's accumulated delta (dq) into the consensus vector z.

    Parameters:
        follower: creator id whose stored ``dq`` should be merged; when it
            equals ``self.creator`` the local ``self.dq`` is used directly.
        m: merge counter — larger m damps the step applied to z.

    Returns:
        False when the relative difference is already below ``self.eps``
        (converged, nothing merged); True after z has been updated.
    """
    if follower != self.creator:
        # Check the follower's delta out of the store and wrap it.
        fdq = crane.mantisStore.load_one(
            {'name': self.name, 'creator': follower},
            {'dq': True}).get('dq', [])
        fdq = FlexibleVector(generic=fdq)
    else:
        fdq = self.dq
    # Relative magnitude of the incoming delta vs. current consensus;
    # EPS guards both norms against a 0/0.
    rd = (fdq.norm() + EPS) / (self.panda.z.norm() + EPS)
    if rd < self.eps:
        logger.debug('Converged, no need to merge')
        return False
    # Damped consensus update. 1.0/self.rho forces true division even if
    # rho is an integer (matches the explicit float() coercion in train()).
    self.panda.z.add(fdq, 1.0 / (m + 1.0 / self.rho))
    logger.debug('m = {0}'.format(m))
    logger.debug('update z {0}'.format(self.panda.z))
    logger.debug('relative difference of z {0}'.format(rd))
    metricValue(self, 'rz', rd)
    #self.panda.update_fields({self.panda.FCONSENSUS:self.panda.z.generic()})
    if fdq is not self.dq:
        # Release the temporary vector loaded from the store.
        del fdq
    return True
def merge(self, follower, m):
    """Merge a follower's delta vector into the consensus vector z.

    Returns False when the relative difference is already below
    ``self.eps`` (converged); otherwise applies a damped update to z
    and returns True.
    """
    if follower == self.creator:
        fdq = self.dq
    else:
        # Fetch the follower's stored delta and wrap it in a vector.
        loaded = crane.mantisStore.load_one(
            {'name': self.name, 'creator': follower},
            {'dq': True},
        ).get('dq', [])
        fdq = FlexibleVector(generic=loaded)

    # Relative size of the delta w.r.t. the current consensus (EPS avoids 0/0).
    rd = (fdq.norm() + EPS) / (self.panda.z.norm() + EPS)
    if rd < self.eps:
        logger.debug('Converged, no need to merge')
        return False

    # Damped consensus step; the weight shrinks as the merge counter m grows.
    self.panda.z.add(fdq, 1.0 / (m + 1 / self.rho))
    logger.debug('m = {0}'.format(m))
    logger.debug('update z {0}'.format(self.panda.z))
    logger.debug('relative difference of z {0}'.format(rd))
    metricValue(self, 'rz', rd)
    #self.panda.update_fields({self.panda.FCONSENSUS:self.panda.z.generic()})
    if fdq is not self.dq:
        # Drop the temporary vector that was loaded from the store.
        del fdq
    return True
def train(self, leader):
    """Run one local training round against the consensus vector.

    Steps: check out z (from the leader's store, or locally), update the
    dual variable mu, retrain the solver, then recompute the proximal
    point q as a rho/(rho+gamma)-weighted combination and commit.

    Parameters:
        leader: creator id to check z out from; falsy means use the
            local ``self.panda.z``.
    """
    if not self.data:
        logger.debug('no data, skip training')
        return
    logger.debug('gamma in mantis {0}'.format(self.gamma))

    # check out z
    if leader:
        raw = crane.pandaStore.load_one(
            {'name': self.panda.name, 'creator': leader},
            {'z': True},
        ).get('z', [])
        z = FlexibleVector(generic=raw)
    else:
        z = self.panda.z
    if z is None:
        logger.debug('no consensus checked out')
        return
    #metricAbs(metricLog, self, '|z|', z)
    #metricAbs(metricLog, self, '|q|', self.q)
    metricRelAbs(self, 'z~q', self.q, z)

    # update mu: mu += q - z, with dq tracking the change in mu.
    # NOTE: these in-place add/clear calls are order-dependent.
    self.dq.clear()
    self.dq.add(self.mu, -1)
    self.mu.add(self.q, 1)
    self.mu.add(z, -1)
    self.dq.add(self.mu, 1)
    metricAbs(self, 'mu', self.mu)
    #metricAbs(metricLog, self, '|dmu|', self.dq)
    #metricValue(metricLog, self, 'sup(mu)', 2 * self.solver.num_instances * self.solver.maxxnorm() * z.norm())

    # update w
    self.solver.setModel0(z, self.mu)
    #loss = self.solver.status()
    #metricValue(metricLog, self, 'loss', loss)
    #metricRelAbs(metricLog, self, '|q~w|', self.q, self.panda.weights)
    #logger.debug('q = {0}'.format(self.q))
    #logger.debug('w = {0}'.format(self.panda.weights))
    self.solver.trainModel()
    loss = self.solver.status()
    metricValue(self, 'loss', loss)
    metricValue(self, 'x', self.solver.maxxnorm())

    # update q: q = r*z + (1-r)*w - r*mu, r = rho / (rho + gamma);
    # float() guards against integer division on the ratio.
    r = self.rho / float(self.rho + self.gamma)
    self.dq.add(self.q, -1)
    self.q.clear()
    self.q.add(z, r)
    self.q.add(self.panda.weights, 1 - r)
    self.q.add(self.mu, -r)
    self.dq.add(self.q, 1)
    if z is not self.panda.z:
        # Release the temporary vector checked out from the store.
        del z
    logger.debug('q = {0}'.format(self.q))
    logger.debug('w = {0}'.format(self.panda.weights))

    # measure convergence
    #metricAbs(self, '|dq|', self.dq)
    #metricAbs(self, '|q|', self.q)
    metricRelAbs(self, 'q~w', self.q, self.panda.weights)

    # commit changes
    self.panda.update_fields(
        {self.panda.FWEIGHTS: self.panda.weights.generic()})
    self.commit()
def train(self, leader):
    """One consensus-training iteration for this worker.

    Checks the consensus vector out of the panda store when a leader is
    given (otherwise uses the local copy), folds q - z into the dual
    variable mu, retrains the underlying solver, rebuilds the proximal
    point q, and commits the updated weights.
    """
    if not self.data:
        logger.debug('no data, skip training')
        return
    logger.debug('gamma in mantis {0}'.format(self.gamma))

    # check out z
    if leader:
        zgeneric = crane.pandaStore.load_one(
            {'name': self.panda.name, 'creator': leader},
            {'z': True}).get('z', [])
        consensus = FlexibleVector(generic=zgeneric)
    else:
        consensus = self.panda.z
    if consensus is None:
        logger.debug('no consensus checked out')
        return
    #metricAbs(metricLog, self, '|z|', consensus)
    #metricAbs(metricLog, self, '|q|', self.q)
    metricRelAbs(self, 'z~q', self.q, consensus)

    # update mu (dual ascent step): mu += q - z; dq records the delta.
    # The mutation order below matters — do not reorder.
    self.dq.clear()
    self.dq.add(self.mu, -1)
    self.mu.add(self.q, 1)
    self.mu.add(consensus, -1)
    self.dq.add(self.mu, 1)
    metricAbs(self, 'mu', self.mu)
    #metricAbs(metricLog, self, '|dmu|', self.dq)
    #metricValue(metricLog, self, 'sup(mu)', 2 * self.solver.num_instances * self.solver.maxxnorm() * consensus.norm())

    # update w: seed the solver with (z, mu) and retrain.
    self.solver.setModel0(consensus, self.mu)
    #loss = self.solver.status()
    #metricValue(metricLog, self, 'loss', loss)
    #metricRelAbs(metricLog, self, '|q~w|', self.q, self.panda.weights)
    #logger.debug('q = {0}'.format(self.q))
    #logger.debug('w = {0}'.format(self.panda.weights))
    self.solver.trainModel()
    loss = self.solver.status()
    metricValue(self, 'loss', loss)
    metricValue(self, 'x', self.solver.maxxnorm())

    # update q as the blend r*z + (1-r)*w - r*mu where r = rho/(rho+gamma).
    ratio = self.rho / float(self.rho + self.gamma)
    self.dq.add(self.q, -1)
    self.q.clear()
    self.q.add(consensus, ratio)
    self.q.add(self.panda.weights, 1 - ratio)
    self.q.add(self.mu, -ratio)
    self.dq.add(self.q, 1)
    if consensus is not self.panda.z:
        # Free the store-checked-out copy; the local z is left alone.
        del consensus
    logger.debug('q = {0}'.format(self.q))
    logger.debug('w = {0}'.format(self.panda.weights))

    # measure convergence
    #metricAbs(self, '|dq|', self.dq)
    #metricAbs(self, '|q|', self.q)
    metricRelAbs(self, 'q~w', self.q, self.panda.weights)

    # commit changes
    self.panda.update_fields({self.panda.FWEIGHTS: self.panda.weights.generic()})
    self.commit()