def manager_change(self, managers):
    """Reconcile local state against the current set of managers.

    For each newly seen manager, read its hash keys from ZooKeeper and
    record them in ``self.managers`` and ``self.key_to_manager``. For each
    manager that disappeared, drop its key mappings. Afterwards, recompute
    the owner of every endpoint, refresh the manager IP list, and kick the
    loadbalancer so the new topology takes effect.

    :param managers: iterable of currently-registered manager identifiers.
    """
    # Register newly discovered managers and their key mappings.
    for manager in managers:
        if manager not in self.managers:
            # Read the key and update all mappings.
            keys = self.zk_conn.read(paths.manager_keys(manager)).split(",")
            logging.info("Found manager %s with %d keys.", manager, len(keys))
            self.managers[manager] = keys
            for key in keys:
                self.key_to_manager[key] = manager

    # Collect departed managers first, then delete, so we never mutate
    # self.managers while iterating over it.
    managers_to_remove = \
        [manager for manager in self.managers if manager not in managers]
    for manager in managers_to_remove:
        keys = self.managers[manager]
        logging.info("Removing manager %s with %d keys.", manager, len(keys))
        for key in keys:
            # Only drop the mapping if the key still points at the departed
            # manager; a surviving manager may have re-claimed it above.
            if self.key_to_manager.get(key) == manager:
                del self.key_to_manager[key]
        del self.managers[manager]

    # Recompute all endpoint owners.
    for endpoint in self.endpoints.values():
        self.manager_select(endpoint)

    # Reload all managers IPs. (List comprehension rather than map() so the
    # result is a concrete list on both Python 2 and 3.)
    self.manager_ips = \
        [BackendIP(ip) for ip in self.zk_conn.list_children(paths.manager_ips())]

    # Kick the loadbalancer.
    self.reload_loadbalancer()
def __determine_manager_keys(self, key_num):
    """Ensure this manager holds exactly ``key_num`` hash keys and publish them.

    Random MD5 hex keys are appended until the list is long enough, and any
    surplus keys are dropped from the end. The final key list is written to
    ZooKeeper as an ephemeral node so it disappears with this manager.

    :param key_num: desired number of hash keys for this manager.
    """
    # Generate keys until we have enough.
    while len(self.manager_keys) < key_num:
        # A random hash key to associate with this manager. The uuid string
        # is encoded explicitly: hashlib.md5() requires bytes on Python 3,
        # and the digest is unchanged on Python 2.
        self.manager_keys.append(
            hashlib.md5(str(uuid.uuid4()).encode("utf-8")).hexdigest())

    # Drop any surplus keys from the end in a single step.
    del self.manager_keys[key_num:]

    # Write out our associated hash keys as an ephemeral node.
    key_string = ",".join(self.manager_keys)
    self.zk_conn.write(paths.manager_keys(self.uuid), key_string, ephemeral=True)
    logging.info("Generated %d keys.", len(self.manager_keys))