def split_work(work, across):
    """Partition *work* into *across* roughly load-balanced portions.

    Each element of *work* is a tuple whose load is element[1] * element[2].
    Returns a list of *across* portions; any leftover items are merged
    into randomly chosen portions.
    """
    portions = []
    if across == 0:
        logger.info('Nothing to split work across')
        return portions

    # Average load each portion should end up carrying.
    mean_load = sum(item[1] * item[2] for item in work) / across

    # Heaviest items first so large chunks are placed early.
    work = sorted(work, key=lambda item: item[1] * item[2], reverse=True)

    # Coroutine that alternates the ceiling applied to the mean load.
    toggler = toggle_ceils()
    toggler.send(None)

    while True:
        # Carve out one mean-sized portion and deduct it from the pool.
        portion = raccumulate(deepcopy(work), toggler.send(mean_load), [])
        work = deduct(portion, work)
        portions.append(portion)
        if len(portions) == across:
            break

    # Whatever remains is merged into a randomly picked portion.
    for pending in work:
        target = portions[random.randint(0, across - 1)]
        for pos, item in enumerate(target):
            if item[0] == pending[0]:
                # Same entry already present: accumulate the second field.
                target[pos] = (item[0], item[1] + pending[1], item[2])
                break
        else:
            target.append(pending)

    return portions
def on_state_conf_change(self, data, stat, event):
    """Callback fired when the state conf znode changes.

    Redistributes work across the currently registered toolchains.
    """
    if not (event and event.type == EventType.CHANGED):
        return
    logger.info('Handling state conf change')
    # Only distribute when at least one toolchain is registered.
    toolchains = self.zk.get_children(self.path.toolchain())
    if toolchains:
        self._distribute(data, toolchains)
def create(conf_path):
    """Create the conf file from the template if it does not exist, then
    return a Conf wrapping it.

    Also touches an empty ``magic.ini`` next to the conf file.
    """
    if not path.exists(conf_path):
        logger.info('Generating conf from template')
        # Copy main conf
        shutil.copyfile(CONF_TEMPLATE, conf_path)
        # Touch magic conf; a context manager guarantees the handle is
        # closed even if opening/truncating raises.
        with open(path.join(path.dirname(conf_path), 'magic.ini'), 'w'):
            pass
    return Conf(conf_path)
def distribute_work(conf, toolchains):
    """Split the programs described by *conf* across *toolchains*.

    Returns a mapping of toolchain -> assigned work split.
    """
    logger.info('distributing work across toolchains')
    parsed = parse_content(conf)
    prog_tuples = extract_prog_tuples(parsed)
    splits = split_work(prog_tuples, len(toolchains))
    # Pair each toolchain with its split.
    return dict(zip(toolchains, splits))
def setup(self):
    """Push the conf into the eye and start watching the conf file."""
    logger.info('Setting up Sauron')
    if self.override_state:
        # Discard any previously persisted state conf.
        self.eye.set_state_conf(self.conf)
    else:
        # Merge the provided conf into the persisted state conf.
        self.eye.set_base_conf(self.conf)
    # Register a file watcher on the conf path.
    watcher = inotify.INotify()
    watcher.startReading()
    watcher.watch(filepath.FilePath(self.conf_path),
                  callbacks=[self.on_conf_change])
    # NOTE(review): attribute is spelled 'notifer' (sic) — kept as-is
    # because on_conf_change reads the same attribute name.
    self.notifer = watcher
def on_conf_change(self, ignore, fp, mask):
    """Callback to be invoked when the file at config path changes."""
    # Only deletion-of-self and modification events are acted upon.
    if mask not in (inotify.IN_DELETE_SELF, inotify.IN_MODIFY):
        return
    if mask == inotify.IN_DELETE_SELF:
        logger.info('Handling conf path change')
        # The watched inode is gone; re-arm a fresh watcher on the path.
        self.notifer.stopReading()
        watcher = inotify.INotify()
        watcher.startReading()
        watcher.watch(filepath.FilePath(fp.path),
                      callbacks=[self.on_conf_change])
        self.notifer = watcher
    # In both cases, re-read the file and push it as the base conf.
    with fp.open() as conf:
        self.eye.set_base_conf(conf.read())
def make_orc(self, conf_path, zk_host, zk_port):
    """Build an Orc wired to ZooKeeper and a local Supervisor.

    Any argument passed as None falls back to a default or an
    environment variable (ZK_HOST, ZK_PORT, ORC_HOST).
    """
    logger.info('Making Orc')
    if conf_path is None:
        conf_path = os.path.join(os.getcwd(), 'supervisord.conf')
        conf = Conf.create(conf_path)
    else:
        conf = Conf(conf_path)
    if zk_host is None:
        zk_host = os.environ.get('ZK_HOST', 'localhost')
    if zk_port is None:
        # Environment values are strings while the literal default is an
        # int; normalize to int so the port's type does not depend on
        # where it came from.
        zk_port = int(os.environ.get('ZK_PORT', 2181))
    # Prefer an explicitly configured host over the resolved one.
    orc_host = socket.gethostbyname(socket.gethostname())
    orc_host = os.environ.get('ORC_HOST', orc_host)
    supervisor = Supervisor(conf)
    return Orc(zk_host, zk_port, supervisor, orc_host)
def start_server():
    """Build Sauron from CLI options and run the Twisted reactor."""
    conf = build_conf(parse_opts())
    sauron = SauronFactory().make_sauron(**conf)
    # TCP control interface.
    reactor.listenTCP(conf['master_tcp_port'],
                      SuperwiserTCPFactory(sauron))
    logger.info('Adding web interface')
    reactor.listenTCP(conf['master_web_port'],
                      SuperwiserWebFactory().make_site(sauron))
    # Tear Sauron down cleanly when the reactor shuts down.
    reactor.addSystemEventTrigger('before', 'shutdown', sauron.teardown)
    logger.info('Starting server now')
    reactor.run()
def on_toolchains(self, children, event):
    """React to toolchain membership changes.

    Joins trigger a redistribution; drops notify registered callbacks
    and optionally redistribute.
    """
    if not event:
        return
    current = set(children)
    previous = set(self.toolchains)
    if current - previous:
        logger.info('Toolchain joined')
        self.distribute()
    elif previous - current:
        logger.info('Toolchain left')
        # Hit registered drop callbacks with the new node count.
        for cb in self.node_drop_callbacks:
            payload = {
                'node_count': len(children),
                'event': 'toolchain_dropped',
                'token': cb.get('auth_token', ''),
            }
            requests.post(cb['url'], json=payload)
        if self.auto_redistribute:
            self.distribute()
    self.toolchains = children
def on_base_conf_change(self, data, stat, event):
    """Rebuild the state conf whenever the base conf znode changes.

    Programs already present in the state conf keep their stored
    (numprocs, weight) pair; new programs take the base conf values.
    """
    if not (event and event.type == EventType.CHANGED):
        return
    logger.info('Handling base conf change')
    # Current per-program (numprocs, weight) from the state conf.
    state_programs = {
        name: (numprocs, weight)
        for name, numprocs, weight in extract_prog_tuples(
            parse_content(self.get_state_conf()))
    }
    base_conf = parse_content(data)
    # Rebuild the program tuples, preferring stored state values.
    prog_tuples = [
        (name,) + state_programs.get(name, (numprocs, weight))
        for name, numprocs, weight in extract_prog_tuples(base_conf)
    ]
    # Persisting the state conf triggers distribution downstream.
    self.set_state_conf(unparse(build_conf(prog_tuples, base_conf)))
def restore_state(self):
    """Replace the state conf with the backed-up copy."""
    logger.info('Restoring state')
    backup = self.get_bkp_conf()
    self.set_state_conf(backup)
def backup_state(self):
    """Snapshot the current state conf into the backup znode."""
    logger.info('Backing up state')
    current = self.get_state_conf()
    self.zk.set(self.path.stateconfbkp(), current)
def teardown(self):
    """Shut down the ZooKeeper client (stop, then close)."""
    logger.info('Tearing down the eye of Mordor!')
    # Stop the client's threads before releasing its resources.
    self.zk.stop()
    self.zk.close()
def get_bkp_conf(self):
    """Return the backed-up conf stored in ZooKeeper."""
    logger.info('Getting backed up conf')
    # zk.get returns a (data, stat) pair; only the data is needed.
    return self.zk.get(self.path.stateconfbkp())[0]
def set_state_conf(self, conf):
    """Write *conf* into the state conf znode."""
    logger.info('Updating state conf')
    znode = self.path.stateconf()
    self.zk.set(znode, conf)