def move_devices(self, balance_changes, balance_backing_device=True):
    # timer = Timer("Timer move_devices")
    # logging.info("\x1b[37mMoving devices:\x1b[39m")
    if not balance_changes:
        # logging.info("no balance changes")
        return

    for dev_id, (old_worker, new_worker) in balance_changes.items():
        # logging.info("\x1b[37mdev: %s from worker: %s to worker: %s"
        #              "\x1b[39m\n" %
        #              (dev_id, old_worker["id"], new_worker["id"]))
        # timer.checkpoint("dev %s" % (dev_id,))
        vhost_write(Vhost.INSTANCE.devices[dev_id], "worker",
                    new_worker["id"])
        # timer.checkpoint("dev %s vhost_write" % (dev_id,))
        Vhost.INSTANCE.devices[dev_id]["worker"] = new_worker["id"]

        new_worker["dev_list"].append(dev_id)
        old_worker["dev_list"].remove(dev_id)
        # timer.checkpoint("dev %s end" % (dev_id,))

    # timer.checkpoint("before backing_devices_manager.balance")
    if balance_backing_device:
        self.backing_devices_manager.balance(self.io_workers)
        self.backing_devices_manager.update()
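# Usage sketch (illustrative only: the device and worker ids below are
# hypothetical, and "io_manager" stands for whatever object owns
# move_devices). balance_changes maps each device id to the
# (old_worker, new_worker) pair it should migrate between:
#
#     workers = Vhost.INSTANCE.workers
#     changes = {"vhost-1234": (workers["w.0"], workers["w.1"])}
#     io_manager.move_devices(changes, balance_backing_device=False)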
def enable_shared_workers():
    logging.info("\x1b[37menable shared IO workers - start polling."
                 "\x1b[39m\n")
    for vq_id, vq in Vhost.INSTANCE.queues.items():
        if vq["can_poll"] == 0 or vq["poll"] == 1:
            continue
        # logging.info("\x1b[37mvq=%s start polling.\x1b[39m" % (vq_id,))
        vhost_write(vq, "poll", 1)
def disable_shared_workers(self):
    self.poll_policy.disable_shared_workers()
    self.io_workers = []

    vhost = Vhost.INSTANCE
    # create a dedicated worker for each device
    for dev in self.devices[1:]:
        worker_id = self._add_io_worker()
        vhost_write(vhost.devices[dev.id], "worker", worker_id)
        # 0xFF allows the dedicated worker to run on any of cores 0-7
        vhost_worker_set_cpu_mask(vhost.workers[worker_id], 0xFF)
    vhost.vhost_light.update(rescan=True)

    self.backing_devices_manager.balance(self.io_workers)
    self.backing_devices_manager.update()
def _add_io_worker(new_io_core=0):
    # add a new worker to the I/O cores
    vhost = Vhost.INSTANCE
    vhost_write(vhost.workersGlobal, "create", new_io_core)
    new_worker_id = vhost_read(vhost.workersGlobal, "create").strip()
    vhost.update_all_entries_with_id(new_worker_id)

    vhost.workers[new_worker_id]["cpu_usage_counter"] = \
        get_cpu_usage(vhost.workers[new_worker_id]["pid"])
    # vhost.workers[new_worker_id]["cpu_usage_counter"] = \
    #     ProcessCPUUsageCounter(new_worker_id)
    vhost_worker_set_cpu_mask(vhost.workers[new_worker_id],
                              1 << new_io_core)
    # logging.info("Added Worker: {id: %s, cpu: %d}" % (new_worker_id,
    #                                                   new_io_core))
    vhost.vhost_light.update(rescan=True)
    return new_worker_id
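# The create handshake above is write-then-read on the same attribute:
# writing a core number to "create" asks vhost for a new worker, and
# reading "create" back returns the id it was assigned. A minimal sketch
# of that round-trip in isolation (hypothetical helper name, same layout
# this module already assumes):
#
#     def create_worker_on_core(core):
#         vhost = Vhost.INSTANCE
#         vhost_write(vhost.workersGlobal, "create", core)
#         return vhost_read(vhost.workersGlobal, "create").strip()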
def _remove_io_worker(removed_worker):
    # logging.info("removed_worker: %s" % (removed_worker["id"],))
    # remove the worker from the cores
    vhost = Vhost.INSTANCE
    workers = vhost.workers

    # lock the worker
    vhost_write(removed_worker, "locked", 1)
    removed_worker["locked"] = 1

    # remove the worker from the workers dictionary
    # removed_worker_dev_ids = vhost_read(removed_worker,
    #                                     "dev_list").strip().split("\t")
    # logging.info("removed_worker_dev_ids: %s" %
    #              (removed_worker_dev_ids,))
    # assert not removed_worker_dev_ids
    del workers[removed_worker["id"]]
    vhost_write(vhost.workersGlobal, "remove", removed_worker["id"])
    vhost.vhost_light.update(rescan=True)
def update_polling(self):
    if not self.shared_workers:
        return

    cycles_this_epoch = Vhost.INSTANCE.vhost["cycles_this_epoch"]
    cycles = Vhost.INSTANCE.vhost["cycles"]
    for vq_id, vq in Vhost.INSTANCE.queues.items():
        # respect the cooling-off period between actions on a queue
        if self.vqs_data[vq_id]["epochs_last_action"] < \
                self.cooling_off_period:
            self.vqs_data[vq_id]["epochs_last_action"] += 1
            continue
        if vq["can_poll"] == 0 and vq["poll"] == 0:
            continue

        # checking for stop: the queue has been idle too long while polling
        poll_idle = cycles - vq["last_poll_cycles"]
        if vq["poll"] == 1 and poll_idle > self.poll_stop_idle:
            logging.info("vq: %s" % (vq_id,))
            # logging.info("poll: %d" % (vq["poll"],))
            logging.info("poll_cycles: %s" % (vq["poll_cycles"],))
            logging.info("last_poll_cycles: %d" %
                         (vq["last_poll_cycles"],))
            logging.info("cycles: %d" % (cycles,))
            logging.info("\x1b[37mvq=%s stop polling, poll_idle: "
                         "%d.\x1b[39m" % (vq_id, poll_idle))
            vhost_write(vq, "poll", 0)
            continue

        # checking for start rate
        notif_works_this_epoch = vq["notif_works_this_epoch"]
        cycles_per_work = cycles_this_epoch / notif_works_this_epoch \
            if notif_works_this_epoch != 0 else float("inf")
        if vq["poll"] == 0 and cycles_per_work < self.poll_start_rate:
            logging.info("vq: %s" % (vq_id,))
            # logging.info("poll: %d" % (vq["poll"],))
            logging.info("\x1b[37mvq=%s start polling, "
                         "cycles_per_work: %d.\x1b[39m" %
                         (vq_id, cycles_per_work))
            vhost_write(vq, "poll", 1)
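# Worked example of the hysteresis above (numbers invented for
# illustration): with poll_start_rate = 10**4 and poll_stop_idle = 10**8,
# a queue that handled 5 * 10**4 notification works in an epoch of 10**8
# cycles has
#
#     cycles_per_work = 10**8 / (5 * 10**4) = 2000 < poll_start_rate
#
# so polling starts; later, once cycles - last_poll_cycles grows past
# 10**8 idle cycles, polling stops again. cooling_off_period epochs must
# elapse before the same queue can flip state once more.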
if len(sys.argv) != 5:
    usage(sys.argv[0], "expected 4 arguments, got %d" %
          (len(sys.argv) - 1,))

# initialize vhost
Vhost.initialize()
Vhost.INSTANCE.update(False)

workers = Vhost.INSTANCE.workers
devices = Vhost.INSTANCE.devices

if sys.argv[1] not in devices:
    usage(sys.argv[0], "device %s not found!" % (sys.argv[1],))
dev = devices[sys.argv[1]]

if sys.argv[2] not in workers:
    usage(sys.argv[0], "worker %s not found!" % (sys.argv[2],))
worker_1 = workers[sys.argv[2]]

if sys.argv[3] not in workers:
    usage(sys.argv[0], "worker %s not found!" % (sys.argv[3],))
worker_2 = workers[sys.argv[3]]

try:
    interval = float(sys.argv[4])
except ValueError:  # float() raises ValueError, not IOError
    usage(sys.argv[0], "%s is not a number!" % (sys.argv[4],))

# bounce the device between the two workers forever
while True:
    vhost_write(dev, "worker", worker_1["id"])
    time.sleep(interval)
    vhost_write(dev, "worker", worker_2["id"])
    time.sleep(interval)
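# Example invocation (hypothetical script name and ids; the device and
# worker ids must exist under Vhost.INSTANCE):
#
#     python bounce_worker.py vhost-1234 w.0 w.1 0.5
#
# moves device vhost-1234 back and forth between workers w.0 and w.1
# every half second until interrupted.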
def _stop_polling():
    for vq_id, vq in Vhost.INSTANCE.queues.items():
        if vq["can_poll"] == 0 and vq["poll"] == 0:
            continue
        # logging.info("\x1b[37mvq=%s stop polling.\x1b[39m" % (vq_id,))
        vhost_write(vq, "poll", 0)
def vhost_remove_worker(removed_worker):
    workers_global = Vhost.INSTANCE.workersGlobal
    # lock the worker before asking vhost to remove it
    vhost_write(removed_worker, "locked", 1)
    vhost_write(workers_global, "remove", removed_worker["id"])
def vhost_worker_create():
    workers_global = Vhost.INSTANCE.workersGlobal
    vhost_write(workers_global, "create", 0)
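# Note: vhost_worker_create only requests a worker on core 0 and never
# reads the new id back; _add_io_worker above is the fuller variant that
# reads "create" to learn the id, tracks its CPU usage, and pins it.
# If the id is needed afterwards, the same read-back applies (sketch,
# mirroring _add_io_worker):
#
#     new_id = vhost_read(Vhost.INSTANCE.workersGlobal, "create").strip()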
cpu_mask = parse_cpu_mask_from_cpu_list(worker_conf["cpu"])
worker = Vhost.INSTANCE.workers[worker_mapping[worker_conf["id"]]]
msg("io worker: worker_conf: %s, worker: %s, cpu_mask: %x" %
    (worker_conf["id"], worker["id"], cpu_mask))
vhost_worker_set_cpu_mask(worker, cpu_mask)

# move devices to the correct workers and fix workers affinity
msg("move devices to the correct workers and fix workers affinity")
for vm in vms_conf:
    for dev in vm["devices"]:
        # move devices to the correct workers
        if dev["vhost_worker"] != worker_mapping[dev["vhost_worker"]]:
            dev["vhost_worker"] = worker_mapping[dev["vhost_worker"]]
            # msg(dev["vhost_worker"])
            vhost_write(devices[dev["id"]], "worker",
                        dev["vhost_worker"])

# update the configuration file
for worker in config["workers"]:
    worker["id"] = worker_mapping[worker["id"]]

# start polling
msg("start polling")
for q in queues.values():
    can_poll = vhost_read(q, "can_poll")
    if can_poll == "0":
        continue
    vhost_write(q, "poll", "1")

workers_for_removal = len(workers) - len(workers_conf)
msg("workers for removal: %s" % (workers_for_removal,))
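# Hedged sketch of the cpu-list parsing the fragment above relies on;
# the real parse_cpu_mask_from_cpu_list is defined elsewhere in the
# repo, so this only illustrates the assumed contract
# ("0,2-3" -> 0b1101):
#
#     def parse_cpu_mask_from_cpu_list(cpu_list):
#         mask = 0
#         for part in cpu_list.split(","):
#             lo, _, hi = part.partition("-")
#             for cpu in range(int(lo), int(hi or lo) + 1):
#                 mask |= 1 << cpu
#         return mask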