def execute(self, command, *args, **kargs):
    """Dispatch an RPC-style command against the project registry.

    Supported commands: "show" (by project_id, project_name, all_projects,
    or the shared quota when no selector is given), "GET_SHARED_QUOTA",
    "GET_PROJECTS" and "GET_PROJECT".

    :raises Exception: when the requested project is missing or the
                       command is unknown
    """
    if command == "GET_SHARED_QUOTA":
        return SharedQuota()
    if command == "GET_PROJECTS":
        return self.projects.values()
    if command == "GET_PROJECT":
        return self.getProject(*args, **kargs)
    if command != "show":
        raise Exception("command=%r not supported!" % command)

    # "show": resolve by id first, then by name, then all, else the
    # shared quota as the default answer.
    project_id = kargs.get("project_id", None)
    project_name = kargs.get("project_name", None)

    if project_id:
        match = self.projects.get(project_id, None)
        if match:
            return match
        raise Exception("project (id=%r) not found!" % project_id)

    if project_name:
        for candidate in self.projects.values():
            if candidate.getName() == project_name:
                return candidate
        raise Exception("project (name=%r) not found!" % project_name)

    if kargs.get("all_projects", None):
        return self.projects.values()

    return SharedQuota()
def execute(self, command, *args, **kargs):
    """Dispatch an RPC-style command against the quota manager.

    Supported commands: "GET_PRIVATE_QUOTA" (kwargs: "id" and/or "name"
    identify the project) and "GET_SHARED_QUOTA".

    :raises SynergyError: when the project is not found or the command
                          is unknown
    """
    if command == "GET_PRIVATE_QUOTA":
        prj_id = kargs.get("id", None)
        prj_name = kargs.get("name", None)
        project = self.project_manager.getProject(prj_id, prj_name)
        if project:
            return project.getQuota()
        # NOTE: the original code had an unreachable
        # "if project: return project" after this raise; removed.
        raise SynergyError("project not found!")
    elif command == "GET_SHARED_QUOTA":
        return SharedQuota()
    else:
        raise SynergyError("command %r not supported!" % command)
def run(self):
    """Worker main loop: drain the queue and build servers on quota.

    Each item is re-checked against Nova (stale servers are dropped), the
    project quota is allocated (blocking when the server is ephemeral and
    the shared quota is disabled), a token is derived from the request's
    trust, and the server is built.  Items that cannot be satisfied are
    parked in ``queue_items`` and reinserted as soon as SharedQuota signals
    a release; once ``backfill_depth`` items are parked the worker waits.
    """
    LOG.info("Worker %s running!" % self.name)
    queue_items = []
    last_release_time = SharedQuota.getLastReleaseTime()

    while not self.exit and not self.queue.isClosed():
        if last_release_time < SharedQuota.getLastReleaseTime():
            # Shared quota was released: give the parked items another try.
            last_release_time = SharedQuota.getLastReleaseTime()
            while queue_items:
                self.queue.reinsertItem(queue_items.pop(0))

        if len(queue_items) >= self.backfill_depth:
            # Too many unsatisfiable items parked: wait for a release
            # instead of spinning on the queue.
            SharedQuota.wait()
            continue

        queue_item = self.queue.getItem(blocking=False)

        if queue_item is None:
            if self.queue.getSize():
                # Queue is non-empty but nothing dequeuable right now.
                SharedQuota.wait()
                continue
            else:
                queue_item = self.queue.getItem(blocking=True)

        if queue_item is None:
            continue

        try:
            request = Request.fromDict(queue_item.getData())
            user_id = request.getUserId()
            prj_id = request.getProjectId()
            context = request.getContext()
            server = request.getServer()
            server_id = server.getId()

            try:
                s = self.nova_manager.getServer(server_id, detail=True)

                if s.getState() != "building":
                    # or server["OS-EXT-STS:task_state"] != "scheduling":
                    # The server moved on (or was deleted): stale request.
                    self.queue.deleteItem(queue_item)
                    continue
            except Exception as ex:
                LOG.warn("the server %s is not anymore available!"
                         " (reason=%s)" % (server_id, ex))
                self.queue.deleteItem(queue_item)
                continue

            quota = self.projects[prj_id].getQuota()

            # Ephemeral servers consume the shared quota: if it is
            # currently disabled, block until it becomes available.
            blocking = False
            if server.isEphemeral() and not SharedQuota.isEnabled():
                blocking = True

            if quota.allocate(server, blocking=blocking):
                found = False

                try:
                    # Refresh the request context with a token obtained
                    # through the stored trust.
                    km = self.keystone_manager
                    trust = km.getTrust(context["trust_id"])
                    token = trust.getToken(km.getToken().getId())

                    context["auth_token"] = token.getId()
                    context["user_id"] = token.getUser().getId()
                except Exception as ex:
                    LOG.error("error on getting the token for server "
                              "%s (reason=%s)" % (server.getId(), ex))
                    raise ex

                try:
                    self.nova_manager.buildServer(request)
                    LOG.info("building server %s (user_id=%s prj_id=%s quo"
                             "ta=shared)" % (server_id, user_id, prj_id))
                    found = True
                except Exception as ex:
                    LOG.error(
                        "error on building the server %s (reason=%s)"
                        % (server.getId(), ex))

                if found:
                    self.queue.deleteItem(queue_item)
                else:
                    # Build failed: give the quota back and park the item.
                    quota.release(server)
                    queue_items.append(queue_item)
            else:
                # Quota not available right now: park the item.
                queue_items.append(queue_item)
        except Exception as ex:
            LOG.error("Exception has occured", exc_info=1)
            LOG.error("Worker %s: %s" % (self.name, ex))
            self.queue.deleteItem(queue_item)

    LOG.info("Worker %s destroyed!" % self.name)
def doOnEvent(self, event_type, *args, **kwargs):
    """React to project life-cycle events.

    PROJECT_ADDED: take over quota management for the project — the
    current Nova quota (when fully positive) is archived as the project's
    quota-class, the live Nova quota is set to unlimited (-1), and the
    in-memory Quota is rebuilt from the archived class values plus the
    SharedQuota sizes; existing non-building servers are allocated against
    it, autoresizing the private quota when they do not fit.

    PROJECT_REMOVED: restore the archived quota-class to Nova (only while
    the live quota is still the unlimited one) and delete the project's
    servers that were consuming the shared quota.

    :param event_type: event name; other events are silently ignored
    :param kwargs: expected to carry "project"; events without it are
                   ignored
    :raises SynergyError: re-raised after logging
    """
    if event_type == "PROJECT_ADDED":
        project = kwargs.get("project", None)

        if not project:
            return

        try:
            quota = self.nova_manager.getQuota(project.getId())

            # A fully positive quota means Nova is still enforcing it:
            # archive it as the quota-class, then set the live quota to
            # -1 (unlimited) so this manager can enforce it instead.
            if quota.getSize("vcpus") > 0 and \
               quota.getSize("memory") > 0 and \
               quota.getSize("instances") > 0:
                self.nova_manager.updateQuota(quota, is_class=True)

                quota.setSize("vcpus", -1)
                quota.setSize("memory", -1)
                quota.setSize("instances", -1)

                self.nova_manager.updateQuota(quota)

            class_quota = self.nova_manager.getQuota(project.getId(),
                                                     is_class=True)

            # Rebuild the in-memory quota: private sizes come from the
            # archived quota-class, shared sizes from SharedQuota.
            quota = project.getQuota()
            quota.setId(project.getId())
            quota.setSize("vcpus", class_quota.getSize("vcpus"))
            quota.setSize("memory", class_quota.getSize("memory"))
            quota.setSize("instances", class_quota.getSize("instances"))
            quota.setSize("vcpus", SharedQuota.getSize("vcpus"),
                          private=False)
            quota.setSize("memory", SharedQuota.getSize("memory"),
                          private=False)
            quota.setSize("instances", SharedQuota.getSize("instances"),
                          private=False)

            servers = self.nova_manager.getProjectServers(project.getId())

            for server in servers:
                if server.getState() != "building":
                    try:
                        quota.allocate(server)
                    except SynergyError as ex:
                        # The running server does not fit in the private
                        # quota: grow it by the server's flavor and retry.
                        fl = server.getFlavor()
                        vcpus_size = quota.getSize("vcpus") + fl.getVCPUs()
                        mem_size = quota.getSize("memory") + fl.getMemory()

                        quota.setSize("vcpus", vcpus_size)
                        quota.setSize("memory", mem_size)

                        self.nova_manager.updateQuota(quota, is_class=True)

                        LOG.warn("private quota autoresized (vcpus=%s, "
                                 "memory=%s) for project %r (id=%s)"
                                 % (quota.getSize("vcpus"),
                                    quota.getSize("memory"),
                                    project.getName(), project.getId()))

                        quota.allocate(server)

            self.updateSharedQuota()
        except SynergyError as ex:
            LOG.error(ex)
            raise ex
    elif event_type == "PROJECT_REMOVED":
        project = kwargs.get("project", None)

        if not project:
            return

        quota = self.nova_manager.getQuota(project.getId())

        # Only restore the archived quota-class if the live quota is
        # still the unlimited (-1) one installed at PROJECT_ADDED time.
        if quota.getSize("vcpus") <= -1 and \
           quota.getSize("memory") <= -1 and \
           quota.getSize("instances") <= -1:
            qc = self.nova_manager.getQuota(project.getId(),
                                            is_class=True)
            self.nova_manager.updateQuota(qc)

        quota = project.getQuota()

        # Collect the project's shared-quota servers (private=False) in
        # every tracked state and delete them from Nova.
        ids = []
        ids.extend(quota.getServers("active", private=False))
        ids.extend(quota.getServers("building", private=False))
        ids.extend(quota.getServers("error", private=False))

        try:
            for server_id in ids:
                self.nova_manager.deleteServer(server_id)
        except SynergyError as ex:
            LOG.error(ex)
            raise ex
def destroy(self):
    """Shut this manager down: log the shutdown, then disable the
    shared quota so no further shared allocations can proceed."""
    LOG.info("destroy invoked!")
    SharedQuota.disable()
def updateSharedQuota(self):
    """Recompute the shared (dynamic) quota from hypervisor capacity.

    The shared quota is the cloud capacity (vcpus and memory summed over
    the enabled, up hypervisors and scaled by the Nova allocation ratios)
    minus the statically allocated project quotas in the "default" domain.
    If the static allocations exceed the capacity, the shared quota is
    disabled and zeroed; otherwise it is enabled and every managed
    project's shared sizes are refreshed.

    :raises SynergyError: re-raised after logging
    """
    # calculate the total limit per cores and ram
    total_memory = float(0)
    total_vcpus = float(0)
    static_memory = float(0)
    static_vcpus = float(0)
    shared_memory = float(0)
    shared_vcpus = float(0)

    try:
        cpu_ratio = self.nova_manager.getParameter("cpu_allocation_ratio")
        ram_ratio = self.nova_manager.getParameter("ram_allocation_ratio")

        hypervisors = self.nova_manager.getHypervisors()

        for hv in hypervisors:
            # Skip hypervisors that cannot host servers.
            if hv.getState() == "down" or hv.getStatus() == "disabled":
                continue

            if hv.getMemory() > 0:
                total_memory += hv.getMemory()

            if hv.getVCPUs() > 0:
                total_vcpus += hv.getVCPUs()

        # Scale raw capacity by the Nova overcommit ratios.
        total_memory *= float(ram_ratio)
        total_vcpus *= float(cpu_ratio)

        domain = self.keystone_manager.getDomains(name="default")

        if not domain:
            raise SynergyError("domain 'default' not found!")

        domain = domain[0]
        dom_id = domain.getId()

        kprojects = self.keystone_manager.getProjects(domain_id=dom_id)

        for kproject in kprojects:
            quota = self.nova_manager.getQuota(kproject.getId())

            # -1/-1 means this project is managed by Synergy: its real
            # static size is the archived quota-class.
            if quota.getSize("vcpus") == -1 and \
               quota.getSize("memory") == -1:
                quota = self.nova_manager.getQuota(kproject.getId(),
                                                   is_class=True)

            if quota.getSize("vcpus") > 0:
                static_vcpus += quota.getSize("vcpus")

            if quota.getSize("memory") > 0:
                static_memory += quota.getSize("memory")

        # NOTE(review): 'enabled' is set only by the memory check below;
        # if the vcpus check fails while memory passes, the shared quota
        # is still enabled with shared_vcpus == 0 — confirm intended.
        enabled = False

        if total_vcpus < static_vcpus:
            if self.project_manager.getProjects():
                LOG.warn("shared quota: the total statically "
                         "allocated vcpus (%s) is greater than the "
                         "total amount of vcpus allowed (%s)"
                         % (static_vcpus, total_vcpus))
        else:
            shared_vcpus = total_vcpus - static_vcpus

        if total_memory < static_memory:
            if self.project_manager.getProjects():
                LOG.warn("shared quota: the total statically "
                         "allocated memory (%s) is greater than "
                         "the total amount of memory allowed (%s)"
                         % (static_memory, total_memory))
        else:
            enabled = True
            shared_memory = total_memory - static_memory

        if enabled:
            LOG.info("shared quota enabled: vcpus=%s memory=%s"
                     % (shared_vcpus, shared_memory))
            SharedQuota.enable()
            SharedQuota.setSize("vcpus", shared_vcpus)
            SharedQuota.setSize("memory", shared_memory)
        else:
            LOG.info("shared quota disabled")
            SharedQuota.disable()
            SharedQuota.setSize("vcpus", 0)
            SharedQuota.setSize("memory", 0)

        # Propagate the new shared sizes to every managed project.
        for project in self.project_manager.getProjects():
            quota = project.getQuota()
            quota.setSize("vcpus", shared_vcpus, private=False)
            quota.setSize("memory", shared_memory, private=False)
    except SynergyError as ex:
        LOG.error(ex)
        raise ex
def addProject(self, project):
    """Register *project* and take over its Nova quota management.

    The current Nova quota (when fully positive) is archived as the
    project's quota-class and the live Nova quota is set to unlimited
    (-1); the project's in-memory Quota is then rebuilt from the archived
    class values plus the SharedQuota sizes, and the project's existing
    non-building servers are allocated against it (autoresizing the
    private quota when they do not fit).

    :param project: the project to register; must not already be known
    :raises Exception: if the project already exists, or re-raised after
                       logging when any downstream call fails
    """
    if self.projects.get(project.getId(), None):
        raise Exception("project %r already exists!" % (project.getId()))

    try:
        quota = self.nova_manager.getQuota(project.getId())

        # A fully positive quota means Nova is still enforcing it:
        # archive it as the quota-class, then set the live quota to -1
        # (unlimited) so Synergy can enforce it instead.
        if quota.getSize("vcpus") > 0 and \
           quota.getSize("memory") > 0 and \
           quota.getSize("instances") > 0:
            self.nova_manager.updateQuota(quota, is_class=True)

            quota.setSize("vcpus", -1)
            quota.setSize("memory", -1)
            quota.setSize("instances", -1)

            self.nova_manager.updateQuota(quota)

        class_quota = self.nova_manager.getQuota(
            project.getId(), is_class=True)

        # Rebuild the in-memory quota: private sizes come from the
        # archived quota-class, shared sizes from SharedQuota.
        quota = project.getQuota()
        quota.setId(project.getId())
        quota.setSize("vcpus", class_quota.getSize("vcpus"))
        quota.setSize("memory", class_quota.getSize("memory"))
        quota.setSize("instances", class_quota.getSize("instances"))
        quota.setSize(
            "vcpus", SharedQuota.getSize("vcpus"), private=False)
        quota.setSize(
            "memory", SharedQuota.getSize("memory"), private=False)
        quota.setSize(
            "instances", SharedQuota.getSize("instances"), private=False)

        servers = self.nova_manager.getProjectServers(project.getId())

        for server in servers:
            if server.getState() != "building":
                try:
                    quota.allocate(server)
                except Exception as ex:
                    # The running server does not fit in the private
                    # quota: grow it by the server's flavor and retry.
                    flavor = server.getFlavor()
                    vcpus_size = quota.getSize("vcpus") + flavor.getVCPUs()
                    mem_size = quota.getSize("memory") + flavor.getMemory()

                    quota.setSize("vcpus", vcpus_size)
                    quota.setSize("memory", mem_size)

                    self.nova_manager.updateQuota(quota, is_class=True)

                    LOG.warn("private quota autoresized (vcpus=%s, "
                             "memory=%s) for project %r (id=%s)"
                             % (quota.getSize("vcpus"),
                                quota.getSize("memory"),
                                project.getName(), project.getId()))

                    quota.allocate(server)

        self.projects[project.getId()] = project
    except Exception as ex:
        LOG.error(ex)
        raise ex
def updateSharedQuota(self):
    """Recompute the shared (dynamic) quota from hypervisor capacity.

    The shared quota is the cloud capacity (vcpus and memory summed over
    the enabled, up hypervisors and scaled by the Nova allocation ratios)
    minus the statically allocated project quotas in the "default"
    domain.  For projects managed here, the archived quota-class is the
    static size and is pushed back into the project's private quota
    (unless current usage exceeds it, in which case the larger live quota
    is kept and re-archived).  If the static allocations exceed the
    capacity, the shared quota is disabled and zeroed; otherwise it is
    enabled and every managed project's shared sizes are refreshed.

    :raises Exception: re-raised after logging
    """
    # calculate the total limit per cores and ram
    total_memory = float(0)
    total_vcpus = float(0)
    static_memory = float(0)
    static_vcpus = float(0)
    shared_memory = float(0)
    shared_vcpus = float(0)

    try:
        cpu_ratio = self.nova_manager.getParameter("cpu_allocation_ratio")
        ram_ratio = self.nova_manager.getParameter("ram_allocation_ratio")

        hypervisors = self.nova_manager.getHypervisors()

        for hv in hypervisors:
            # Skip hypervisors that cannot host servers.
            if hv.getState() == "down" or hv.getStatus() == "disabled":
                continue

            if hv.getMemory() > 0:
                total_memory += hv.getMemory()

            if hv.getVCPUs() > 0:
                total_vcpus += hv.getVCPUs()

        # Scale raw capacity by the Nova overcommit ratios.
        total_memory *= float(ram_ratio)
        total_vcpus *= float(cpu_ratio)

        domain = self.keystone_manager.getDomains(name="default")

        if not domain:
            raise Exception("domain 'default' not found!")

        domain = domain[0]
        dom_id = domain.getId()

        kprojects = self.keystone_manager.getProjects(domain_id=dom_id)

        for kproject in kprojects:
            project = self.getProject(kproject.getId())

            if project:
                # Managed project: the archived quota-class holds the
                # static size.
                quota = self.nova_manager.getQuota(project.getId(),
                                                   is_class=True)
                pquota = project.getQuota()

                vcpus_size = quota.getSize("vcpus")
                vcpus_usage = pquota.getUsage("vcpus")
                mem_size = quota.getSize("memory")
                mem_usage = pquota.getUsage("memory")

                if vcpus_usage > vcpus_size or mem_usage > mem_size:
                    # Cannot shrink below current usage: keep the live
                    # (larger) quota and re-archive it as the class.
                    LOG.info("cannot shrink the private quota for project"
                             " %r (id=%s) because the usage of current "
                             "quota exceeds the new size (vcpus=%s, "
                             "memory=%s)" % (project.getName(),
                                             project.getId(),
                                             quota.getSize("vcpus"),
                                             quota.getSize("memory")))

                    self.nova_manager.updateQuota(pquota, is_class=True)
                    quota = pquota
                else:
                    # Push the archived sizes into the private quota.
                    pquota.setSize("vcpus", value=quota.getSize("vcpus"))
                    pquota.setSize("memory", value=quota.getSize("memory"))
                    pquota.setSize("instances",
                                   value=quota.getSize("instances"))
            else:
                # Unmanaged project: the live Nova quota is the static
                # size.
                quota = self.nova_manager.getQuota(kproject.getId())

            if quota.getSize("vcpus") > 0:
                static_vcpus += quota.getSize("vcpus")

            if quota.getSize("memory") > 0:
                static_memory += quota.getSize("memory")

        # NOTE(review): 'enabled' is set only by the memory check below;
        # if the vcpus check fails while memory passes, the shared quota
        # is still enabled with shared_vcpus == 0 — confirm intended.
        enabled = False

        if total_vcpus < static_vcpus:
            if self.getProjects():
                LOG.warn("shared quota: the total statically "
                         "allocated vcpus (%s) is greater than the "
                         "total amount of vcpus allowed (%s)"
                         % (static_vcpus, total_vcpus))
        else:
            shared_vcpus = total_vcpus - static_vcpus

        if total_memory < static_memory:
            if self.getProjects():
                LOG.warn("shared quota: the total statically "
                         "allocated memory (%s) is greater than "
                         "the total amount of memory allowed (%s)"
                         % (static_memory, total_memory))
        else:
            enabled = True
            shared_memory = total_memory - static_memory

        if enabled:
            LOG.info("shared quota enabled: vcpus=%s memory=%s"
                     % (shared_vcpus, shared_memory))
            SharedQuota.enable()
            SharedQuota.setSize("vcpus", shared_vcpus)
            SharedQuota.setSize("memory", shared_memory)
        else:
            LOG.info("shared quota disabled")
            SharedQuota.disable()
            SharedQuota.setSize("vcpus", 0)
            SharedQuota.setSize("memory", 0)

        # Propagate the new shared sizes to every managed project.
        for project in self.getProjects().values():
            quota = project.getQuota()
            quota.setSize("vcpus", shared_vcpus, private=False)
            quota.setSize("memory", shared_memory, private=False)
    except Exception as ex:
        LOG.error(ex)
        raise ex
def run(self):
    """Worker main loop: drain the queue and build servers on quota.

    Each dequeued item is re-checked against Nova (stale servers are
    dropped), the owning project's quota is allocated (blocking when the
    server is ephemeral and the shared quota is disabled), a token is
    derived from the request's trust, and the server is built.  Items
    that cannot be satisfied are parked in ``queue_items`` and restored
    as soon as SharedQuota signals a release (user priorities are
    refreshed at the same time); once ``backfill_depth`` items are parked
    the worker just waits.
    """
    LOG.info("Worker %s running!" % self.name)
    queue_items = []
    last_release_time = SharedQuota.getLastReleaseTime()

    while not self.exit and not self.queue.isClosed():
        # Defined before the try so the except handler below can never
        # hit an unbound name if a failure occurs prior to dequeue().
        queue_item = None

        try:
            if last_release_time < SharedQuota.getLastReleaseTime():
                # Shared quota was released: retry the parked items and
                # refresh every user's queue priority.
                last_release_time = SharedQuota.getLastReleaseTime()

                while queue_items:
                    self.queue.restore(queue_items.pop(0))

                for project in self.project_manager.getProjects():
                    for user in project.getUsers():
                        self.queue.updatePriority(user)

            if len(queue_items) >= self.backfill_depth:
                # Too many unsatisfiable items parked: wait for a
                # release instead of spinning on the queue.
                SharedQuota.wait()
                continue

            queue_item = self.queue.dequeue(block=False)

            if queue_item is None:
                if self.queue.getSize():
                    # Queue is non-empty but nothing dequeuable now.
                    SharedQuota.wait()
                    continue
                else:
                    queue_item = self.queue.dequeue(block=True)

            if queue_item is None:
                continue

            request = Request.fromDict(queue_item.getData())
            user_id = request.getUserId()
            prj_id = request.getProjectId()
            context = request.getContext()
            server = request.getServer()
            server_id = server.getId()

            try:
                s = self.nova_manager.getServer(server_id, detail=True)

                if s.getState() != "building":
                    # or server["OS-EXT-STS:task_state"] != "scheduling":
                    # The server moved on (or was deleted): stale item.
                    self.queue.delete(queue_item)
                    continue
            except SynergyError as ex:
                LOG.warn("the server %s is not anymore available!"
                         " (reason=%s)" % (server_id, ex))
                self.queue.delete(queue_item)
                continue

            project = self.project_manager.getProject(id=prj_id)

            if not project:
                raise SynergyError("project %r not found!" % prj_id)

            quota = project.getQuota()

            # Ephemeral servers consume the shared quota: if it is
            # currently disabled, block until it becomes available.
            blocking = False
            if server.isEphemeral() and not SharedQuota.isEnabled():
                blocking = True

            if quota.allocate(server, blocking=blocking):
                found = False

                try:
                    # Refresh the request context with a token obtained
                    # through the stored trust.
                    km = self.keystone_manager
                    trust = km.getTrust(context["trust_id"])
                    token = trust.getToken(km.getToken().getId())

                    context["auth_token"] = token.getId()
                    context["user_id"] = token.getUser().getId()
                except SynergyError as ex:
                    LOG.error("error on getting the token for server "
                              "%s (reason=%s)" % (server.getId(), ex))
                    raise ex

                try:
                    self.nova_manager.buildServer(request)
                    LOG.info("building server %s user_id=%s prj_id=%s quo"
                             "ta=shared" % (server_id, user_id, prj_id))
                    found = True
                except Exception as ex:
                    LOG.error("error on building the server %s (reason=%s)"
                              % (server.getId(), ex))

                if found:
                    self.queue.delete(queue_item)
                else:
                    # Build failed: give the quota back and park the
                    # item for a later retry.
                    quota.release(server)
                    queue_items.append(queue_item)
            else:
                # Quota not available right now: park the item.
                queue_items.append(queue_item)
        except Exception as ex:
            LOG.error("Exception has occured", exc_info=1)
            LOG.error("Worker %s: %s" % (self.name, ex))

            # Only drop the item if one was actually dequeued in this
            # iteration (fixes a potential UnboundLocalError / delete of
            # a stale item from a previous iteration).
            if queue_item is not None:
                self.queue.delete(queue_item)

    LOG.info("Worker %s destroyed!" % self.name)
def doOnEvent(self, event_type, *args, **kwargs):
    """React to project life-cycle events.

    PROJECT_ADDED: take over quota management for the project — the
    current Nova quota (when fully positive) is archived as the project's
    quota-class, the live Nova quota is set to unlimited (-1), and the
    in-memory Quota is rebuilt from the archived class values plus the
    SharedQuota sizes; existing non-building servers are allocated against
    it, autoresizing the private quota when they do not fit.

    PROJECT_REMOVED: restore the archived quota-class to Nova (only while
    the live quota is still the unlimited one) and delete the project's
    servers that were consuming the shared quota.

    :param event_type: event name; other events are silently ignored
    :param kwargs: expected to carry "project"; events without it are
                   ignored
    :raises SynergyError: re-raised after logging
    """
    if event_type == "PROJECT_ADDED":
        project = kwargs.get("project", None)

        if not project:
            return

        try:
            quota = self.nova_manager.getQuota(project.getId())

            # A fully positive quota means Nova is still enforcing it:
            # archive it as the quota-class, then set the live quota to
            # -1 (unlimited) so this manager can enforce it instead.
            if quota.getSize("vcpus") > 0 and \
               quota.getSize("memory") > 0 and \
               quota.getSize("instances") > 0:
                self.nova_manager.updateQuota(quota, is_class=True)

                quota.setSize("vcpus", -1)
                quota.setSize("memory", -1)
                quota.setSize("instances", -1)

                self.nova_manager.updateQuota(quota)

            class_quota = self.nova_manager.getQuota(
                project.getId(), is_class=True)

            # Rebuild the in-memory quota: private sizes come from the
            # archived quota-class, shared sizes from SharedQuota.
            quota = project.getQuota()
            quota.setId(project.getId())
            quota.setSize("vcpus", class_quota.getSize("vcpus"))
            quota.setSize("memory", class_quota.getSize("memory"))
            quota.setSize("instances", class_quota.getSize("instances"))
            quota.setSize("vcpus", SharedQuota.getSize("vcpus"),
                          private=False)
            quota.setSize("memory", SharedQuota.getSize("memory"),
                          private=False)
            quota.setSize("instances", SharedQuota.getSize("instances"),
                          private=False)

            servers = self.nova_manager.getProjectServers(project.getId())

            for server in servers:
                if server.getState() != "building":
                    try:
                        quota.allocate(server)
                    except SynergyError as ex:
                        # The running server does not fit in the private
                        # quota: grow it by the server's flavor and retry.
                        fl = server.getFlavor()
                        vcpus_size = quota.getSize("vcpus") + fl.getVCPUs()
                        mem_size = quota.getSize("memory") + fl.getMemory()

                        quota.setSize("vcpus", vcpus_size)
                        quota.setSize("memory", mem_size)

                        self.nova_manager.updateQuota(quota, is_class=True)

                        LOG.warn("private quota autoresized (vcpus=%s, "
                                 "memory=%s) for project %r (id=%s)"
                                 % (quota.getSize("vcpus"),
                                    quota.getSize("memory"),
                                    project.getName(), project.getId()))

                        quota.allocate(server)

            self.updateSharedQuota()
        except SynergyError as ex:
            LOG.error(ex)
            raise ex
    elif event_type == "PROJECT_REMOVED":
        project = kwargs.get("project", None)

        if not project:
            return

        quota = self.nova_manager.getQuota(project.getId())

        # Only restore the archived quota-class if the live quota is
        # still the unlimited (-1) one installed at PROJECT_ADDED time.
        if quota.getSize("vcpus") <= -1 and \
           quota.getSize("memory") <= -1 and \
           quota.getSize("instances") <= -1:
            qc = self.nova_manager.getQuota(project.getId(),
                                            is_class=True)
            self.nova_manager.updateQuota(qc)

        quota = project.getQuota()

        # Collect the project's shared-quota servers (private=False) in
        # every tracked state and delete them from Nova.
        ids = []
        ids.extend(quota.getServers("active", private=False))
        ids.extend(quota.getServers("building", private=False))
        ids.extend(quota.getServers("error", private=False))

        try:
            for server_id in ids:
                self.nova_manager.deleteServer(server_id)
        except SynergyError as ex:
            LOG.error(ex)
            raise ex