Example no. 1
    def reserveUnits(self, var_name, reservations):
        """ give a set of reservations (dictionary of slicename:cpuid_list),
            write those reservations to the appropriate cgroup files.

            reservations["_default"] is assumed to be the default reservation
            for slices that do not reserve cores. It's essentially the leftover
            cpu cores.
        """

        default = reservations["_default"]

        # Set the default vserver cpuset. This will deal with any vservers
        # that might be created before the nodemanager has had a chance to
        # update the cpusets.
        self.reserveDefault(var_name, default)

        for cgroup in self.get_cgroups():
            if cgroup in reservations:
                cpus = reservations[cgroup]
                logger.log("CoreSched: reserving " + var_name + " on " +
                           cgroup + ": " + str(cpus))
            else:
                # no log message for default; too much verbosity in the common case
                cpus = default

            if glo_coresched_simulate:
                print "R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(
                    cpus)
            else:
                cgroups.write(cgroup, var_name, self.listToRange(cpus))

    def reserveUnits(self, var_name, reservations):
        """ 
        give a set of reservations (dictionary of slicename:cpuid_list),
        write those reservations to the appropriate cgroup files.

        reservations["_default"] is assumed to be the default reservation
        for slices that do not reserve cores. It's essentially the leftover
        cpu cores.
        """

        default = reservations["_default"]

        # Set the default vserver cpuset. This will deal with any vservers
        # that might be created before the nodemanager has had a chance to
        # update the cpusets.
        self.reserveDefault(var_name, default)

        for cgroup in self.get_cgroups():
            if cgroup in reservations:
                cpus = reservations[cgroup]
                logger.log("CoreSched: reserving " + var_name + " on " + cgroup + ": " + str(cpus))
            else:
                # no log message for default; too much verbosity in the common case
                cpus = default

            if glo_coresched_simulate:
                print("R", cgroup + "/" + var_name, self.listToRange(cpus))
            else:
                cgroups.write(cgroup, var_name, self.listToRange(cpus))
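
For reference, here is a minimal, illustrative sketch of how either variant of reserveUnits above might be driven. The listToRange helper is assumed to collapse a list of CPU ids into the comma/dash range syntax that cpuset files expect (the real helper, like logger, cgroups, and glo_coresched_simulate, lives elsewhere in this module); the reservations dictionary and the "cpuset.cpus" var_name are invented for the example.

    def listToRange(cpus):
        """Collapse a list of CPU ids into cpuset range syntax,
        e.g. [0, 1, 2, 5] -> "0-2,5" (assumed behavior of the real helper)."""
        ranges = []
        for cpu in sorted(cpus):
            if ranges and cpu == ranges[-1][1] + 1:
                ranges[-1][1] = cpu           # extend the current run
            else:
                ranges.append([cpu, cpu])     # start a new run
        return ",".join(str(lo) if lo == hi else "{}-{}".format(lo, hi)
                        for lo, hi in ranges)

    # Invented reservations dict: two slices pin cores, everything else
    # falls back to "_default".
    reservations = {
        "_default": [4, 5, 6, 7],
        "slice_a": [0, 1],
        "slice_b": [2, 3],
    }

    print(listToRange(reservations["slice_a"]))   # -> "0-1"
    print(listToRange(reservations["_default"]))  # -> "4-7"
    # scheduler.reserveUnits("cpuset.cpus", reservations) would then write
    # "0-1" and "2-3" for the reserving slices and "4-7" everywhere else.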

    def configure(self, rec):

        # sliver.[LXC/QEMU], lower-cased
        #sliver_type = rec['type'].split('.')[1].lower()

        #BASE_DIR = '/cgroup/libvirt/{}/{}/'.format(sliver_type, self.name)

        # Disk allocation
        # No way to do this through cgroups... figure out how to do it with
        # user/directory quotas. There is no way to set a quota per directory,
        # and chown-ing would create problems because user namespaces are not
        # yet implemented (and thus host and containers share the same name
        # ids).

        # Btrfs supports quotas per volume.

        if "rspec" in rec and "tags" in rec["rspec"]:
            if cgroups.get_cgroup_path(self.name) is None:
                # If configure is called before start, then the cgroups won't exist
                # yet. NM will eventually re-run configure on the next iteration.
                # TODO: Add a post-start configure, and move this stuff there
                logger.log("Configure: postponing tag check on {} as cgroups are not yet populated"
                           .format(self.name))
            else:
                tags = rec["rspec"]["tags"]
                # This will depend on the filesystem selected
                if 'disk_max' in tags:
                    disk_max = tags['disk_max']
                    if disk_max == 0:
                        # unlimited
                        pass
                    else:
                        # limit to certain number
                        pass

                # Memory allocation
                if 'memlock_hard' in tags:
                    # tag value is in kB (hence * 1024); the cgroup file expects bytes
                    mem = str(int(tags['memlock_hard']) * 1024)
                    cgroups.write(self.name, 'memory.limit_in_bytes', mem, subsystem="memory")
                if 'memlock_soft' in tags:
                    mem = str(int(tags['memlock_soft']) * 1024)  # soft limit, also kB -> bytes
                    cgroups.write(self.name, 'memory.soft_limit_in_bytes', mem, subsystem="memory")

                # CPU allocation
                # Only cpu_shares until we figure out how to provide limits
                # and guarantees (RT_SCHED?)
                if 'cpu_share' in tags:
                    cpu_share = tags['cpu_share']
                    cgroups.write(self.name, 'cpu.shares', cpu_share)

        # Call the upper configure method (ssh keys...)
        Account.configure(self, rec)

    def configure(self, rec):

        # sliver.[LXC/QEMU], lower-cased
        #sliver_type = rec['type'].split('.')[1].lower()

        #BASE_DIR = '/cgroup/libvirt/%s/%s/'%(sliver_type, self.name)

        # Disk allocation
        # No way to do this through cgroups... figure out how to do it with
        # user/directory quotas. There is no way to set a quota per directory,
        # and chown-ing would create problems because user namespaces are not
        # yet implemented (and thus host and containers share the same name
        # ids).

        # Btrfs supports quotas per volume.

        if "rspec" in rec and "tags" in rec["rspec"]:
            if cgroups.get_cgroup_path(self.name) is None:
                # If configure is called before start, then the cgroups won't exist
                # yet. NM will eventually re-run configure on the next iteration.
                # TODO: Add a post-start configure, and move this stuff there
                logger.log("Configure: postponing tag check on %s as cgroups are not yet populated" % self.name)
            else:
                tags = rec["rspec"]["tags"]
                # This will depend on the filesystem selected
                if 'disk_max' in tags:
                    disk_max = tags['disk_max']
                    if disk_max == 0:
                        # unlimited
                        pass
                    else:
                        # limit to certain number
                        pass

                # Memory allocation
                if 'memlock_hard' in tags:
                    # tag value is in kB (hence * 1024); the cgroup file expects bytes
                    mem = str(int(tags['memlock_hard']) * 1024)
                    cgroups.write(self.name, 'memory.limit_in_bytes', mem, subsystem="memory")
                if 'memlock_soft' in tags:
                    mem = str(int(tags['memlock_soft']) * 1024)  # soft limit, also kB -> bytes
                    cgroups.write(self.name, 'memory.soft_limit_in_bytes', mem, subsystem="memory")

                # CPU allocation
                # Only cpu_shares until we figure out how to provide limits
                # and guarantees (RT_SCHED?)
                if 'cpu_share' in tags:
                    cpu_share = tags['cpu_share']
                    cgroups.write(self.name, 'cpu.shares', cpu_share)

        # Call the upper configure method (ssh keys...)
        Account.configure(self, rec)
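
To make the tag handling concrete, here is a minimal sketch with an invented rec record, shaped like the ones the configure variants above consume. The memlock values are taken to be in kB, as the * 1024 conversion above implies, and the cgroups.write calls shown in the comments are the ones either variant would issue once the sliver's cgroup exists.

    # Invented record; only the tags that configure() inspects are filled in.
    rec = {
        "rspec": {
            "tags": {
                "memlock_hard": 524288,   # 512 MiB, expressed in kB
                "memlock_soft": 262144,   # 256 MiB, expressed in kB
                "cpu_share": 2,           # relative weight for cpu.shares
            }
        }
    }

    tags = rec["rspec"]["tags"]
    print(int(tags["memlock_hard"]) * 1024)   # 536870912 bytes
    print(int(tags["memlock_soft"]) * 1024)   # 268435456 bytes

    # With this rec, configure() would then issue:
    #   cgroups.write(name, 'memory.limit_in_bytes', '536870912', subsystem="memory")
    #   cgroups.write(name, 'memory.soft_limit_in_bytes', '268435456', subsystem="memory")
    #   cgroups.write(name, 'cpu.shares', 2)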