def setUp(self):
    self.test_dir = os.path.join(os.path.dirname(__file__), 'test_files')
    Universe.reset()
    Universe.path_prefix = self.test_dir
    dstags = MagicMock()
    dstags.get.return_value = []
    common.services.register(ServiceName.DATASTORE_TAGS, dstags)
def run(self):
    def _print_error(msg):
        print "! " + msg

    # Load scheduler tree from yaml file
    content = open(self.file, 'r').read()
    tree_config = yaml.load(content)
    Universe.get_tree().load_schedulers(tree_config, _print_error)
def run(self):
    self.setup_schedulers()
    # pprint(self.tree_config)

    def _print_error(msg):
        print "! " + msg

    Universe.get_tree().load_schedulers(self.tree_config, _print_error)
def check_placement(results):
    for request, response in results:
        request = request.place_request
        resource = request.resource
        request_constraints = resource.vm.resource_constraints
        if request_constraints:
            host = Universe.get_tree().get_scheduler(response.agent_id)
            nw_constraints = [c.values[0] for c in host.constraints
                              if c.type == RCType.NETWORK]
            ds_constraints = [c.values[0] for c in host.constraints
                              if c.type == RCType.DATASTORE]
            for constraint in request_constraints:
                if constraint.type == RCType.NETWORK:
                    if constraint.values[0] not in nw_constraints:
                        raise ConstraintError(resource.vm.id,
                                              constraint.values[0], host.id)
                if constraint.type == RCType.DATASTORE:
                    if constraint.values[0] not in ds_constraints:
                        raise ConstraintError(resource.vm.id,
                                              constraint.values[0], host.id)
def get_child_objects(self):
    from psim.universe import Universe
    schedulers = Universe.get_tree().schedulers
    for cid in self._child_ids:
        if cid not in schedulers:
            raise ConfigError("Child '%d' of scheduler '%d' not found" %
                              (cid, self.id))
        self.adopt_child(schedulers[cid])
def place(self, request):
    try:
        response = Universe.get_tree().root_scheduler.place(request)
        sys.stdout.write('.')
        return response
    except Exception, e:
        self._logger.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.stdout.write('F')
        return None
def tabulate_hosts():
    hosts = [h for h in Universe.get_tree().schedulers.values()
             if isinstance(h, Host)]
    rows = []
    for h in hosts:
        rows.append(h.get_info())
    header = ["Host Id", "VM Count", "Mem", "Used Mem", "Mem. Consumed",
              "Disk", "Used Disk", "Constraints"]
    return tabulate(rows, headers=header, tablefmt="rst")
def run(self):
    if not Universe.get_tree().root_scheduler:
        raise PreconditionError("Root scheduler is not configured. " +
                                "Please run load_tree first.")
    self.register_services()
    requests = Requests(self.requests_file)
    create_queue = {}
    results = []
    reserve_failures = []
    create_failures = []

    sys.stdout.write('Running')
    for index, request in enumerate(requests.requests):
        self._logger.info("Req #%d: %s", index + 1, request.place_request)
        response = self.place(request.place_request)
        if response:
            results.append((request, response))
            # Generate the "iteration" when this place will be created.
            create_index = min(len(requests.requests) - 1,
                               self.create_interval + index)
            if create_index not in create_queue:
                create_queue[create_index] = collections.deque()
            create_queue[create_index].appendleft((request, response))

        # If there are requests to create for this iteration, do so.
        if index in create_queue:
            queue = create_queue[index]
            while queue:
                request, response = queue.pop()
                if response.result == PlaceResultCode.OK:
                    agent = Universe.get_tree().get_scheduler(
                        response.agent_id)
                    self.reserve_and_create(request, agent, response,
                                            reserve_failures,
                                            create_failures)
            create_queue.pop(index)

    assert len(create_queue) == 0
    Universe.results = Results(results, reserve_failures, create_failures)
    print "Done"
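# Illustration of the deferral above (numbers are made up, not from the
# simulator): with create_interval == 3, a placement answered at request
# index 5 is queued under create_index 8, so reserve_and_create for it runs
# only after requests 6-8 have been placed. The min() clamp pins the last
# few placements to the final iteration so nothing is left in create_queue.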
def create_host(id, cpu, mem, disk, constraint_set, overcommit):
    from psim.universe import Universe
    networks = [constraint for constraint in constraint_set
                if constraint.type == RCT.NETWORK]
    datastores = [constraint for constraint in constraint_set
                  if constraint.type == RCT.DATASTORE]
    if not networks:
        # create a dummy network so host handler doesn't throw warnings
        networks.append(ResourceConstraint(RCT.NETWORK, ["VM Network"]))
    local_ds_name = Universe.add_local_ds(id, disk)
    datastores.append(ResourceConstraint(RCT.DATASTORE, [local_ds_name]))
    host = Host(id, networks, datastores, cpu, mem, disk, overcommit)
    return host
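# A minimal usage sketch, not taken from the simulator itself: create_host is
# invoked from load_schedulers as SchedulerTree.create_host(...) with the
# constraints parsed from the host's YAML entry. The constraint values and
# capacities below are hypothetical.
#
#   constraint_set = {
#       ResourceConstraint(RCT.NETWORK, ["VM Network"]),
#       ResourceConstraint(RCT.DATASTORE, ["datastore1"]),
#   }
#   host = SchedulerTree.create_host(3, cpu=4, mem=8192, disk=1024,
#                                    constraint_set=constraint_set,
#                                    overcommit=overcommit)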
def __init__(self, file):
    self.file = Universe.get_path(file)
    content = open(self.file, 'r').read()
    config = yaml.load(content)
    self.num_hosts = config["num_hosts"]
    self.root_branchout = config["root_branchout"]
    self.sched_branchout = config["sched_branchout"]
    self.host_config = config["host_config"]
    self.id = 1
    self.tree_config = {
        "overcommit": config["overcommit"],
        "root_config": config["root_config"],
        "schedulers": []
    }
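# A sketch of the generator config this constructor expects. The key names
# match the reads above; every value is an assumption, and the shapes of
# overcommit, root_config and host_config (consumed elsewhere) are not shown
# by this constructor.
#
#   num_hosts: 100
#   root_branchout: 4
#   sched_branchout: 32
#   overcommit: ...
#   root_config: ...
#   host_config: ...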
def tabulate_stats():
    hosts = [h for h in Universe.get_tree().schedulers.values()
             if isinstance(h, Host)]
    hypervisors = [(h.id, h.hypervisor.hypervisor) for h in hosts]
    mem_stats = StatsHelper(dict(
        (id, x.system.memory_info().used) for (id, x) in hypervisors))
    disk_stats = StatsHelper(dict(
        (id, x.total_datastore_info().used) for (id, x) in hypervisors))
    vm_stats = StatsHelper(dict(
        (id, len(x.vm_manager._resources)) for (id, x) in hypervisors))
    header = ["Memory Mean (MB)", "Memory Std. Dev. (MB)",
              "Disk Mean (GB)", "Disk Std. Dev. (GB)",
              "VM Count Mean", "VM Count Std. Dev."]
    row = [(mem_stats.mean(), mem_stats.stddev(),
            disk_stats.mean(), disk_stats.stddev(),
            vm_stats.mean(), vm_stats.stddev())]
    return tabulate(row, headers=header, tablefmt="rst")
def check_hosts(self):
    hosts = [h for h in Universe.get_tree().schedulers.values()
             if isinstance(h, Host)]
    hypervisors = [(h.id, h.hypervisor.hypervisor) for h in hosts]
    mem_stats = StatsHelper(dict(
        (id, x.system.memory_info().used) for (id, x) in hypervisors))
    disk_stats = StatsHelper(dict(
        (id, x.total_datastore_info().used) for (id, x) in hypervisors))
    if self.expected:
        # We do not do an exact compare of stddev and mean. We check if it
        # is within 10% of the expected value.
        self.check_stat(mem_stats.stddev(), self.expected['mem_std_dev'])
        self.check_stat(mem_stats.mean(), self.expected['mem_mean'])
        self.check_stat(disk_stats.stddev(), self.expected['disk_std_dev'])
        self.check_stat(disk_stats.mean(), self.expected['disk_mean'])
def __init__(self, requests_file, max_create_interval=1):
    self.requests_file = Universe.get_path(requests_file)
    self._logger = logging.getLogger(__name__)
    self.reserve_count = 1
    self.max_create_interval = max_create_interval
def run(self):
    Universe.get_tree().pretty_print()
def __init__(self, file):
    self.file = Universe.get_path(file)
def _get_capacity_map(self):
    from psim.universe import Universe
    return Universe.get_capacity_map()
def __init__(self, requests_file):
    self.requests = []
    content = open(Universe.get_path(requests_file), 'r').read()
    requests = yaml.load(content)
    request_id = 1
    disk_id = 1

    if 'auto' in requests:
        requests = self.generate_requests(requests['auto'])

    for request in requests:
        place_request = PlaceRequest()
        resource = Resource()
        resource.disks = []
        env_info = {}

        if 'vm' in request:
            resource.vm = Vm()
            # Make the vm id look like a uuid by zero-padding. Otherwise
            # reference counting doesn't work.
            resource.vm.id = "{0:032d}".format(request_id)
            resource.vm.state = State.STARTED
            flavor = Universe.vm_flavors[request['vm']['flavor']]
            resource.vm.flavor = flavor.name
            resource.vm.flavor_info = flavor.to_thrift()
            resource.vm.disks = []

            if 'constraints' in request:
                constraints = []
                for c in request['constraints']:
                    constraint = ResourceConstraint()
                    constraint.type = RCT._NAMES_TO_VALUES[c['type']]
                    constraint.values = c['values']
                    if 'negative' in c:
                        constraint.negative = c['negative']
                    else:
                        constraint.negative = False
                    constraints.append(constraint)
                if constraints:
                    resource.vm.resource_constraints = constraints

            if 'load' in request['vm']:
                env_info['mem_load'] = request['vm']['load']['mem']

        if 'disks' in request:
            for d in request['disks']:
                disk = Disk()
                flavor = Universe.ephemeral_disk_flavors[d['flavor']]
                disk.flavor = flavor.name
                disk.flavor_info = flavor.to_thrift()
                disk.id = str(disk_id)
                disk.persistent = False
                disk.new_disk = True
                disk.capacity_gb = 1024  # hard coded in FakeVmManager
                disk_id += 1
                resource.vm.disks.append(disk)

        place_request.resource = resource
        tracing_info = TracingInfo()
        tracing_info.request_id = request_id
        place_request.tracing_info = tracing_info
        request_id += 1
        self.requests.append(PsimVmRequest(place_request, env_info))
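# A sketch of one request entry in the YAML consumed above. The key names
# mirror the lookups in the parser; the flavor names and constraint values
# are illustrative assumptions and would have to exist in
# Universe.vm_flavors / Universe.ephemeral_disk_flavors and in the loaded
# scheduler tree.
#
#   - vm:
#       flavor: core-100          # hypothetical VM flavor name
#       load:
#         mem: 1024               # optional, feeds env_info['mem_load']
#     constraints:
#       - type: NETWORK           # key into RCT._NAMES_TO_VALUES
#         values: ["VM Network"]
#         negative: false         # optional, defaults to false
#     disks:
#       - flavor: core-100-disk   # hypothetical ephemeral disk flavor name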
def setUp(self):
    Universe.reset()
    self.test_dir = os.path.join(os.path.dirname(__file__), 'test_files')
def __init__(self, result_file):
    self.result_file = Universe.get_path(result_file)
    content = open(self.result_file, 'r').read()
    self.expected = yaml.load(content)
def load_schedulers(self, tree_config, errback=lambda *args: 0):
    schedulers = tree_config["schedulers"]
    overcommit = tree_config["overcommit"]
    if "root_config" in tree_config:
        root_config = tree_config["root_config"]
    else:
        root_config = None

    # Load all schedulers
    sys.stdout.write("Loading schedulers...")
    sys.stdout.flush()
    for (i, scheduler) in enumerate(schedulers):
        id = scheduler['id']
        role = scheduler['role']
        children = []
        if 'children' in scheduler:
            children = scheduler['children']

        if id in self.schedulers:
            errback("duplicated id '%d'" % id)
            continue
        if scheduler['role'] not in self.ROLES:
            errback("invalid role '%s', should be among %s" %
                    (role, self.ROLES))
            continue

        if role in FakeBranchScheduler.ROLES:
            if role == 'root':
                self.schedulers[id] = FakeBranchScheduler(id, role, children,
                                                          root_config)
                if self.root_scheduler is None:
                    self.root_scheduler = self.schedulers[id]
                else:
                    errback("duplicated root scheduler '%d'" % id)
            else:
                self.schedulers[id] = FakeBranchScheduler(id, role, children)
        elif role == FakeLeafScheduler.ROLE:
            self.schedulers[id] = FakeLeafScheduler(id, children)
        elif role == Host.ROLE:
            from psim.universe import Universe
            cpu = self._get_key(scheduler, 'cpu')
            disk = self._get_key(scheduler, 'disk')
            mem = self._get_key(scheduler, 'mem')
            constraint_list = self._get_key(scheduler, 'constraints')
            constraint_set = set()
            for c in constraint_list:
                type = RCT._NAMES_TO_VALUES[c['type']]
                c_id = c['values'][0]
                if type == RCT.DATASTORE:
                    ds_uuid = Universe.get_ds_uuid(c_id)
                    if ds_uuid not in Universe.datastores.keys():
                        raise ValueError("Invalid Datastore: " + c_id)
                constraint_set.add(
                    ResourceConstraint(type=type, values=[c_id]))
            self.schedulers[id] = SchedulerTree.create_host(
                id, cpu, mem, disk, constraint_set, overcommit)

    # configure schedulers
    self.root_scheduler.update()
    print "Done."
    print "Loaded %d schedulers." % len(schedulers)
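# A sketch of the tree_config structure this method consumes. The top-level
# keys and the per-scheduler fields follow the reads above; the role names
# other than 'root' (assumed 'leaf' and 'host' here), the datastore name,
# and all numeric values are illustrative assumptions.
#
#   overcommit: ...
#   root_config: {}      # optional
#   schedulers:
#     - id: 1
#       role: root
#       children: [2]
#     - id: 2
#       role: leaf
#       children: [3]
#     - id: 3
#       role: host
#       cpu: 4
#       mem: 8192
#       disk: 1024
#       constraints:
#         - type: DATASTORE
#           values: ["datastore1"]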