def generate_scenario_1(self):
    """Build a 5-node / 35-VM cluster model with 8 VMs placed on hosts."""
    cluster = modelroot.ModelRoot()
    num_nodes = 5            # number of hypervisor nodes
    vms_per_node = 7         # maximum number of VMs per node
    total_vms = num_nodes * vms_per_node

    # Cluster-wide resource definitions (CPU, memory, disk, ...).
    mem = resource.Resource(resource.ResourceType.memory)
    num_cores = resource.Resource(resource.ResourceType.cpu_cores)
    disk = resource.Resource(resource.ResourceType.disk)
    for res in (mem, num_cores, disk):
        cluster.create_resource(res)

    # Hypervisors: identical capacities on every node.
    for idx in range(num_nodes):
        node = hypervisor.Hypervisor()
        node.uuid = "Node_{0}".format(idx)
        node.hostname = "hostname_{0}".format(idx)
        mem.set_capacity(node, 132)
        disk.set_capacity(node, 250)
        num_cores.set_capacity(node, 40)
        cluster.add_hypervisor(node)

    # Virtual machines: identical capacities on every VM.
    vms = []
    for idx in range(total_vms):
        vm = modelvm.VM()
        vm.uuid = "VM_{0}".format(idx)
        mem.set_capacity(vm, 2)
        disk.set_capacity(vm, 20)
        num_cores.set_capacity(vm, 10)
        vms.append(vm)
        cluster.add_vm(vm)

    # Initial placement: only the first eight VMs are mapped.
    placements = [
        ("Node_0", "VM_0"),
        ("Node_0", "VM_1"),
        ("Node_1", "VM_2"),
        ("Node_2", "VM_3"),
        ("Node_2", "VM_4"),
        ("Node_2", "VM_5"),
        ("Node_3", "VM_6"),
        ("Node_4", "VM_7"),
    ]
    mapping = cluster.get_mapping()
    for node_id, vm_id in placements:
        mapping.map(cluster.get_hypervisor_from_id(node_id),
                    cluster.get_vm_from_id(vm_id))
    return cluster
def test_exception_cluster_empty(self):
    """Executing the strategy against an empty model raises ClusterEmpty."""
    strategy = strategies.BasicConsolidation()
    empty_model = model_root.ModelRoot()
    self.assertRaises(
        exception.ClusterEmpty, strategy.execute, empty_model)
class BaseClusterDataModelCollector(loadable.LoadableSingleton):
    """Base class for cluster data model collectors.

    Lazily builds, caches and synchronizes an in-memory model of the
    cluster.  All access to the cached model is guarded by a reentrant
    lock so it can be shared across threads.
    """

    # Sentinel model used to flag the cached model as out of date.
    STALE_MODEL = model_root.ModelRoot(stale=True)

    def __init__(self, config, osc=None, audit_scope=None):
        """Initialize the collector.

        :param config: collector configuration object
        :param osc: OpenStack clients wrapper; a default one is created
                    when not supplied
        :param audit_scope: optional audit scope restricting the model
        """
        super(BaseClusterDataModelCollector, self).__init__(config)
        self.osc = osc if osc else clients.OpenStackClients()
        self._cluster_data_model = None
        self.lock = threading.RLock()
        self._audit_scope = audit_scope
        self._audit_scope_handler = None

    @abc.abstractproperty
    def audit_scope_handler(self):
        """Get audit scope handler"""
        raise NotImplementedError()

    @property
    def cluster_data_model(self):
        """Lazily built, cached cluster data model.

        Fix vs. original: the lock is now used as a context manager so
        it is released even when ``execute()`` raises (the manual
        acquire()/release() pair leaked the lock on error, deadlocking
        every subsequent access), and the ``None`` check is repeated
        inside the critical section (double-checked locking) so the
        model is built at most once even if several threads race on the
        first access.
        """
        if self._cluster_data_model is None:
            with self.lock:
                if self._cluster_data_model is None:
                    self._cluster_data_model = self.execute()
        return self._cluster_data_model

    @cluster_data_model.setter
    def cluster_data_model(self, model):
        with self.lock:
            self._cluster_data_model = model

    @abc.abstractproperty
    def notification_endpoints(self):
        """Associated notification endpoints

        :return: Associated notification endpoints
        :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances
        """
        raise NotImplementedError()

    def set_cluster_data_model_as_stale(self):
        """Flag the cached model as stale so consumers know to rebuild."""
        self.cluster_data_model = self.STALE_MODEL

    @abc.abstractmethod
    def execute(self):
        """Build a cluster data model"""
        raise NotImplementedError()

    @classmethod
    def get_config_opts(cls):
        """Return the configuration options exposed by this collector."""
        return [
            cfg.IntOpt(
                'period',
                default=3600,
                help='The time interval (in seconds) between each '
                     'synchronization of the model'),
        ]

    def get_latest_cluster_data_model(self):
        """Return a deep copy of the cached (lazily built) model."""
        LOG.debug("Creating copy")
        LOG.debug(self.cluster_data_model.to_xml())
        return copy.deepcopy(self.cluster_data_model)

    def synchronize(self):
        """Synchronize the cluster data model

        Whenever called this synchronization will perform a drop-in
        replacement with the existing cluster data model
        """
        self.cluster_data_model = self.execute()
def test_exception_cluster_empty(self):
    """The strategy raises ClusterEmpty when the model holds no nodes."""
    empty_model = model_root.ModelRoot()
    self.m_model.return_value = empty_model
    self.assertRaises(exception.ClusterEmpty, self.strategy.execute)
def build_scenario_1(self):
    """Build a 5-node model with 35 instances, the first 8 of them placed."""
    model = modelroot.ModelRoot()
    node_count = 5                 # number of compute nodes
    node_instance_count = 7        # maximum instances per node
    instance_count = node_count * node_instance_count

    # Compute nodes, all with identical capacities.
    for node_id in range(node_count):
        model.add_node(element.ComputeNode(
            id=node_id,
            uuid="Node_{0}".format(node_id),
            hostname="hostname_{0}".format(node_id),
            memory=132,
            disk=250,
            disk_capacity=250,
            vcpus=40,
        ))

    # Instances; two of them belong to dedicated projects, the rest
    # share a default project.
    special_projects = {
        "INSTANCE_1": "26F03131-32CB-4697-9D61-9123F87A8147",
        "INSTANCE_2": "109F7909-0607-4712-B32C-5CC6D49D2F15",
    }
    default_project = "91FFFE30-78A0-4152-ACD2-8310FF274DC9"
    instances = []
    for idx in range(instance_count):
        uuid = "INSTANCE_{0}".format(idx)
        instance = element.Instance(
            uuid=uuid,
            memory=2,
            disk=20,
            disk_capacity=20,
            vcpus=10,
            metadata='{"optimize": true,"top": "floor","nested": {"x": "y"}}',
            project_id=special_projects.get(uuid, default_project),
        )
        instances.append(instance)
        model.add_instance(instance)

    # Initial placement for the first eight instances.
    for instance_uuid, node_uuid in (
            ("INSTANCE_0", "Node_0"),
            ("INSTANCE_1", "Node_0"),
            ("INSTANCE_2", "Node_1"),
            ("INSTANCE_3", "Node_2"),
            ("INSTANCE_4", "Node_2"),
            ("INSTANCE_5", "Node_2"),
            ("INSTANCE_6", "Node_3"),
            ("INSTANCE_7", "Node_4")):
        model.map_instance(
            model.get_instance_by_uuid(instance_uuid),
            model.get_node_by_uuid(node_uuid))
    return model
def setUp(self):
    """Wire up fake metrics/cluster fixtures and a configured strategy."""
    super(TestWorkloadStabilization, self).setUp()

    # Fake data sources used by the tests.
    self.fake_metrics = faker_metrics_collector.FakerMetricsCollector()
    self.fake_cluster = faker_cluster_state.FakerModelCollector()

    # Expected per-host load figures asserted by the tests.
    self.hosts_load_assert = {
        'Node_0': {'cpu_util': 0.07, 'memory.resident': 7.0, 'vcpus': 40},
        'Node_1': {'cpu_util': 0.05, 'memory.resident': 5, 'vcpus': 40},
        'Node_2': {'cpu_util': 0.1, 'memory.resident': 29, 'vcpus': 40},
        'Node_3': {'cpu_util': 0.04, 'memory.resident': 8, 'vcpus': 40},
        'Node_4': {'cpu_util': 0.02, 'memory.resident': 4, 'vcpus': 40},
    }

    # Patch the strategy's lazy properties; every patch is stopped
    # automatically on teardown.
    for prop_name, mock_attr in (("compute_model", "m_model"),
                                 ("ceilometer", "m_ceilometer"),
                                 ("audit_scope", "m_audit_scope")):
        patcher = mock.patch.object(
            strategies.WorkloadStabilization, prop_name,
            new_callable=mock.PropertyMock)
        setattr(self, mock_attr, patcher.start())
        self.addCleanup(patcher.stop)

    self.m_model.return_value = model_root.ModelRoot()
    self.m_ceilometer.return_value = mock.Mock(
        statistic_aggregation=self.fake_metrics.mock_get_statistics)
    self.m_audit_scope.return_value = mock.Mock()

    # Strategy under test, configured through its input parameters.
    self.strategy = strategies.WorkloadStabilization(config=mock.Mock())
    self.strategy.input_parameters = utils.Struct()
    self.strategy.input_parameters.update({
        'metrics': ["cpu_util", "memory.resident"],
        'thresholds': {"cpu_util": 0.2, "memory.resident": 0.2},
        'weights': {"cpu_util_weight": 1.0,
                    "memory.resident_weight": 1.0},
        'instance_metrics': {"cpu_util": "hardware.cpu.util",
                             "memory.resident": "hardware.memory.used"},
        'host_choice': 'retry',
        'retry_count': 1,
    })
    # Mirror the input parameters directly onto the strategy attributes.
    self.strategy.metrics = ["cpu_util", "memory.resident"]
    self.strategy.thresholds = {"cpu_util": 0.2, "memory.resident": 0.2}
    self.strategy.weights = {"cpu_util_weight": 1.0,
                             "memory.resident_weight": 1.0}
    self.strategy.instance_metrics = {
        "cpu_util": "hardware.cpu.util",
        "memory.resident": "hardware.memory.used"}
    self.strategy.host_choice = 'retry'
    self.strategy.retry_count = 1
def execute(self):
    """Build and return a (placeholder) cluster data model."""
    cluster_model = model_root.ModelRoot()
    # Do something here...
    return cluster_model