Example no. 1
0
    def _soft_delete_removed_goals(self):
        removed_goals = [
            g for g in self.available_goals
            if g.name not in self.discovered_map['goals']]
        for removed_goal in removed_goals:
            removed_goal.soft_delete()
            filters = {"goal_id": removed_goal.id}

            invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters)
            for at in invalid_ats:
                LOG.warning(
                    _LW("Audit Template '%(audit_template)s' references a "
                        "goal that does not exist"), audit_template=at.uuid)

            stale_audits = objects.Audit.list(
                self.ctx, filters=filters, eager=True)
            for audit in stale_audits:
                LOG.warning(
                    _LW("Audit '%(audit)s' references a "
                        "goal that does not exist"), audit=audit.uuid)
                if audit.id not in self.stale_audits_map:
                    audit.state = objects.audit.State.CANCELLED
                    self.stale_audits_map[audit.id] = audit
                else:
                    self.stale_audits_map[
                        audit.id].state = objects.audit.State.CANCELLED
Example no. 2
0
    def do_execute(self):
        """Strategy execution phase

        This phase is where you should put the main logic of your strategy.
        """
        self.threshold = self.input_parameters.threshold
        self._period = self.input_parameters.period
        source_nodes, target_nodes, avg_workload, workload_cache = (
            self.group_hosts_by_cpu_util())

        if not source_nodes:
            LOG.debug("No hosts require optimization")
            return self.solution

        if not target_nodes:
            LOG.warning(
                _LW("No hosts current have CPU utilization under %s "
                    "percent, therefore there are no possible target "
                    "hosts for any migration"), self.threshold)
            return self.solution

        # choose the server with largest cpu_util
        source_nodes = sorted(source_nodes,
                              reverse=True,
                              key=lambda x: (x[self.METER_NAME]))

        instance_to_migrate = self.choose_instance_to_migrate(
            source_nodes, avg_workload, workload_cache)
        if not instance_to_migrate:
            return self.solution
        source_node, instance_src = instance_to_migrate
        # find the hosts that have enough resource for the VM to be migrated
        destination_hosts = self.filter_destination_hosts(
            target_nodes, instance_src, avg_workload, workload_cache)
        # sort the filtered result by workload
        # pick up the lowest one as dest server
        if not destination_hosts:
            # for instance.
            LOG.warning(
                _LW("No proper target host could be found, it might "
                    "be because of there's no enough CPU/Memory/DISK"))
            return self.solution
        destination_hosts = sorted(destination_hosts,
                                   key=lambda x: (x["cpu_util"]))
        # always use the host with the lowest CPU utilization
        mig_destination_node = destination_hosts[0]['node']
        # generate solution to migrate the instance to the dest server,
        if self.compute_model.migrate_instance(instance_src, source_node,
                                               mig_destination_node):
            parameters = {
                'migration_type': 'live',
                'source_node': source_node.uuid,
                'destination_node': mig_destination_node.uuid
            }
            self.solution.add_action(action_type=self.MIGRATION,
                                     resource_id=instance_src.uuid,
                                     input_parameters=parameters)
Example no. 3
0
    def do_execute(self):
        """Strategy execution phase

        This phase is where you should put the main logic of your strategy.
        """
        self.threshold = self.input_parameters.threshold
        self._period = self.input_parameters.period
        source_nodes, target_nodes, avg_workload, workload_cache = (
            self.group_hosts_by_cpu_util())

        if not source_nodes:
            LOG.debug("No hosts require optimization")
            return self.solution

        if not target_nodes:
            LOG.warning(_LW("No hosts current have CPU utilization under %s "
                            "percent, therefore there are no possible target "
                            "hosts for any migration"),
                        self.threshold)
            return self.solution

        # choose the server with largest cpu_util
        source_nodes = sorted(source_nodes,
                              reverse=True,
                              key=lambda x: (x[self.METER_NAME]))

        instance_to_migrate = self.choose_instance_to_migrate(
            source_nodes, avg_workload, workload_cache)
        if not instance_to_migrate:
            return self.solution
        source_node, instance_src = instance_to_migrate
        # find the hosts that have enough resource for the VM to be migrated
        destination_hosts = self.filter_destination_hosts(
            target_nodes, instance_src, avg_workload, workload_cache)
        # sort the filtered result by workload
        # pick up the lowest one as dest server
        if not destination_hosts:
            # for instance.
            LOG.warning(_LW("No proper target host could be found, it might "
                            "be because of there's no enough CPU/Memory/DISK"))
            return self.solution
        destination_hosts = sorted(destination_hosts,
                                   key=lambda x: (x["cpu_util"]))
        # always use the host with the lowest CPU utilization
        mig_destination_node = destination_hosts[0]['node']
        # generate solution to migrate the instance to the dest server,
        if self.compute_model.mapping.migrate_instance(
                instance_src, source_node, mig_destination_node):
            parameters = {'migration_type': 'live',
                          'source_node': source_node.uuid,
                          'destination_node': mig_destination_node.uuid}
            self.solution.add_action(action_type=self.MIGRATION,
                                     resource_id=instance_src.uuid,
                                     input_parameters=parameters)
Example no. 4
0
    def do_execute(self):
        """Strategy execution phase

        This phase is where you should put the main logic of your strategy.
        """
        src_hypervisors, target_hypervisors, avg_workload, workload_cache = (
            self.group_hosts_by_cpu_util())

        if not src_hypervisors:
            LOG.debug("No hosts require optimization")
            return self.solution

        if not target_hypervisors:
            LOG.warning(_LW("No hosts current have CPU utilization under %s "
                            "percent, therefore there are no possible target "
                            "hosts for any migrations"),
                        self.threshold)
            return self.solution

        # choose the server with largest cpu_util
        src_hypervisors = sorted(src_hypervisors,
                                 reverse=True,
                                 key=lambda x: (x[self.METER_NAME]))

        vm_to_migrate = self.choose_vm_to_migrate(
            src_hypervisors, avg_workload, workload_cache)
        if not vm_to_migrate:
            return self.solution
        source_hypervisor, vm_src = vm_to_migrate
        # find the hosts that have enough resource for the VM to be migrated
        destination_hosts = self.filter_destination_hosts(
            target_hypervisors, vm_src, avg_workload, workload_cache)
        # sort the filtered result by workload
        # pick up the lowest one as dest server
        if not destination_hosts:
            LOG.warning(_LW("No target host could be found; it might "
                            "be because there is not enough CPU, memory "
                            "or disk"))
            return self.solution
        destination_hosts = sorted(destination_hosts,
                                   key=lambda x: (x["cpu_util"]))
        # always use the host with the lowest CPU utilization
        mig_dst_hypervisor = destination_hosts[0]['hv']
        # generate solution to migrate the vm to the dest server
        if self.model.get_mapping().migrate_vm(vm_src, source_hypervisor,
                                               mig_dst_hypervisor):
            parameters = {'migration_type': 'live',
                          'src_hypervisor': source_hypervisor.uuid,
                          'dst_hypervisor': mig_dst_hypervisor.uuid}
            self.solution.add_action(action_type=self.MIGRATION,
                                     resource_id=vm_src.uuid,
                                     input_parameters=parameters)
Example no. 5
    def do_execute(self):
        self.threshold_airflow = self.input_parameters.threshold_airflow
        self.threshold_inlet_t = self.input_parameters.threshold_inlet_t
        self.threshold_power = self.input_parameters.threshold_power
        self._period = self.input_parameters.period
        source_nodes, target_nodes = self.group_hosts_by_airflow()

        if not source_nodes:
            LOG.debug("No hosts require optimization")
            return self.solution

        if not target_nodes:
            LOG.warning(
                _LW("No hosts currently have airflow under %s, "
                    "therefore there are no possible target "
                    "hosts for any migration"), self.threshold_airflow)
            return self.solution

        # migrate the instance from server with largest airflow first
        source_nodes = sorted(source_nodes,
                              reverse=True,
                              key=lambda x: (x["airflow"]))
        instances_to_migrate = self.choose_instance_to_migrate(source_nodes)
        if not instances_to_migrate:
            return self.solution
        source_node, instances_src = instances_to_migrate
        # sort hosts by airflow
        target_nodes = sorted(target_nodes, key=lambda x: (x["airflow"]))
        # find the hosts that have enough resource
        # for the instance to be migrated
        destination_hosts = self.filter_destination_hosts(
            target_nodes, instances_src)
        if not destination_hosts:
            LOG.warning(
                _LW("No target host could be found; it might "
                    "be because there is not enough resources"))
            return self.solution
        # generate solution to migrate the instance to the dest server,
        for info in destination_hosts:
            instance = info['instance']
            destination_node = info['node']
            if self.compute_model.migrate_instance(instance, source_node,
                                                   destination_node):
                parameters = {
                    'migration_type': 'live',
                    'source_node': source_node.uuid,
                    'destination_node': destination_node.uuid
                }
                self.solution.add_action(action_type=self.MIGRATION,
                                         resource_id=instance.uuid,
                                         input_parameters=parameters)
Example no. 6
0
    def _soft_delete_removed_strategies(self):
        removed_strategies = [
            s for s in self.available_strategies
            if s.name not in self.discovered_map['strategies']]

        for removed_strategy in removed_strategies:
            removed_strategy.soft_delete()
            filters = {"strategy_id": removed_strategy.id}
            invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters)
            for at in invalid_ats:
                LOG.info(
                    _LI("Audit Template '%(audit_template)s' references a "
                        "strategy that does not exist"),
                    audit_template=at.uuid)
                # In this case we can reset the strategy ID to None
                # so the audit template can still achieve the same goal
                # but with a different strategy
                if at.id not in self.stale_audit_templates_map:
                    at.strategy_id = None
                    self.stale_audit_templates_map[at.id] = at
                else:
                    self.stale_audit_templates_map[at.id].strategy_id = None

            stale_audits = objects.Audit.list(
                self.ctx, filters=filters, eager=True)
            for audit in stale_audits:
                LOG.warning(
                    _LW("Audit '%(audit)s' references a "
                        "strategy that does not exist"), audit=audit.uuid)
                if audit.id not in self.stale_audits_map:
                    audit.state = objects.audit.State.CANCELLED
                    self.stale_audits_map[audit.id] = audit
                else:
                    self.stale_audits_map[
                        audit.id].state = objects.audit.State.CANCELLED

            stale_action_plans = objects.ActionPlan.list(
                self.ctx, filters=filters, eager=True)
            for action_plan in stale_action_plans:
                LOG.warning(
                    _LW("Action Plan '%(action_plan)s' references a "
                        "strategy that does not exist"),
                    action_plan=action_plan.uuid)
                if action_plan.id not in self.stale_action_plans_map:
                    action_plan.state = objects.action_plan.State.CANCELLED
                    self.stale_action_plans_map[action_plan.id] = action_plan
                else:
                    self.stale_action_plans_map[
                        action_plan.id].state = (
                            objects.action_plan.State.CANCELLED)
Example no. 7
0
    def do_execute(self):
        self.threshold_airflow = self.input_parameters.threshold_airflow
        self.threshold_inlet_t = self.input_parameters.threshold_inlet_t
        self.threshold_power = self.input_parameters.threshold_power
        self._period = self.input_parameters.period
        source_nodes, target_nodes = self.group_hosts_by_airflow()

        if not source_nodes:
            LOG.debug("No hosts require optimization")
            return self.solution

        if not target_nodes:
            LOG.warning(_LW("No hosts currently have airflow under %s, "
                            "therefore there are no possible target "
                            "hosts for any migration"),
                        self.threshold_airflow)
            return self.solution

        # migrate the instance from server with largest airflow first
        source_nodes = sorted(source_nodes,
                              reverse=True,
                              key=lambda x: (x["airflow"]))
        instances_to_migrate = self.choose_instance_to_migrate(source_nodes)
        if not instances_to_migrate:
            return self.solution
        source_node, instances_src = instances_to_migrate
        # sort hosts by airflow
        target_nodes = sorted(target_nodes, key=lambda x: (x["airflow"]))
        # find the hosts that have enough resource
        # for the instance to be migrated
        destination_hosts = self.filter_destination_hosts(
            target_nodes, instances_src)
        if not destination_hosts:
            LOG.warning(_LW("No target host could be found; it might "
                            "be because there is not enough resources"))
            return self.solution
        # generate solution to migrate the instance to the dest server,
        for info in destination_hosts:
            instance = info['instance']
            destination_node = info['node']
            if self.compute_model.mapping.migrate_instance(
                    instance, source_node, destination_node):
                parameters = {'migration_type': 'live',
                              'source_node': source_node.uuid,
                              'destination_node': destination_node.uuid}
                self.solution.add_action(action_type=self.MIGRATION,
                                         resource_id=instance.uuid,
                                         input_parameters=parameters)
Example no. 8
0
    def group_hosts_by_outlet_temp(self):
        """Group hosts based on outlet temp meters"""
        hypervisors = self.model.get_all_hypervisors()
        size_cluster = len(hypervisors)
        if size_cluster == 0:
            raise wexc.ClusterEmpty()

        hosts_need_release = []
        hosts_target = []
        for hypervisor_id in hypervisors:
            hypervisor = self.model.get_hypervisor_from_id(
                hypervisor_id)
            resource_id = hypervisor.uuid

            outlet_temp = self.ceilometer.statistic_aggregation(
                resource_id=resource_id,
                meter_name=self._meter,
                period="30",
                aggregate='avg')
            # some hosts may not have outlet temp meters, remove from target
            if outlet_temp is None:
                LOG.warning(_LW("%s: no outlet temp data"), resource_id)
                continue

            LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp))
            hvmap = {'hv': hypervisor, 'outlet_temp': outlet_temp}
            if outlet_temp >= self.threshold:
                # mark the hypervisor to release resources
                hosts_need_release.append(hvmap)
            else:
                hosts_target.append(hvmap)
        return hosts_need_release, hosts_target
Example no. 9
0
    def group_hosts_by_outlet_temp(self):
        """Group hosts based on outlet temp meters"""
        nodes = self.compute_model.get_all_compute_nodes()
        size_cluster = len(nodes)
        if size_cluster == 0:
            raise wexc.ClusterEmpty()

        hosts_need_release = []
        hosts_target = []
        for node in nodes.values():
            resource_id = node.uuid

            outlet_temp = self.ceilometer.statistic_aggregation(
                resource_id=resource_id,
                meter_name=self._meter,
                period="30",
                aggregate='avg')
            # some hosts may not have outlet temp meters, remove from target
            if outlet_temp is None:
                LOG.warning(_LW("%s: no outlet temp data"), resource_id)
                continue

            LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp))
            instance_data = {'node': node, 'outlet_temp': outlet_temp}
            if outlet_temp >= self.threshold:
                # mark the node to release resources
                hosts_need_release.append(instance_data)
            else:
                hosts_target.append(instance_data)
        return hosts_need_release, hosts_target
Example no. 10
0
    def group_hosts_by_airflow(self):
        """Group hosts based on airflow meters"""

        nodes = self.compute_model.get_all_compute_nodes()
        if not nodes:
            raise wexc.ClusterEmpty()
        overload_hosts = []
        nonoverload_hosts = []
        for node_id in nodes:
            node = self.compute_model.get_node_from_id(
                node_id)
            resource_id = node.uuid
            airflow = self.ceilometer.statistic_aggregation(
                resource_id=resource_id,
                meter_name=self.meter_name_airflow,
                period=self._period,
                aggregate='avg')
            # some hosts may not have airflow meter, remove from target
            if airflow is None:
                LOG.warning(_LW("%s: no airflow data"), resource_id)
                continue

            LOG.debug("%s: airflow %f" % (resource_id, airflow))
            nodemap = {'node': node, 'airflow': airflow}
            if airflow >= self.threshold_airflow:
                # mark the node to release resources
                overload_hosts.append(nodemap)
            else:
                nonoverload_hosts.append(nodemap)
        return overload_hosts, nonoverload_hosts
Example no. 11
0
    def group_hosts_by_outlet_temp(self):
        """Group hosts based on outlet temp meters"""
        nodes = self.compute_model.get_all_compute_nodes()
        size_cluster = len(nodes)
        if size_cluster == 0:
            raise wexc.ClusterEmpty()

        hosts_need_release = []
        hosts_target = []
        for node_id in nodes:
            node = self.compute_model.get_node_from_id(
                node_id)
            resource_id = node.uuid

            outlet_temp = self.ceilometer.statistic_aggregation(
                resource_id=resource_id,
                meter_name=self._meter,
                period="30",
                aggregate='avg')
            # some hosts may not have outlet temp meters, remove from target
            if outlet_temp is None:
                LOG.warning(_LW("%s: no outlet temp data"), resource_id)
                continue

            LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp))
            instance_data = {'node': node, 'outlet_temp': outlet_temp}
            if outlet_temp >= self.threshold:
                # mark the node to release resources
                hosts_need_release.append(instance_data)
            else:
                hosts_target.append(instance_data)
        return hosts_need_release, hosts_target
Example no. 12
0
    def schedule(self, context, audit_id, solution):
        LOG.debug('Create an action plan for the audit uuid: %s ', audit_id)
        action_plan = self._create_action_plan(context, audit_id)

        actions = list(solution.actions)
        to_schedule = []
        for action in actions:
            json_action = self.create_action(
                action_plan_id=action_plan.id,
                action_type=action.get('action_type'),
                input_parameters=action.get('input_parameters'))
            to_schedule.append(
                (self.priorities[action.get('action_type')], json_action))

        # scheduling
        scheduled = sorted(to_schedule, key=lambda x: (x[0]))
        if len(scheduled) == 0:
            LOG.warning(_LW("The action plan is empty"))
            action_plan.first_action_id = None
            action_plan.save()
        else:
            # create the first action
            parent_action = self._create_action(context, scheduled[0][1], None)
            # remove first
            scheduled.pop(0)

            action_plan.first_action_id = parent_action.id
            action_plan.save()

            for s_action in scheduled:
                current_action = self._create_action(context, s_action[1],
                                                     parent_action)
                parent_action = current_action

        return action_plan
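The scheduler above orders actions by a per-type weight and then chains them through first_action_id and parent links. A toy illustration of the ordering step only, with made-up weights and action types (just the sorted() call mirrors schedule()):

priorities = {'nop': 0, 'change_service_state': 1, 'migrate': 3}
to_schedule = [
    (priorities['migrate'], {'action_type': 'migrate'}),
    (priorities['nop'], {'action_type': 'nop'}),
]
# Same ordering rule as in schedule(): lowest weight first.
scheduled = sorted(to_schedule, key=lambda x: x[0])
# -> the 'nop' action would be created first and become the plan's first
#    action; every later action is linked to the previously created one.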
Example no. 13
0
 def revert(self, *args, **kwargs):
     LOG.warning(_LW("Revert action: %s"), self.name)
     try:
         self.action.revert()
     except Exception as e:
         LOG.exception(e)
         LOG.critical(_LC("Oops! We need a disaster recover plan."))
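The _LW, _LI, _LE and _LC markers used throughout these snippets come from oslo.i18n translator factories. A minimal sketch of the conventional _i18n module they are imported from (the 'watcher' domain name is an assumption here):

import oslo_i18n

_translators = oslo_i18n.TranslatorFactory(domain='watcher')

# Primary translation function, e.g. _("message")
_ = _translators.primary

# Log-level translation markers paired with LOG.info/warning/error/critical
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical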
Example no. 14
0
    def group_hosts_by_airflow(self):
        """Group hosts based on airflow meters"""

        nodes = self.compute_model.get_all_compute_nodes()
        if not nodes:
            raise wexc.ClusterEmpty()
        overload_hosts = []
        nonoverload_hosts = []
        for node_id in nodes:
            node = self.compute_model.get_node_by_uuid(
                node_id)
            resource_id = node.uuid
            airflow = self.ceilometer.statistic_aggregation(
                resource_id=resource_id,
                meter_name=self.meter_name_airflow,
                period=self._period,
                aggregate='avg')
            # some hosts may not have airflow meter, remove from target
            if airflow is None:
                LOG.warning(_LW("%s: no airflow data"), resource_id)
                continue

            LOG.debug("%s: airflow %f" % (resource_id, airflow))
            nodemap = {'node': node, 'airflow': airflow}
            if airflow >= self.threshold_airflow:
                # mark the node to release resources
                overload_hosts.append(nodemap)
            else:
                nonoverload_hosts.append(nodemap)
        return overload_hosts, nonoverload_hosts
Example no. 15
0
 def _set_status(self, name):
     service = objects.Service.get_by_name(pecan.request.context, name)
     last_heartbeat = (service.last_seen_up or service.updated_at
                       or service.created_at)
     if isinstance(last_heartbeat, six.string_types):
         # NOTE(russellb) If this service came in over rpc via
         # conductor, then the timestamp will be a string and needs to be
         # converted back to a datetime.
         last_heartbeat = timeutils.parse_strtime(last_heartbeat)
     else:
         # Objects have proper UTC timezones, but the timeutils comparison
         # below does not (and will fail)
         last_heartbeat = last_heartbeat.replace(tzinfo=None)
     elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
     is_up = abs(elapsed) <= CONF.service_down_time
     if not is_up:
         LOG.warning(
             _LW('Seems service %(name)s on host %(host)s is down. '
                  'Last heartbeat was %(lhb)s. '
                 'Elapsed time is %(el)s'), {
                     'name': service.name,
                     'host': service.host,
                     'lhb': str(last_heartbeat),
                     'el': str(elapsed)
                 })
         self._status = objects.service.ServiceStatus.FAILED
     else:
         self._status = objects.service.ServiceStatus.ACTIVE
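A small worked illustration of the liveness test above; CONF.service_down_time and the heartbeat age are made-up values for this sketch:

from datetime import datetime, timedelta

service_down_time = 60  # seconds, assumed configuration value
last_heartbeat = datetime.utcnow() - timedelta(seconds=45)
elapsed = (datetime.utcnow() - last_heartbeat).total_seconds()
is_up = abs(elapsed) <= service_down_time  # True here: ~45s <= 60s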
Example no. 16
0
 def revert(self, *args, **kwargs):
     LOG.warning(_LW("Revert action: %s"), self.name)
     try:
         # TODO(jed): do we need to update the states in case of failure?
         self.action.revert()
     except Exception as e:
         LOG.exception(e)
         LOG.critical(_LC("Oops! We need a disaster recover plan."))
Example no. 17
0
    def do_execute(self):
        src_hypervisors, target_hypervisors = (
            self.group_hosts_by_airflow())

        if not src_hypervisors:
            LOG.debug("No hosts require optimization")
            return self.solution

        if not target_hypervisors:
            LOG.warning(_LW("No hosts current have airflow under %s "
                            ", therefore there are no possible target "
                            "hosts for any migration"),
                        self.threshold_airflow)
            return self.solution

        # migrate the vm from server with largest airflow first
        src_hypervisors = sorted(src_hypervisors,
                                 reverse=True,
                                 key=lambda x: (x["airflow"]))
        vms_to_migrate = self.choose_vm_to_migrate(src_hypervisors)
        if not vms_to_migrate:
            return self.solution
        source_hypervisor, vms_src = vms_to_migrate
        # sort hosts by airflow
        target_hypervisors = sorted(target_hypervisors,
                                    key=lambda x: (x["airflow"]))
        # find the hosts that have enough resource for the VM to be migrated
        destination_hosts = self.filter_destination_hosts(target_hypervisors,
                                                          vms_src)
        if not destination_hosts:
            LOG.warning(_LW("No proper target host could be found, it might "
                            "be because of there's no enough resource"))
            return self.solution
        # generate solution to migrate the vm to the dest server,
        for info in destination_hosts:
            vm_src = info['vm']
            mig_dst_hypervisor = info['hv']
            if self.model.get_mapping().migrate_vm(vm_src,
                                                   source_hypervisor,
                                                   mig_dst_hypervisor):
                parameters = {'migration_type': 'live',
                              'src_hypervisor': source_hypervisor.uuid,
                              'dst_hypervisor': mig_dst_hypervisor.uuid}
                self.solution.add_action(action_type=self.MIGRATION,
                                         resource_id=vm_src.uuid,
                                         input_parameters=parameters)
Example no. 18
0
    def do_execute(self):
        unsuccessful_migration = 0

        for node_uuid, node in self.compute_model.get_all_compute_nodes(
        ).items():
            node_instances = self.compute_model.mapping.get_node_instances(
                node)
            if node_instances:
                if node.state == element.ServiceState.ENABLED:
                    self.add_change_service_state(
                        node_uuid, element.ServiceState.DISABLED.value)

        scores = self.compute_score_of_nodes()
        # Sort compute nodes by Score decreasing
        sorted_scores = sorted(scores, reverse=True, key=lambda x: (x[1]))
        LOG.debug("Compute node(s) BFD %s", sorted_scores)
        # Get Node to be released
        if len(scores) == 0:
            LOG.warning(
                _LW("The workloads of the compute nodes"
                    " of the cluster is zero"))
            return

        while sorted_scores and (
                not self.migration_attempts
                or self.migration_attempts >= unsuccessful_migration):
            node_to_release, instance_score = self.node_and_instance_score(
                sorted_scores)

            # Sort instances by Score
            sorted_instances = sorted(instance_score,
                                      reverse=True,
                                      key=lambda x: (x[1]))
            # BFD: Best Fit Decrease
            LOG.debug("Instance(s) BFD %s", sorted_instances)

            migrations = self.calculate_num_migrations(sorted_instances,
                                                       node_to_release,
                                                       sorted_scores)

            unsuccessful_migration = self.unsuccessful_migration_actualization(
                migrations, unsuccessful_migration)

            if not migrations:
                # We don't have any possible migrations to perform on this node
                # so we discard the node so we can try to migrate instances
                # from the next one in the list
                sorted_scores.pop()

        infos = {
            "released_compute_nodes_count": self.number_of_released_nodes,
            "instance_migrations_count": self.number_of_migrations,
            "efficacy": self.efficacy
        }
        LOG.debug(infos)
Example no. 19
0
 def _remove_instances_from_model(self, instances_to_remove, cluster_model):
     for instance_uuid in instances_to_remove:
         try:
             node_name = (cluster_model.get_mapping().
                          get_node_by_instance_uuid(instance_uuid).uuid)
         except KeyError:
             LOG.warning(
                 _LW("The following instance %s cannot be found. "
                     "It might be deleted from CDM along with node"
                     " instance was hosted on."), instance_uuid)
             continue
         self._remove_instance(cluster_model, instance_uuid, node_name)
Example no. 20
0
    def do_execute(self):
        # the migration plan will be triggered when the outlet temperature
        # reaches threshold
        self.threshold = self.input_parameters.threshold
        LOG.debug("Initializing Outlet temperature strategy with threshold=%d",
                  self.threshold)
        hosts_need_release, hosts_target = self.group_hosts_by_outlet_temp()

        if len(hosts_need_release) == 0:
            # TODO(zhenzanz): return something right if there's no hot servers
            LOG.debug("No hosts require optimization")
            return self.solution

        if len(hosts_target) == 0:
            LOG.warning(_LW("No hosts under outlet temp threshold found"))
            return self.solution

        # choose the server with highest outlet t
        hosts_need_release = sorted(hosts_need_release,
                                    reverse=True,
                                    key=lambda x: (x["outlet_temp"]))

        instance_to_migrate = self.choose_instance_to_migrate(
            hosts_need_release)
        # calculate the instance's cpu cores,memory,disk needs
        if instance_to_migrate is None:
            return self.solution

        mig_source_node, instance_src = instance_to_migrate
        dest_servers = self.filter_dest_servers(hosts_target, instance_src)
        # sort the filtered result by outlet temp
        # pick up the lowest one as dest server
        if len(dest_servers) == 0:
            # TODO(zhenzanz): maybe to warn that there's no resource
            # for instance.
            LOG.info(_LI("No proper target host could be found"))
            return self.solution

        dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"]))
        # always use the host with the lowest outlet temperature
        mig_destination_node = dest_servers[0]['node']
        # generate solution to migrate the instance to the dest server,
        if self.compute_model.migrate_instance(instance_src, mig_source_node,
                                               mig_destination_node):
            parameters = {
                'migration_type': 'live',
                'source_node': mig_source_node.uuid,
                'destination_node': mig_destination_node.uuid
            }
            self.solution.add_action(action_type=self.MIGRATION,
                                     resource_id=instance_src.uuid,
                                     input_parameters=parameters)
Example no. 21
0
 def _soft_delete_removed_goals(self):
     removed_goals = [
         g for g in self.available_goals
         if g.name not in self.discovered_map['goals']]
     for removed_goal in removed_goals:
         removed_goal.soft_delete()
         filters = {"goal_id": removed_goal.id}
         invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters)
         for at in invalid_ats:
             LOG.warning(
                 _LW("Audit Template '%(audit_template)s' references a "
                     "goal that does not exist"),
                 audit_template=at.uuid)
Example no. 22
0
 def remove_instances_from_model(self, instances_to_remove, cluster_model):
     for instance_uuid in instances_to_remove:
         try:
             node_name = cluster_model.get_node_by_instance_uuid(
                 instance_uuid).uuid
         except exception.ComputeResourceNotFound:
             LOG.warning(
                 _LW("The following instance %s cannot be found. "
                     "It might be deleted from CDM along with node"
                     " instance was hosted on."), instance_uuid)
             continue
         self.remove_instance(
             cluster_model,
             cluster_model.get_instance_by_uuid(instance_uuid), node_name)
Example no. 23
0
    def do_execute(self):
        # the migration plan will be triggered when the outlet temperature
        # reaches threshold
        self.threshold = self.input_parameters.threshold
        LOG.debug("Initializing Outlet temperature strategy with threshold=%d",
                  self.threshold)
        hosts_need_release, hosts_target = self.group_hosts_by_outlet_temp()

        if len(hosts_need_release) == 0:
            # TODO(zhenzanz): return something right if there's no hot servers
            LOG.debug("No hosts require optimization")
            return self.solution

        if len(hosts_target) == 0:
            LOG.warning(_LW("No hosts under outlet temp threshold found"))
            return self.solution

        # choose the server with highest outlet t
        hosts_need_release = sorted(hosts_need_release,
                                    reverse=True,
                                    key=lambda x: (x["outlet_temp"]))

        instance_to_migrate = self.choose_instance_to_migrate(
            hosts_need_release)
        # calculate the instance's cpu cores,memory,disk needs
        if instance_to_migrate is None:
            return self.solution

        mig_source_node, instance_src = instance_to_migrate
        dest_servers = self.filter_dest_servers(hosts_target, instance_src)
        # sort the filtered result by outlet temp
        # pick up the lowest one as dest server
        if len(dest_servers) == 0:
            # TODO(zhenzanz): maybe to warn that there's no resource
            # for instance.
            LOG.info(_LI("No proper target host could be found"))
            return self.solution

        dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"]))
        # always use the host with the lowest outlet temperature
        mig_destination_node = dest_servers[0]['node']
        # generate solution to migrate the instance to the dest server,
        if self.compute_model.mapping.migrate_instance(
                instance_src, mig_source_node, mig_destination_node):
            parameters = {'migration_type': 'live',
                          'source_node': mig_source_node.uuid,
                          'destination_node': mig_destination_node.uuid}
            self.solution.add_action(action_type=self.MIGRATION,
                                     resource_id=instance_src.uuid,
                                     input_parameters=parameters)
Example no. 24
0
    def filter_destination_hosts(self, hosts, instances_to_migrate):
        """Find instance and host with sufficient available resources"""

        cap_cores = self.compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores)
        cap_disk = self.compute_model.get_resource_by_uuid(
            element.ResourceType.disk)
        cap_mem = self.compute_model.get_resource_by_uuid(
            element.ResourceType.memory)
        # large instances go first
        instances_to_migrate = sorted(
            instances_to_migrate, reverse=True,
            key=lambda x: (cap_cores.get_capacity(x)))
        # find hosts for instances
        destination_hosts = []
        for instance_to_migrate in instances_to_migrate:
            required_cores = cap_cores.get_capacity(instance_to_migrate)
            required_disk = cap_disk.get_capacity(instance_to_migrate)
            required_mem = cap_mem.get_capacity(instance_to_migrate)
            dest_migrate_info = {}
            for nodemap in hosts:
                host = nodemap['node']
                if 'cores_used' not in nodemap:
                    # calculate the available resources
                    nodemap['cores_used'], nodemap['mem_used'],\
                        nodemap['disk_used'] = self.calculate_used_resource(
                            host, cap_cores, cap_mem, cap_disk)
                cores_available = (cap_cores.get_capacity(host) -
                                   nodemap['cores_used'])
                disk_available = (cap_disk.get_capacity(host) -
                                  nodemap['disk_used'])
                mem_available = (
                    cap_mem.get_capacity(host) - nodemap['mem_used'])
                if (cores_available >= required_cores and
                        disk_available >= required_disk and
                        mem_available >= required_mem):
                    dest_migrate_info['instance'] = instance_to_migrate
                    dest_migrate_info['node'] = host
                    nodemap['cores_used'] += required_cores
                    nodemap['mem_used'] += required_mem
                    nodemap['disk_used'] += required_disk
                    destination_hosts.append(dest_migrate_info)
                    break
        # check if all instances have target hosts
        if len(destination_hosts) != len(instances_to_migrate):
            LOG.warning(_LW("Not all target hosts could be found; it might "
                            "be because there is not enough resource"))
            return None
        return destination_hosts
Example no. 25
0
    def filter_destination_hosts(self, hosts, instances_to_migrate):
        """Find instance and host with sufficient available resources"""

        cap_cores = self.compute_model.get_resource_from_id(
            element.ResourceType.cpu_cores)
        cap_disk = self.compute_model.get_resource_from_id(
            element.ResourceType.disk)
        cap_mem = self.compute_model.get_resource_from_id(
            element.ResourceType.memory)
        # large instances go first
        instances_to_migrate = sorted(
            instances_to_migrate, reverse=True,
            key=lambda x: (cap_cores.get_capacity(x)))
        # find hosts for instances
        destination_hosts = []
        for instance_to_migrate in instances_to_migrate:
            required_cores = cap_cores.get_capacity(instance_to_migrate)
            required_disk = cap_disk.get_capacity(instance_to_migrate)
            required_mem = cap_mem.get_capacity(instance_to_migrate)
            dest_migrate_info = {}
            for nodemap in hosts:
                host = nodemap['node']
                if 'cores_used' not in nodemap:
                    # calculate the available resources
                    nodemap['cores_used'], nodemap['mem_used'],\
                        nodemap['disk_used'] = self.calculate_used_resource(
                            host, cap_cores, cap_mem, cap_disk)
                cores_available = (cap_cores.get_capacity(host) -
                                   nodemap['cores_used'])
                disk_available = (cap_disk.get_capacity(host) -
                                  nodemap['disk_used'])
                mem_available = (
                    cap_mem.get_capacity(host) - nodemap['mem_used'])
                if (cores_available >= required_cores and
                        disk_available >= required_disk and
                        mem_available >= required_mem):
                    dest_migrate_info['instance'] = instance_to_migrate
                    dest_migrate_info['node'] = host
                    nodemap['cores_used'] += required_cores
                    nodemap['mem_used'] += required_mem
                    nodemap['disk_used'] += required_disk
                    destination_hosts.append(dest_migrate_info)
                    break
        # check if all instances have target hosts
        if len(destination_hosts) != len(instances_to_migrate):
            LOG.warning(_LW("Not all target hosts could be found; it might "
                            "be because there is not enough resource"))
            return None
        return destination_hosts
Example no. 26
0
def safe_rstrip(value, chars=None):
    """Removes trailing characters from a string if that does not make it empty

    :param value: A string value that will be stripped.
    :param chars: Characters to remove.
    :return: Stripped value.

    """
    if not isinstance(value, six.string_types):
        LOG.warning(
            _LW("Failed to remove trailing character. Returning original object."
                "Supplied object is not a string: %s,"), value)
        return value

    return value.rstrip(chars) or value
Example no. 27
0
def safe_rstrip(value, chars=None):
    """Removes trailing characters from a string if that does not make it empty

    :param value: A string value that will be stripped.
    :param chars: Characters to remove.
    :return: Stripped value.

    """
    if not isinstance(value, six.string_types):
        LOG.warning(_LW(
            "Failed to remove trailing character. Returning original object."
            "Supplied object is not a string: %s,"), value)
        return value

    return value.rstrip(chars) or value
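A brief usage sketch for safe_rstrip, based only on the behaviour shown above (the literal arguments are example values):

safe_rstrip("devstack///", "/")  # -> "devstack"
safe_rstrip("///", "/")          # -> "///" (stripping would leave an empty
                                 #    string, so the original value is kept)
safe_rstrip(42)                  # -> 42, after logging the warning, because
                                 #    the input is not a string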
Example no. 28
0
    def filter_destination_hosts(self, hosts, vms_to_migrate):
        '''return vm and host with sufficient available resources'''

        cap_cores = self.model.get_resource_from_id(
            resource.ResourceType.cpu_cores)
        cap_disk = self.model.get_resource_from_id(resource.ResourceType.disk)
        cap_mem = self.model.get_resource_from_id(
            resource.ResourceType.memory)
        # large VMs go first
        vms_to_migrate = sorted(vms_to_migrate, reverse=True,
                                key=lambda x: (cap_cores.get_capacity(x)))
        # find hosts for VMs
        destination_hosts = []
        for vm_to_migrate in vms_to_migrate:
            required_cores = cap_cores.get_capacity(vm_to_migrate)
            required_disk = cap_disk.get_capacity(vm_to_migrate)
            required_mem = cap_mem.get_capacity(vm_to_migrate)
            dest_migrate_info = {}
            for hvmap in hosts:
                host = hvmap['hv']
                if 'cores_used' not in hvmap:
                    # calculate the available resources
                    hvmap['cores_used'], hvmap['mem_used'],\
                        hvmap['disk_used'] = self.calculate_used_resource(
                            host, cap_cores, cap_mem, cap_disk)
                cores_available = (cap_cores.get_capacity(host) -
                                   hvmap['cores_used'])
                disk_available = (cap_disk.get_capacity(host) -
                                  hvmap['disk_used'])
                mem_available = cap_mem.get_capacity(host) - hvmap['mem_used']
                if (cores_available >= required_cores and
                        disk_available >= required_disk and
                        mem_available >= required_mem):
                    dest_migrate_info['vm'] = vm_to_migrate
                    dest_migrate_info['hv'] = host
                    hvmap['cores_used'] += required_cores
                    hvmap['mem_used'] += required_mem
                    hvmap['disk_used'] += required_disk
                    destination_hosts.append(dest_migrate_info)
                    break
        # check if all vms have target hosts
        if len(destination_hosts) != len(vms_to_migrate):
            LOG.warning(_LW("Not all target hosts could be found, it might "
                            "be because of there's no enough resource"))
            return None
        return destination_hosts
Example no. 29
0
 def _configure(self):
     try:
         self.__transport = om.get_transport(CONF)
         self.__notifier = self.build_notifier()
         if len(self.__endpoints):
             target = om.Target(
                 topic=self.topic_name,
                 # For compatibility, we can override it with 'host' opt
                 server=CONF.host or socket.getfqdn(),
                 version=self.__version,
             )
             self.__server = self.build_server(target)
         else:
             LOG.warning(_LW("No endpoint defined; can only publish events"))
     except Exception as e:
         LOG.exception(e)
         LOG.error(_LE("Messaging configuration error"))
Example no. 30
0
 def _configure(self):
     try:
         self.__transport = om.get_transport(CONF)
         self.__notifier = self.build_notifier()
         if len(self.__endpoints):
             target = om.Target(
                 topic=self.topic_name,
                 # For compatibility, we can override it with 'host' opt
                 server=CONF.host or socket.getfqdn(),
                 version=self.__version,
             )
             self.__server = self.build_server(target)
         else:
             LOG.warning(
                 _LW("No endpoint defined; can only publish events"))
     except Exception as e:
         LOG.exception(e)
         LOG.error(_LE("Messaging configuration error"))
Example no. 31
0
    def schedule(self, context, audit_id, solution):
        LOG.debug('Creating an action plan for the audit uuid: %s', audit_id)
        action_plan = self.create_action_plan(context, audit_id, solution)

        sorted_weighted_actions = self.get_sorted_actions_by_weight(
            context, action_plan, solution)
        action_graph = self.compute_action_graph(sorted_weighted_actions)

        self._create_efficacy_indicators(
            context, action_plan.id, solution.efficacy_indicators)

        if len(action_graph.nodes()) == 0:
            LOG.warning(_LW("The action plan is empty"))
            action_plan.state = objects.action_plan.State.SUCCEEDED
            action_plan.save()

        self.create_scheduled_actions(action_plan, action_graph)
        return action_plan
Example no. 32
0
    def unmap_from_id(self, node_uuid, vm_uuid):
        """Remove the instance (by id) from the hypervisor (by id)

        :rtype : object
        """

        try:
            self.lock.acquire()
            if str(node_uuid) in self._mapping_hypervisors:
                self._mapping_hypervisors[str(node_uuid)].remove(str(vm_uuid))
                # remove vm
                self.mapping_vm.pop(vm_uuid)
            else:
                LOG.warning(_LW(
                    "trying to delete the virtual machine %(vm)s but it was "
                    "not found on hypervisor %(hyp)s"),
                    {'vm': vm_uuid, 'hyp': node_uuid})
        finally:
            self.lock.release()
Example no. 33
0
    def get_or_create_instance(self, instance_uuid, node_uuid=None):
        try:
            if node_uuid:
                self.get_or_create_node(node_uuid)
        except exception.ComputeNodeNotFound:
            LOG.warning(
                _LW("Could not find compute node %(node)s for "
                    "instance %(instance)s"),
                dict(node=node_uuid, instance=instance_uuid))
        try:
            instance = self.cluster_data_model.get_instance_by_uuid(
                instance_uuid)
        except exception.InstanceNotFound:
            # The instance didn't exist yet so we create a new instance object
            LOG.debug("New instance created: %s", instance_uuid)
            instance = element.Instance(uuid=instance_uuid)

            self.cluster_data_model.add_instance(instance)

        return instance
Example no. 34
0
    def unmap_from_id(self, node_uuid, instance_uuid):
        """Remove the instance (by id) from the node (by id)

        :rtype : object
        """

        try:
            self.lock.acquire()
            if str(node_uuid) in self.compute_node_mapping:
                self.compute_node_mapping[str(node_uuid)].remove(
                    str(instance_uuid))
                # remove instance
                self.instance_mapping.pop(instance_uuid)
            else:
                LOG.warning(_LW(
                    "Trying to delete the instance %(instance)s but it was "
                    "not found on node %(node)s"),
                    {'instance': instance_uuid, 'node': node_uuid})
        finally:
            self.lock.release()
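The explicit acquire/release in these unmap helpers can also be written with a with block, which releases the lock even if an exception is raised. A minimal self-contained sketch; the class, lock and mapping attributes only mirror the snippet above and are not the project's actual API:

import logging
import threading

LOG = logging.getLogger(__name__)


class Mapping(object):
    def __init__(self):
        self.lock = threading.Lock()
        self.compute_node_mapping = {}  # node uuid -> set of instance uuids
        self.instance_mapping = {}      # instance uuid -> node uuid

    def unmap_from_id(self, node_uuid, instance_uuid):
        # The context manager is equivalent to the try/finally pattern above.
        with self.lock:
            if str(node_uuid) in self.compute_node_mapping:
                self.compute_node_mapping[str(node_uuid)].remove(
                    str(instance_uuid))
                self.instance_mapping.pop(instance_uuid)
            else:
                LOG.warning("Trying to delete the instance %(instance)s but "
                            "it was not found on node %(node)s",
                            {'instance': instance_uuid, 'node': node_uuid})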
Example no. 35
0
    def schedule(self, context, audit_id, solution):
        LOG.debug('Create an action plan for the audit uuid: %s ', audit_id)
        priorities = self.config.weights
        action_plan = self._create_action_plan(context, audit_id, solution)

        actions = list(solution.actions)
        to_schedule = []
        for action in actions:
            json_action = self.create_action(
                action_plan_id=action_plan.id,
                action_type=action.get('action_type'),
                input_parameters=action.get('input_parameters'))
            to_schedule.append((priorities[action.get('action_type')],
                                json_action))

        self._create_efficacy_indicators(
            context, action_plan.id, solution.efficacy_indicators)

        # scheduling
        scheduled = sorted(to_schedule, key=lambda x: (x[0]))
        if len(scheduled) == 0:
            LOG.warning(_LW("The action plan is empty"))
            action_plan.first_action_id = None
            action_plan.save()
        else:
            # create the first action
            parent_action = self._create_action(context,
                                                scheduled[0][1],
                                                None)
            # remove first
            scheduled.pop(0)

            action_plan.first_action_id = parent_action.id
            action_plan.save()

            for s_action in scheduled:
                current_action = self._create_action(context, s_action[1],
                                                     parent_action)
                parent_action = current_action

        return action_plan
Example no. 36
0
    def unmap_by_uuid(self, node_uuid, instance_uuid):
        """Remove the instance (by id) from the node (by id)

        :rtype : object
        """
        try:
            self.lock.acquire()
            if str(node_uuid) in self.compute_node_mapping:
                self.compute_node_mapping[str(node_uuid)].remove(
                    str(instance_uuid))
                # remove instance
                self.instance_mapping.pop(instance_uuid)
            else:
                LOG.warning(
                    _LW("Trying to delete the instance %(instance)s but it "
                        "was not found on node %(node)s") % {
                            'instance': instance_uuid,
                            'node': node_uuid
                        })
        finally:
            self.lock.release()
Example no. 37
 def filter_destination_hosts(self, hosts, instances_to_migrate):
     """Find instance and host with sufficient available resources"""
     # large instances go first
     instances_to_migrate = sorted(instances_to_migrate,
                                   reverse=True,
                                   key=lambda x: (x.vcpus))
     # find hosts for instances
     destination_hosts = []
     for instance_to_migrate in instances_to_migrate:
         required_cores = instance_to_migrate.vcpus
         required_disk = instance_to_migrate.disk
         required_mem = instance_to_migrate.memory
         dest_migrate_info = {}
         for nodemap in hosts:
             host = nodemap['node']
             if 'cores_used' not in nodemap:
                 # calculate the available resources
                 nodemap['cores_used'], nodemap['mem_used'],\
                     nodemap['disk_used'] = self.calculate_used_resource(
                         host)
             cores_available = (host.vcpus - nodemap['cores_used'])
             disk_available = (host.disk - nodemap['disk_used'])
             mem_available = (host.memory - nodemap['mem_used'])
             if (cores_available >= required_cores
                     and disk_available >= required_disk
                     and mem_available >= required_mem):
                 dest_migrate_info['instance'] = instance_to_migrate
                 dest_migrate_info['node'] = host
                 nodemap['cores_used'] += required_cores
                 nodemap['mem_used'] += required_mem
                 nodemap['disk_used'] += required_disk
                 destination_hosts.append(dest_migrate_info)
                 break
     # check if all instances have target hosts
     if len(destination_hosts) != len(instances_to_migrate):
         LOG.warning(
             _LW("Not all target hosts could be found; it might "
                 "be because there is not enough resource"))
         return None
     return destination_hosts
Example no. 38
    def get_instance_load(self, instance):
        """Gathering instance load through ceilometer statistic.

        :param instance: instance for which statistic is gathered.
        :return: dict
        """
        LOG.debug('get_instance_load started')
        instance_load = {'uuid': instance.uuid, 'vcpus': instance.vcpus}
        for meter in self.metrics:
            avg_meter = self.ceilometer.statistic_aggregation(
                resource_id=instance.uuid,
                meter_name=meter,
                period=self.periods['instance'],
                aggregate='min')
            if avg_meter is None:
                LOG.warning(
                    _LW("No values returned by %(resource_id)s "
                        "for %(metric_name)s") %
                    dict(resource_id=instance.uuid, metric_name=meter))
                avg_meter = 0
            if meter == 'cpu_util':
                avg_meter /= float(100)
            instance_load[meter] = avg_meter
        return instance_load
Example no. 39
0
    def execute(self, original_model):
        LOG.info(_LI("Initializing Sercon Consolidation"))

        if original_model is None:
            raise exception.ClusterStateNotDefined()

        # todo(jed) clone model
        current_model = original_model

        self.efficacy = 100
        unsuccessful_migration = 0

        first_migration = True
        size_cluster = len(current_model.get_all_hypervisors())
        if size_cluster == 0:
            raise exception.ClusterEmpty()

        self.compute_attempts(size_cluster)

        for hypervisor_id in current_model.get_all_hypervisors():
            hypervisor = current_model.get_hypervisor_from_id(hypervisor_id)
            count = current_model.get_mapping(). \
                get_node_vms_from_id(hypervisor_id)
            if len(count) == 0:
                if hypervisor.state == hyper_state.HypervisorState.ONLINE:
                    self.add_change_service_state(
                        hypervisor_id,
                        hyper_state.HypervisorState.OFFLINE.value)

        while self.get_allowed_migration_attempts() >= unsuccessful_migration:
            if not first_migration:
                self.efficacy = self.calculate_migration_efficacy()
                if self.efficacy < float(self.target_efficacy):
                    break
            first_migration = False
            score = []

            score = self.score_of_nodes(current_model, score)
            ''' sort compute nodes by Score decreasing '''
            sorted_score = sorted(score, reverse=True, key=lambda x: (x[1]))
            LOG.debug("Hypervisor(s) BFD {0}".format(sorted_score))
            ''' get Node to be released '''
            if len(score) == 0:
                LOG.warning(
                    _LW("The workloads of the compute nodes"
                        " of the cluster is zero"))
                break

            node_to_release, vm_score = self.node_and_vm_score(
                sorted_score, score, current_model)
            ''' sort VMs by Score '''
            sorted_vms = sorted(vm_score, reverse=True, key=lambda x: (x[1]))
            # BFD: Best Fit Decrease
            LOG.debug("VM(s) BFD {0}".format(sorted_vms))

            migrations = self.calculate_num_migrations(sorted_vms,
                                                       current_model,
                                                       node_to_release,
                                                       sorted_score)

            unsuccessful_migration = self.unsuccessful_migration_actualization(
                migrations, unsuccessful_migration)
        infos = {
            "number_of_migrations": self.number_of_migrations,
            "number_of_nodes_released": self.number_of_released_nodes,
            "efficacy": self.efficacy
        }
        LOG.debug(infos)
        self.solution.model = current_model
        self.solution.efficacy = self.efficacy
        return self.solution
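The consolidation loop above scores the compute nodes, picks a node to release, orders its VMs best-fit-decreasing, and stops once the attempt budget is spent or efficacy falls below the target. A toy version of just that control flow, with `score_fn` and `migrate_fn` as stand-ins for the strategy's helpers and a made-up efficacy definition, could read:

def consolidate(nodes, score_fn, migrate_fn, target_efficacy=60.0):
    """Toy consolidation loop mirroring the control flow of the example.

    `score_fn(nodes)` returns [(node, score), ...] and `migrate_fn(node)`
    returns how many migrations it took to empty that node (0 on failure).
    Both callables, and the efficacy definition below, are placeholders.
    """
    nodes = list(nodes)
    max_attempts = len(nodes)
    efficacy = 100.0
    migrations = released = unsuccessful = 0
    first = True
    while unsuccessful <= max_attempts and nodes:
        if not first:
            # made-up efficacy: released nodes per migration, as a percentage
            efficacy = (100.0 if not migrations
                        else 100.0 * released / migrations)
            if efficacy < target_efficacy:
                break
        first = False
        scored = sorted(score_fn(nodes), key=lambda x: x[1], reverse=True)
        if not scored:
            break
        node_to_release = scored[-1][0]  # lowest-scored node (illustrative)
        done = migrate_fn(node_to_release)
        if done:
            migrations += done
            released += 1
            nodes.remove(node_to_release)  # the node is now empty
        else:
            unsuccessful += 1
    return {'number_of_migrations': migrations,
            'number_of_nodes_released': released,
            'efficacy': efficacy}

The loop terminates because every iteration either removes a node from consideration or increases the count of unsuccessful attempts.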
Esempio n. 40
0
    def execute(self, original_model):
        LOG.info(_LI("Initializing Sercon Consolidation"))

        if original_model is None:
            raise exception.ClusterStateNotDefined()

        # todo(jed) clone model
        current_model = original_model

        self.efficacy = 100
        unsuccessful_migration = 0

        first_migration = True
        size_cluster = len(current_model.get_all_hypervisors())
        if size_cluster == 0:
            raise exception.ClusterEmpty()

        self.compute_attempts(size_cluster)

        for hypervisor_id in current_model.get_all_hypervisors():
            hypervisor = current_model.get_hypervisor_from_id(hypervisor_id)
            count = current_model.get_mapping(). \
                get_node_vms_from_id(hypervisor_id)
            if len(count) == 0:
                if hypervisor.state == hyper_state.HypervisorState.ENABLED:
                    self.add_change_service_state(hypervisor_id,
                                                  hyper_state.HypervisorState.
                                                  DISABLED.value)

        while self.get_allowed_migration_attempts() >= unsuccessful_migration:
            if not first_migration:
                self.efficacy = self.calculate_migration_efficacy()
                if self.efficacy < float(self.target_efficacy):
                    break
            first_migration = False
            score = []

            score = self.score_of_nodes(current_model, score)

            ''' sort compute nodes by Score decreasing '''
            sorted_score = sorted(score, reverse=True, key=lambda x: (x[1]))
            LOG.debug("Hypervisor(s) BFD {0}".format(sorted_score))

            ''' get Node to be released '''
            if len(score) == 0:
                LOG.warning(_LW(
                    "The workloads of the compute nodes"
                    " of the cluster is zero"))
                break

            node_to_release, vm_score = self.node_and_vm_score(
                sorted_score, score, current_model)

            ''' sort VMs by Score '''
            sorted_vms = sorted(vm_score, reverse=True, key=lambda x: (x[1]))
            # BFD: Best Fit Decrease
            LOG.debug("VM(s) BFD {0}".format(sorted_vms))

            migrations = self.calculate_num_migrations(
                sorted_vms, current_model, node_to_release, sorted_score)

            unsuccessful_migration = self.unsuccessful_migration_actualization(
                migrations, unsuccessful_migration)
        infos = {
            "number_of_migrations": self.number_of_migrations,
            "number_of_nodes_released": self.number_of_released_nodes,
            "efficacy": self.efficacy
        }
        LOG.debug(infos)
        self.solution.model = current_model
        self.solution.efficacy = self.efficacy
        return self.solution
Esempio n. 41
0
    def execute(self, origin_model):
        LOG.info(_LI("Initializing Workload Balance Strategy"))

        if origin_model is None:
            raise wexc.ClusterStateNotDefined()

        current_model = origin_model
        src_hypervisors, target_hypervisors, avg_workload, workload_cache = (
            self.group_hosts_by_cpu_util(current_model))

        if not src_hypervisors:
            LOG.debug("No hosts require optimization")
            return self.solution

        if not target_hypervisors:
            LOG.warning(_LW("No hosts current have CPU utilization under %s "
                            "percent, therefore there are no possible target "
                            "hosts for any migration"),
                        self.threshold)
            return self.solution

        # choose the server with largest cpu_util
        src_hypervisors = sorted(src_hypervisors,
                                 reverse=True,
                                 key=lambda x: (x[self.METER_NAME]))

        vm_to_migrate = self.choose_vm_to_migrate(current_model,
                                                  src_hypervisors,
                                                  avg_workload,
                                                  workload_cache)
        if not vm_to_migrate:
            return self.solution
        source_hypervisor, vm_src = vm_to_migrate
        # find the hosts that have enough resource for the VM to be migrated
        destination_hosts = self.filter_destination_hosts(current_model,
                                                          target_hypervisors,
                                                          vm_src,
                                                          avg_workload,
                                                          workload_cache)
        # sort the filtered result by workload
        # pick up the lowest one as dest server
        if not destination_hosts:
            LOG.warning(_LW("No proper target host could be found; it might "
                            "be because there is not enough CPU/Memory/DISK"))
            return self.solution
        destination_hosts = sorted(destination_hosts,
                                   key=lambda x: (x["cpu_util"]))
        # always use the host with the lowest CPU utilization
        mig_dst_hypervisor = destination_hosts[0]['hv']
        # generate a solution to migrate the VM to the destination server
        if current_model.get_mapping().migrate_vm(vm_src,
                                                  source_hypervisor,
                                                  mig_dst_hypervisor):
            parameters = {'migration_type': 'live',
                          'src_hypervisor': source_hypervisor.uuid,
                          'dst_hypervisor': mig_dst_hypervisor.uuid}
            self.solution.add_action(action_type=self.MIGRATION,
                                     resource_id=vm_src.uuid,
                                     input_parameters=parameters)
        self.solution.model = current_model
        return self.solution
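The workload-balance strategy migrates a single VM from the most loaded source hypervisor to the least loaded host below the CPU threshold. The host-selection step in isolation, using plain dicts with an illustrative `cpu_util` key and threshold, could be sketched as:

def pick_migration(hosts, threshold=70.0):
    """Pick a single (source, destination) pair by CPU utilization.

    `hosts` is a list of dicts like {'name': 'node1', 'cpu_util': 85.0};
    the keys and the default threshold are assumptions for this sketch.
    """
    sources = [h for h in hosts if h['cpu_util'] >= threshold]
    targets = [h for h in hosts if h['cpu_util'] < threshold]
    if not sources or not targets:
        return None  # nothing to optimize, or nowhere to migrate to
    source = max(sources, key=lambda h: h['cpu_util'])
    destination = min(targets, key=lambda h: h['cpu_util'])
    return source['name'], destination['name']

For example, pick_migration([{'name': 'a', 'cpu_util': 90}, {'name': 'b', 'cpu_util': 10}]) returns ('a', 'b').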
Esempio n. 42
0
    def do_execute(self):
        # todo(jed) clone model
        self.efficacy = 100
        unsuccessful_migration = 0

        first_migration = True
        size_cluster = len(self.model.get_all_hypervisors())
        if size_cluster == 0:
            raise exception.ClusterEmpty()

        self.compute_attempts(size_cluster)

        for hypervisor_id in self.model.get_all_hypervisors():
            hypervisor = self.model.get_hypervisor_from_id(hypervisor_id)
            count = self.model.get_mapping(). \
                get_node_vms_from_id(hypervisor_id)
            if len(count) == 0:
                if hypervisor.state == hyper_state.HypervisorState.ENABLED:
                    self.add_change_service_state(hypervisor_id,
                                                  hyper_state.HypervisorState.
                                                  DISABLED.value)

        while self.get_allowed_migration_attempts() >= unsuccessful_migration:
            if not first_migration:
                self.efficacy = self.calculate_migration_efficacy()
                if self.efficacy < float(self.target_efficacy):
                    break
            first_migration = False
            score = []

            score = self.score_of_nodes(score)

            # Sort compute nodes by Score decreasing
            sorted_score = sorted(score, reverse=True, key=lambda x: (x[1]))
            LOG.debug("Hypervisor(s) BFD %s", sorted_score)

            # Get Node to be released
            if len(score) == 0:
                LOG.warning(_LW(
                    "The workloads of the compute nodes"
                    " of the cluster is zero"))
                break

            node_to_release, vm_score = self.node_and_vm_score(
                sorted_score, score)

            # Sort VMs by Score
            sorted_vms = sorted(vm_score, reverse=True, key=lambda x: (x[1]))
            # BFD: Best Fit Decrease
            LOG.debug("VM(s) BFD %s", sorted_vms)

            migrations = self.calculate_num_migrations(
                sorted_vms, node_to_release, sorted_score)

            unsuccessful_migration = self.unsuccessful_migration_actualization(
                migrations, unsuccessful_migration)
        infos = {
            "number_of_migrations": self.number_of_migrations,
            "number_of_nodes_released": self.number_of_released_nodes,
            "efficacy": self.efficacy
        }
        LOG.debug(infos)
Esempio n. 43
0
    def schedule(self, context, audit_id, solution):
        LOG.debug('Creating an action plan for the audit uuid: %s', audit_id)
        weights = self.config.weights
        action_plan = self._create_action_plan(context, audit_id, solution)

        actions = list(solution.actions)
        to_schedule = []
        for action in actions:
            json_action = self.create_action(
                action_plan_id=action_plan.id,
                action_type=action.get('action_type'),
                input_parameters=action.get('input_parameters'))
            to_schedule.append(
                (weights[action.get('action_type')], json_action))

        self._create_efficacy_indicators(context, action_plan.id,
                                         solution.efficacy_indicators)

        # scheduling
        scheduled = sorted(to_schedule,
                           key=lambda weight: (weight[0]),
                           reverse=True)
        if len(scheduled) == 0:
            LOG.warning(_LW("The action plan is empty"))
            action_plan.state = objects.action_plan.State.SUCCEEDED
            action_plan.save()
        else:
            resource_action_map = {}
            scheduled_actions = [x[1] for x in scheduled]
            for action in scheduled_actions:
                a_type = action['action_type']
                if a_type != 'turn_host_to_acpi_s3_state':
                    plugin_action = self.load_child_class(
                        action.get("action_type"))
                    if not plugin_action:
                        raise exception.UnsupportedActionType(
                            action_type=action.get("action_type"))
                    db_action = self._create_action(context, action)
                    parents = plugin_action.validate_parents(
                        resource_action_map, action)
                    if parents:
                        db_action.parents = parents
                        db_action.save()
                # if we have an action that will make host unreachable, we need
                # to complete all actions (resize and migration type)
                # related to the host.
                # Note(alexchadin): turn_host_to_acpi_s3_state doesn't
                # actually exist. Placed code shows relations between
                # action types.
                # TODO(alexchadin): add turn_host_to_acpi_s3_state action type.
                else:
                    host_to_acpi_s3 = action['input_parameters']['resource_id']
                    host_actions = resource_action_map.get(host_to_acpi_s3)
                    action_parents = []
                    if host_actions:
                        resize_actions = [
                            x[0] for x in host_actions if x[1] == 'resize'
                        ]
                        migrate_actions = [
                            x[0] for x in host_actions if x[1] == 'migrate'
                        ]
                        resize_migration_parents = [
                            x.parents for x in [
                                objects.Action.get_by_uuid(
                                    context, resize_action)
                                for resize_action in resize_actions
                            ]
                        ]
                        # resize_migration_parents should be a flat list
                        resize_migration_parents = [
                            parent for sublist in resize_migration_parents
                            for parent in sublist
                        ]
                        action_parents.extend(
                            [uuid for uuid in resize_actions])
                        action_parents.extend([
                            uuid for uuid in migrate_actions
                            if uuid not in resize_migration_parents
                        ])
                    db_action = self._create_action(context, action)
                    db_action.parents = action_parents
                    db_action.save()

        return action_plan
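The scheduler above turns each strategy action into a weighted entry and orders them highest weight first before wiring up parent links. The ordering step alone, with an assumed `weights` mapping such as {'migrate': 3, 'resize': 2}, could be sketched as:

def order_actions(actions, weights):
    """Order raw strategy actions by their configured weight, highest first.

    `actions` is a list of dicts carrying an 'action_type' key and `weights`
    maps action types to integers; both shapes are assumptions here.  An
    unknown action type raises KeyError, mirroring the strict lookup above.
    """
    to_schedule = [(weights[a['action_type']], a) for a in actions]
    return [a for _, a in sorted(to_schedule, key=lambda w: w[0],
                                 reverse=True)]

Because Python's sort is stable, actions with equal weights keep their original relative order, e.g. order_actions([{'action_type': 'resize'}, {'action_type': 'migrate'}], {'migrate': 3, 'resize': 2}) schedules the migration first.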
Esempio n. 44
0
    def __init__(self,
                 user_id=None,
                 project_id=None,
                 is_admin=None,
                 roles=None,
                 timestamp=None,
                 request_id=None,
                 auth_token=None,
                 auth_url=None,
                 overwrite=True,
                 user_name=None,
                 project_name=None,
                 domain_name=None,
                 domain_id=None,
                 auth_token_info=None,
                 **kwargs):
        """Stores several additional request parameters:

        :param domain_id: The ID of the domain.
        :param domain_name: The name of the domain.
        :param is_public_api: Specifies whether the request should be processed
                              without authentication.

        """
        user = kwargs.pop('user', None)
        tenant = kwargs.pop('tenant', None)
        super(RequestContext, self).__init__(
            auth_token=auth_token,
            user=user_id or user,
            tenant=project_id or tenant,
            domain=kwargs.pop('domain', None) or domain_name or domain_id,
            user_domain=kwargs.pop('user_domain', None),
            project_domain=kwargs.pop('project_domain', None),
            is_admin=is_admin,
            read_only=kwargs.pop('read_only', False),
            show_deleted=kwargs.pop('show_deleted', False),
            request_id=request_id,
            resource_uuid=kwargs.pop('resource_uuid', None),
            is_admin_project=kwargs.pop('is_admin_project', True),
            overwrite=overwrite,
            roles=roles)

        self.remote_address = kwargs.pop('remote_address', None)
        self.instance_lock_checked = kwargs.pop('instance_lock_checked', None)
        self.read_deleted = kwargs.pop('read_deleted', None)
        self.service_catalog = kwargs.pop('service_catalog', None)
        self.quota_class = kwargs.pop('quota_class', None)

        # oslo_context's RequestContext.to_dict() generates this field, we can
        # safely ignore this as we don't use it.
        kwargs.pop('user_identity', None)
        if kwargs:
            LOG.warning(_LW('Arguments dropped when creating context: %s'),
                        str(kwargs))

        # FIXME(dims): user_id and project_id duplicate information that is
        # already present in the oslo_context's RequestContext. We need to
        # get rid of them.
        self.auth_url = auth_url
        self.domain_name = domain_name
        self.domain_id = domain_id
        self.auth_token_info = auth_token_info
        self.user_id = user_id or user
        self.project_id = project_id
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_isotime(timestamp)
        self.timestamp = timestamp
        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = is_admin
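The context constructor pops the keyword arguments it understands and warns about anything left over so that unexpected input never fails silently. That defensive pattern, reduced to a toy class that is not the real oslo.context API, might look like:

import warnings


class ToyContext(object):
    """Minimal illustration of the 'pop what you know, warn about the rest'
    pattern used by the request context above (all names are invented).
    """

    KNOWN = ('remote_address', 'read_deleted', 'service_catalog',
             'quota_class')

    def __init__(self, user_id=None, project_id=None, **kwargs):
        self.user_id = user_id
        self.project_id = project_id
        for name in self.KNOWN:
            setattr(self, name, kwargs.pop(name, None))
        kwargs.pop('user_identity', None)  # generated field, safe to discard
        if kwargs:
            warnings.warn('Arguments dropped when creating context: %s'
                          % kwargs)

Constructing ToyContext(user_id='u1', quota_class='gold', bogus=1) sets the known attributes and emits a warning naming the dropped 'bogus' argument.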