Example No. 1
    def cleanup_all_local_data(self, path):
        """ Delete all the local data about VMs.

        :param path: A path to the local data directory.
        """
        vm_path = common.build_local_vm_path(path)
        self.cleanup_local_vm_data(vm_path, os.listdir(vm_path))
        host_path = common.build_local_host_path(path)
        if os.access(host_path, os.F_OK):
            os.remove(host_path)
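
The path helpers used above (common.build_local_vm_path, common.build_local_host_path) and cleanup_local_vm_data are not shown in this example. A minimal standalone sketch, assuming the local data layout is <local_data_directory>/vm (one file per VM UUID) and <local_data_directory>/host (a single host data file) -- the layout is an assumption, not something the snippet confirms:

import os


def build_local_vm_path(local_data_directory):
    # Assumed layout: one file per VM UUID under <local_data_directory>/vm
    return os.path.join(local_data_directory, 'vm')


def build_local_host_path(local_data_directory):
    # Assumed layout: a single host data file at <local_data_directory>/host
    return os.path.join(local_data_directory, 'host')


def cleanup_local_vm_data(vm_path, vms):
    # Delete the local data file of each of the given VM UUIDs.
    for vm in vms:
        os.remove(os.path.join(vm_path, vm))
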
Example No. 2
    def cleanup_all_local_data(self, path):
        """Delete all the local data about VMs.

        :param path: A path to the local data directory.
        """
        vm_path = common.build_local_vm_path(path)
        self.cleanup_local_vm_data(vm_path, os.listdir(vm_path))
        host_path = common.build_local_host_path(path)
        if os.access(host_path, os.F_OK):
            os.remove(host_path)
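
The os.access(host_path, os.F_OK) check followed by os.remove leaves a small window in which the file could disappear between the check and the removal. This is not how the project handles it, but the usual Python idiom is to attempt the removal and treat a missing file as success; a hedged sketch:

import errno
import os


def remove_if_exists(path):
    # Remove a file, treating "already gone" as success.
    try:
        os.remove(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
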
Example No. 3
    def execute(self, ctx=None):
        """ Execute a data collection iteration.

        1. Read the names of the files from the <local_data_directory>/vm
           directory to determine the list of VMs running on the host at the
           last data collection.

        2. Call the Nova API to obtain the list of VMs that are currently
           active on the host.

        3. Compare the old and new lists of VMs and determine the newly added
           or removed VMs.

        4. Delete the files from the <local_data_directory>/vm directory
           corresponding to the VMs that have been removed from the host.

        5. Fetch the latest data_collector_data_length data values from the
           central database for each newly added VM using the database
           connection information specified in the sql_connection option and
           save the data in the <local_data_directory>/vm directory.

        6. Call the Libvirt API to obtain the CPU time for each VM active on
           the host. Transform the data obtained from the Libvirt API into the
           average MHz according to the frequency of the host's CPU and the
           time interval since the previous data collection.

        7. Store the converted data in the <local_data_directory>/vm
           directory in separate files for each VM, and submit the data to the
           central database.
        """
        LOG.info('Started an iteration')
        state = self.state

        vm_path = common.build_local_vm_path(CONF.local_data_directory)
        host_path = common.build_local_host_path(CONF.local_data_directory)
        data_length = CONF.data_collector_data_length
        vms_previous = self.get_previous_vms(vm_path)
        vms_current = self.get_current_vms(state['vir_connection'])

        vms_added = self.get_added_vms(vms_previous, vms_current.keys())
        added_vm_data = dict()
        if vms_added:
            LOG.debug('Added VMs: %s', str(vms_added))
            # Iterate over a copy: deleting from the list while
            # enumerating it would skip elements and shift the indices.
            for vm in list(vms_added):
                if vms_current[vm] != libvirt.VIR_DOMAIN_RUNNING:
                    LOG.debug('Added VM %s not in running state', vm)
                    vms_added.remove(vm)
                    del vms_current[vm]

            added_vm_data = self.fetch_remote_data(state['db'],
                                                   data_length,
                                                   vms_added)
            LOG.debug('Fetched remote data: %s', str(added_vm_data))
            self.write_vm_data_locally(vm_path, added_vm_data, data_length)

        vms_removed = self.get_removed_vms(vms_previous, vms_current.keys())
        if vms_removed:
            LOG.debug('Removed VMs: %s', str(vms_removed))
            self.cleanup_local_vm_data(vm_path, vms_removed)
            for vm in vms_removed:
                del state['previous_cpu_time'][vm]
                del state['previous_cpu_mhz'][vm]

        LOG.info('Started VM data collection')
        current_time = time.time()
        (cpu_time, cpu_mhz) = self.get_cpu_mhz(state['vir_connection'],
                                               state['physical_core_mhz'],
                                               state['previous_cpu_time'],
                                               state['previous_time'],
                                               current_time,
                                               vms_current.keys(),
                                               state['previous_cpu_mhz'],
                                               added_vm_data)
        LOG.info('Completed VM data collection')

        LOG.info('Started host data collection')
        (host_cpu_time_total, host_cpu_time_busy, host_cpu_mhz) = \
            self.get_host_cpu_mhz(
                state['physical_cpu_mhz'],
                state['previous_host_cpu_time_total'],
                state['previous_host_cpu_time_busy']
            )
        LOG.info('Completed host data collection')

        if state['previous_time'] > 0:
            self.append_vm_data_locally(vm_path, cpu_mhz, data_length)
            self.append_vm_data_remotely(state['db'], cpu_mhz)

            total_vms_cpu_mhz = sum(cpu_mhz.values())
            host_cpu_mhz_hypervisor = host_cpu_mhz - total_vms_cpu_mhz
            if host_cpu_mhz_hypervisor < 0:
                host_cpu_mhz_hypervisor = 0
            total_cpu_mhz = total_vms_cpu_mhz + host_cpu_mhz_hypervisor
            self.append_host_data_locally(host_path, host_cpu_mhz_hypervisor,
                                          data_length)
            self.append_host_data_remotely(state['db'],
                                           state['hostname'],
                                           host_cpu_mhz_hypervisor)

            LOG.debug('Collected VM CPU MHz: %s', str(cpu_mhz))
            LOG.debug('Collected total VMs CPU MHz: %s',
                      str(total_vms_cpu_mhz))
            LOG.debug('Collected hypervisor CPU MHz: %s',
                      str(host_cpu_mhz_hypervisor))
            LOG.debug('Collected host CPU MHz: %s', str(host_cpu_mhz))
            LOG.debug('Collected total CPU MHz: %s', str(total_cpu_mhz))

            state['previous_overload'] = self.log_host_overload(
                state['db'],
                state['host_cpu_overload_threshold'],
                state['hostname'],
                state['previous_overload'],
                state['physical_cpu_mhz'],
                total_cpu_mhz)

        state['previous_time'] = current_time
        state['previous_cpu_time'] = cpu_time
        state['previous_cpu_mhz'] = cpu_mhz
        state['previous_host_cpu_time_total'] = host_cpu_time_total
        state['previous_host_cpu_time_busy'] = host_cpu_time_busy

        LOG.info('Completed an iteration')
        self.state = state
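
Step 6 of the docstring describes converting the cumulative CPU time reported by libvirt into an average MHz value over the collection interval. The helper that does this is not shown here; a minimal sketch of the arithmetic (the name and signature are illustrative), assuming the CPU time values are cumulative nanoseconds as libvirt reports them and core_mhz is the frequency of one physical core in MHz:

def calculate_cpu_mhz(core_mhz, previous_time, current_time,
                      previous_cpu_time, current_cpu_time):
    # Fraction of one core used over the interval, scaled to MHz.
    # CPU time is in nanoseconds; wall-clock time is in seconds.
    elapsed_ns = (current_time - previous_time) * 1000000000
    used_ns = current_cpu_time - previous_cpu_time
    return int(core_mhz * float(used_ns) / elapsed_ns)

For example, a VM that consumed 5 seconds of CPU time over a 10-second interval on a 2400 MHz core averages 1200 MHz.
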
Example No. 4
    def execute(self, ctx=None):
        """Execute an iteration of the local manager.

        1. Read the data on resource usage by the VMs running on the host from
           the <local_data_directory>/vm directory.

        2. Call the function specified in the algorithm_underload_detection
           configuration option and pass the data on the resource usage by the
           VMs, as well as the frequency of the CPU as arguments.

        3. If the host is underloaded, send a request to the REST API of the
           global manager and pass a list of the UUIDs of all the VMs
           currently running on the host in the vm_uuids parameter, with the
           reason for migration set to 0.

        4. If the host is not underloaded, call the function specified in the
           algorithm_overload_detection configuration option and pass the data
           on the resource usage by the VMs, as well as the frequency of the
           host's CPU as arguments.

        5. If the host is overloaded, call the function specified in the
           algorithm_vm_selection configuration option and pass the data on
           the resource usage by the VMs, as well as the frequency of the
           host's CPU as arguments.

        6. If the host is overloaded, send a request to the REST API of the
           global manager and pass a list of the UUIDs of the VMs selected by
           the VM selection algorithm in the vm_uuids parameter, with the
           reason for migration set to 1.
        """
        LOG.info('Started an iteration')
        state = self.state

        vm_path = common.build_local_vm_path(CONF.local_data_directory)
        vm_cpu_mhz = self.get_local_vm_data(vm_path)
        vm_ram = self.get_ram(state['vir_connection'], vm_cpu_mhz.keys())
        vm_cpu_mhz = self.cleanup_vm_data(vm_cpu_mhz, vm_ram.keys())

        if not vm_cpu_mhz:
            LOG.info('Skipped an iteration')
            return

        host_path = common.build_local_host_path(CONF.local_data_directory)
        host_cpu_mhz = self.get_local_host_data(host_path)

        host_cpu_utilization = self.vm_mhz_to_percentage(
            vm_cpu_mhz.values(),
            host_cpu_mhz,
            state['physical_cpu_mhz_total'])
        LOG.debug('The total physical CPU MHz: %s',
                  str(state['physical_cpu_mhz_total']))
        LOG.debug('VM CPU MHz: %s', str(vm_cpu_mhz))
        LOG.debug('Host CPU MHz: %s', str(host_cpu_mhz))
        LOG.debug('CPU utilization: %s', str(host_cpu_utilization))

        if not host_cpu_utilization:
            LOG.info('Not enough data yet - skipping to the next iteration')
            LOG.info('Skipped an iteration')
            return

        time_step = CONF.data_collector_interval
        migration_time = common.calculate_migration_time(
            vm_ram, CONF.network_migration_bandwidth)

        if 'underload_detection' not in state:
            underload_detection_params = common.parse_parameters(
                CONF.local_manager.algorithm_underload_detection_parameters)
            underload_detection = common.call_function_by_name(
                CONF.local_manager.algorithm_underload_detection_factory,
                [time_step,
                 migration_time,
                 underload_detection_params])
            state['underload_detection'] = underload_detection
            state['underload_detection_state'] = {}

            overload_detection_params = common.parse_parameters(
                CONF.local_manager.algorithm_overload_detection_parameters)
            overload_detection = common.call_function_by_name(
                CONF.local_manager.algorithm_overload_detection_factory,
                [time_step,
                 migration_time,
                 overload_detection_params])
            state['overload_detection'] = overload_detection
            state['overload_detection_state'] = {}

            vm_selection_params = common.parse_parameters(
                CONF.local_manager.algorithm_vm_selection_parameters)
            vm_selection = common.call_function_by_name(
                CONF.local_manager.algorithm_vm_selection_factory,
                [time_step,
                 migration_time,
                 vm_selection_params])
            state['vm_selection'] = vm_selection
            state['vm_selection_state'] = {}
        else:
            underload_detection = state['underload_detection']
            overload_detection = state['overload_detection']
            vm_selection = state['vm_selection']

        LOG.info('Started underload detection')
        underload, state['underload_detection_state'] = underload_detection(
            host_cpu_utilization, state['underload_detection_state'])
        LOG.info('Completed underload detection')

        LOG.info('Started overload detection')
        overload, state['overload_detection_state'] = overload_detection(
            host_cpu_utilization, state['overload_detection_state'])
        LOG.info('Completed overload detection')

        if underload:
            LOG.info('Underload detected')
            # TODO(xylan): send rpc message to global manager
        elif overload:
            LOG.info('Overload detected')

            LOG.info('Started VM selection')
            vm_uuids, state['vm_selection_state'] = vm_selection(
                vm_cpu_mhz, vm_ram, state['vm_selection_state'])
            LOG.info('Completed VM selection')

            LOG.info('Selected VMs to migrate: %s', str(vm_uuids))
            # TODO(xylan): send rpc message to global manager
        else:
            LOG.info('No underload or overload detected')

        LOG.info('Completed an iteration')
        self.state = state
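
self.vm_mhz_to_percentage is called above but its implementation is not shown. A sketch consistent with how it is called, assuming each entry of vm_cpu_mhz.values() and host_cpu_mhz is a history of average MHz values (newest last) aligned on the same collection intervals, and physical_cpu_mhz_total is the aggregate capacity of the host in MHz:

def vm_mhz_to_percentage(vm_mhz_histories, host_mhz_history,
                         physical_cpu_mhz_total):
    # Combine per-VM and hypervisor MHz histories into a history of
    # host utilization fractions. The shortest history bounds how many
    # recent data points can be combined.
    histories = list(vm_mhz_histories) + [host_mhz_history]
    length = min(len(h) for h in histories)
    if length == 0:
        return []
    trimmed = [h[-length:] for h in histories]
    return [float(sum(step)) / physical_cpu_mhz_total
            for step in zip(*trimmed)]

An empty result when no history is available yet would be consistent with the 'Not enough data yet' skip above.
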
Example No. 5
    def execute(self, ctx=None):
        """ Execute an iteration of the local manager.

        1. Read the data on resource usage by the VMs running on the host from
           the <local_data_directory>/vm directory.

        2. Call the function specified in the algorithm_underload_detection
           configuration option and pass the data on the resource usage by the
           VMs, as well as the frequency of the CPU as arguments.

        3. If the host is underloaded, send a request to the REST API of the
           global manager and pass a list of the UUIDs of all the VMs
           currently running on the host in the vm_uuids parameter, with the
           reason for migration set to 0.

        4. If the host is not underloaded, call the function specified in the
           algorithm_overload_detection configuration option and pass the data
           on the resource usage by the VMs, as well as the frequency of the
           host's CPU as arguments.

        5. If the host is overloaded, call the function specified in the
           algorithm_vm_selection configuration option and pass the data on
           the resource usage by the VMs, as well as the frequency of the
           host's CPU as arguments.

        6. If the host is overloaded, send a request to the REST API of the
           global manager and pass a list of the UUIDs of the VMs selected by
           the VM selection algorithm in the vm_uuids parameter, with the
           reason for migration set to 1.

        """
        LOG.info('Started an iteration')
        state = self.state

        vm_path = common.build_local_vm_path(CONF.local_data_directory)
        vm_cpu_mhz = self.get_local_vm_data(vm_path)
        vm_ram = self.get_ram(state['vir_connection'], vm_cpu_mhz.keys())
        vm_cpu_mhz = self.cleanup_vm_data(vm_cpu_mhz, vm_ram.keys())

        if not vm_cpu_mhz:
            LOG.info('Skipped an iteration')
            return

        host_path = common.build_local_host_path(CONF.local_data_directory)
        host_cpu_mhz = self.get_local_host_data(host_path)

        host_cpu_utilization = self.vm_mhz_to_percentage(
            vm_cpu_mhz.values(),
            host_cpu_mhz,
            state['physical_cpu_mhz_total'])
        LOG.debug('The total physical CPU MHz: %s',
                  str(state['physical_cpu_mhz_total']))
        LOG.debug('VM CPU MHz: %s', str(vm_cpu_mhz))
        LOG.debug('Host CPU MHz: %s', str(host_cpu_mhz))
        LOG.debug('CPU utilization: %s', str(host_cpu_utilization))

        if not host_cpu_utilization:
            LOG.info('Not enough data yet - skipping to the next iteration')
            LOG.info('Skipped an iteration')
            return

        time_step = CONF.data_collector_interval
        migration_time = common.calculate_migration_time(
            vm_ram, CONF.network_migration_bandwidth)

        if 'underload_detection' not in state:
            underload_detection_params = common.parse_parameters(
                CONF.local_manager.algorithm_underload_detection_parameters)
            underload_detection = common.call_function_by_name(
                CONF.local_manager.algorithm_underload_detection_factory,
                [time_step,
                 migration_time,
                 underload_detection_params])
            state['underload_detection'] = underload_detection
            state['underload_detection_state'] = {}

            overload_detection_params = common.parse_parameters(
                CONF.local_manager.algorithm_overload_detection_parameters)
            overload_detection = common.call_function_by_name(
                CONF.local_manager.algorithm_overload_detection_factory,
                [time_step,
                 migration_time,
                 overload_detection_params])
            state['overload_detection'] = overload_detection
            state['overload_detection_state'] = {}

            vm_selection_params = common.parse_parameters(
                CONF.local_manager.algorithm_vm_selection_parameters)
            vm_selection = common.call_function_by_name(
                CONF.local_manager.algorithm_vm_selection_factory,
                [time_step,
                 migration_time,
                 vm_selection_params])
            state['vm_selection'] = vm_selection
            state['vm_selection_state'] = {}
        else:
            underload_detection = state['underload_detection']
            overload_detection = state['overload_detection']
            vm_selection = state['vm_selection']

        LOG.info('Started underload detection')
        underload, state['underload_detection_state'] = underload_detection(
            host_cpu_utilization, state['underload_detection_state'])
        LOG.info('Completed underload detection')

        LOG.info('Started overload detection')
        overload, state['overload_detection_state'] = overload_detection(
            host_cpu_utilization, state['overload_detection_state'])
        LOG.info('Completed overload detection')

        if underload:
            LOG.info('Underload detected')
            # TODO(xylan): send rpc message to global manager
        elif overload:
            LOG.info('Overload detected')

            LOG.info('Started VM selection')
            vm_uuids, state['vm_selection_state'] = vm_selection(
                vm_cpu_mhz, vm_ram, state['vm_selection_state'])
            LOG.info('Completed VM selection')

            LOG.info('Selected VMs to migrate: %s', str(vm_uuids))
            # TODO(xylan): send rpc message to global manager
        else:
            LOG.info('No underload or overload detected')

        LOG.info('Completed an iteration')
        self.state = state
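
The detection callables above follow a factory convention: the function named by each algorithm_*_factory option is invoked with [time_step, migration_time, params] and must return a callable of the form f(utilization_history, state) -> (decision, state), which is how underload_detection and overload_detection are applied in the iteration. A minimal sketch of a static-threshold factory following that convention (the 'threshold' parameter name is hypothetical):

def threshold_factory(time_step, migration_time, params):
    # time_step and migration_time are part of the factory signature
    # but are not needed by this simple detector.
    threshold = params['threshold']  # hypothetical parameter name

    def detect(utilization_history, state):
        # Decide from the most recent utilization value only and pass
        # the state dict through unchanged.
        if not utilization_history:
            return False, state
        return utilization_history[-1] > threshold, state

    return detect
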
Example No. 6
    def execute(self, ctx=None):
        """Execute a data collection iteration.

        1. Read the names of the files from the <local_data_directory>/vm
           directory to determine the list of VMs running on the host at the
           last data collection.

        2. Call the Nova API to obtain the list of VMs that are currently
           active on the host.

        3. Compare the old and new lists of VMs and determine the newly added
           or removed VMs.

        4. Delete the files from the <local_data_directory>/vm directory
           corresponding to the VMs that have been removed from the host.

        5. Fetch the latest data_collector_data_length data values from the
           central database for each newly added VM using the database
           connection information specified in the sql_connection option and
           save the data in the <local_data_directory>/vm directory.

        6. Call the Libvirt API to obtain the CPU time for each VM active on
           the host. Transform the data obtained from the Libvirt API into the
           average MHz according to the frequency of the host's CPU and the
           time interval since the previous data collection.

        7. Store the converted data in the <local_data_directory>/vm
           directory in separate files for each VM, and submit the data to the
           central database.
        """
        LOG.info('Started an iteration')
        state = self.state

        vm_path = common.build_local_vm_path(CONF.local_data_directory)
        host_path = common.build_local_host_path(CONF.local_data_directory)
        data_length = CONF.data_collector_data_length
        vms_previous = self.get_previous_vms(vm_path)
        vms_current = self.get_current_vms(state['vir_connection'])

        vms_added = self.get_added_vms(vms_previous, vms_current.keys())
        added_vm_data = dict()
        if vms_added:
            LOG.debug('Added VMs: %s', str(vms_added))
            # Iterate over a copy: deleting from the list while
            # enumerating it would skip elements and shift the indices.
            for vm in list(vms_added):
                if vms_current[vm] != libvirt.VIR_DOMAIN_RUNNING:
                    LOG.debug('Added VM %s not in running state', vm)
                    vms_added.remove(vm)
                    del vms_current[vm]

            added_vm_data = self.fetch_remote_data(state['db'],
                                                   data_length,
                                                   vms_added)
            LOG.debug('Fetched remote data: %s', str(added_vm_data))
            self.write_vm_data_locally(vm_path, added_vm_data, data_length)

        vms_removed = self.get_removed_vms(vms_previous, vms_current.keys())
        if vms_removed:
            LOG.debug('Removed VMs: %s', str(vms_removed))
            self.cleanup_local_vm_data(vm_path, vms_removed)
            for vm in vms_removed:
                del state['previous_cpu_time'][vm]
                del state['previous_cpu_mhz'][vm]

        LOG.info('Started VM data collection')
        current_time = time.time()
        (cpu_time, cpu_mhz) = self.get_cpu_mhz(state['vir_connection'],
                                               state['physical_core_mhz'],
                                               state['previous_cpu_time'],
                                               state['previous_time'],
                                               current_time,
                                               vms_current.keys(),
                                               state['previous_cpu_mhz'],
                                               added_vm_data)
        LOG.info('Completed VM data collection')

        LOG.info('Started host data collection')
        (host_cpu_time_total, host_cpu_time_busy, host_cpu_mhz) = \
            self.get_host_cpu_mhz(
                state['physical_cpu_mhz'],
                state['previous_host_cpu_time_total'],
                state['previous_host_cpu_time_busy']
            )
        LOG.info('Completed host data collection')

        if state['previous_time'] > 0:
            self.append_vm_data_locally(vm_path, cpu_mhz, data_length)
            self.append_vm_data_remotely(state['db'], cpu_mhz)

            total_vms_cpu_mhz = sum(cpu_mhz.values())
            host_cpu_mhz_hypervisor = host_cpu_mhz - total_vms_cpu_mhz
            if host_cpu_mhz_hypervisor < 0:
                host_cpu_mhz_hypervisor = 0
            total_cpu_mhz = total_vms_cpu_mhz + host_cpu_mhz_hypervisor
            self.append_host_data_locally(host_path, host_cpu_mhz_hypervisor,
                                          data_length)
            self.append_host_data_remotely(state['db'],
                                           state['hostname'],
                                           host_cpu_mhz_hypervisor)

            LOG.debug('Collected VM CPU MHz: %s', str(cpu_mhz))
            LOG.debug('Collected total VMs CPU MHz: %s',
                      str(total_vms_cpu_mhz))
            LOG.debug('Collected hypervisor CPU MHz: %s',
                      str(host_cpu_mhz_hypervisor))
            LOG.debug('Collected host CPU MHz: %s', str(host_cpu_mhz))
            LOG.debug('Collected total CPU MHz: %s', str(total_cpu_mhz))

            state['previous_overload'] = self.log_host_overload(
                state['db'],
                state['host_cpu_overload_threshold'],
                state['hostname'],
                state['previous_overload'],
                state['physical_cpu_mhz'],
                total_cpu_mhz)

        state['previous_time'] = current_time
        state['previous_cpu_time'] = cpu_time
        state['previous_cpu_mhz'] = cpu_mhz
        state['previous_host_cpu_time_total'] = host_cpu_time_total
        state['previous_host_cpu_time_busy'] = host_cpu_time_busy

        LOG.info('Completed an iteration')
        self.state = state
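
self.get_host_cpu_mhz returns the cumulative total and busy CPU times together with the host's average MHz for the interval, but its implementation is not shown. A sketch of the underlying calculation (the function names are illustrative), assuming the cumulative times come from the aggregate 'cpu' line of /proc/stat (where the fourth numeric field is idle time) and physical_cpu_mhz is the total capacity of the host in MHz:

def read_host_cpu_time():
    # Cumulative (total, busy) CPU time in jiffies from /proc/stat.
    with open('/proc/stat') as f:
        fields = [int(x) for x in f.readline().split()[1:]]
    total = sum(fields)
    busy = total - fields[3]  # fields[3] is the idle counter
    return total, busy


def host_cpu_mhz(physical_cpu_mhz, previous_total, previous_busy,
                 total, busy):
    # Average MHz used by the whole host over the interval between
    # two readings of the cumulative counters.
    return int(physical_cpu_mhz * (busy - previous_busy) /
               (total - previous_total))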