def cleanup_all_local_data(path):
    """ Delete all the local data about VMs.

    :param path: A path to the local data directory.
    :type path: str
    """
    vm_path = common.build_local_vm_path(path)
    cleanup_local_vm_data(vm_path, os.listdir(vm_path))
    host_path = common.build_local_host_path(path)
    if os.access(host_path, os.F_OK):
        os.remove(host_path)
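
# cleanup_all_local_data() relies on helpers defined elsewhere in the
# codebase. A minimal sketch of their assumed behaviour, for illustration
# only: per-VM data files are assumed to live under
# <local_data_directory>/vm and are simply unlinked one by one. The bodies
# below are assumptions, not the project's exact implementation (os is
# already imported by the surrounding module).
def build_local_vm_path(local_data_directory):
    # Directory assumed to hold one data file per VM UUID.
    return os.path.join(local_data_directory, 'vm')


def cleanup_local_vm_data(vm_path, vms):
    # Remove the data file of each of the given VMs.
    for vm in vms:
        os.remove(os.path.join(vm_path, vm))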
def build_local_host_path(x=str_(of='abc123_-/')):
    assert common.build_local_host_path(x) == os.path.join(x, 'host')
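
# The property above pins down build_local_host_path: the host data file is
# expected at <local_data_directory>/host. A minimal implementation
# consistent with that assertion (an illustrative sketch, not necessarily
# the project's exact code):
def build_local_host_path(local_data_directory):
    return os.path.join(local_data_directory, 'host')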
def execute(config, state):
    """ Execute an iteration of the local manager.

    1. Read the data on resource usage by the VMs running on the host
       from the <local_data_directory>/vm directory.

    2. Call the function specified in the algorithm_underload_detection
       configuration option and pass the data on the resource usage by
       the VMs, as well as the frequency of the CPU as arguments.

    3. If the host is underloaded, send a request to the REST API of the
       global manager and pass a list of the UUIDs of all the VMs
       currently running on the host in the vm_uuids parameter, as well
       as the reason for migration as being 0.

    4. If the host is not underloaded, call the function specified in the
       algorithm_overload_detection configuration option and pass the
       data on the resource usage by the VMs, as well as the frequency of
       the host's CPU as arguments.

    5. If the host is overloaded, call the function specified in the
       algorithm_vm_selection configuration option and pass the data on
       the resource usage by the VMs, as well as the frequency of the
       host's CPU as arguments.

    6. If the host is overloaded, send a request to the REST API of the
       global manager and pass a list of the UUIDs of the VMs selected by
       the VM selection algorithm in the vm_uuids parameter, as well as
       the reason for migration as being 1.

    :param config: A config dictionary.
    :type config: dict(str: *)

    :param state: A state dictionary.
    :type state: dict(str: *)

    :return: The updated state dictionary.
    :rtype: dict(str: *)
    """
    log.info('Started an iteration')
    vm_path = common.build_local_vm_path(config['local_data_directory'])
    vm_cpu_mhz = get_local_vm_data(vm_path)
    vm_ram = get_ram(state['vir_connection'], vm_cpu_mhz.keys())
    vm_cpu_mhz = cleanup_vm_data(vm_cpu_mhz, vm_ram.keys())

    if not vm_cpu_mhz:
        if log.isEnabledFor(logging.INFO):
            log.info('The host is idle')
        log.info('Skipped an iteration')
        return state

    host_path = common.build_local_host_path(config['local_data_directory'])
    host_cpu_mhz = get_local_host_data(host_path)

    host_cpu_utilization = vm_mhz_to_percentage(
        vm_cpu_mhz.values(), host_cpu_mhz,
        state['physical_cpu_mhz_total'])
    if log.isEnabledFor(logging.DEBUG):
        log.debug('The total physical CPU Mhz: %s',
                  str(state['physical_cpu_mhz_total']))
        log.debug('VM CPU MHz: %s', str(vm_cpu_mhz))
        log.debug('Host CPU MHz: %s', str(host_cpu_mhz))
        log.debug('CPU utilization: %s', str(host_cpu_utilization))

    if not host_cpu_utilization:
        log.info('Not enough data yet - skipping to the next iteration')
        log.info('Skipped an iteration')
        return state

    time_step = int(config['data_collector_interval'])
    migration_time = common.calculate_migration_time(
        vm_ram, float(config['network_migration_bandwidth']))

    if 'underload_detection' not in state:
        underload_detection_params = common.parse_parameters(
            config['algorithm_underload_detection_parameters'])
        underload_detection = common.call_function_by_name(
            config['algorithm_underload_detection_factory'],
            [time_step,
             migration_time,
             underload_detection_params])
        state['underload_detection'] = underload_detection
        state['underload_detection_state'] = {}

        overload_detection_params = common.parse_parameters(
            config['algorithm_overload_detection_parameters'])
        overload_detection = common.call_function_by_name(
            config['algorithm_overload_detection_factory'],
            [time_step,
             migration_time,
             overload_detection_params])
        state['overload_detection'] = overload_detection
        state['overload_detection_state'] = {}

        vm_selection_params = common.parse_parameters(
            config['algorithm_vm_selection_parameters'])
        vm_selection = common.call_function_by_name(
            config['algorithm_vm_selection_factory'],
            [time_step,
             migration_time,
             vm_selection_params])
        state['vm_selection'] = vm_selection
        state['vm_selection_state'] = {}
    else:
        underload_detection = state['underload_detection']
        overload_detection = state['overload_detection']
        vm_selection = state['vm_selection']

    if log.isEnabledFor(logging.INFO):
        log.info('Started underload detection')
    underload, state['underload_detection_state'] = underload_detection(
        host_cpu_utilization, state['underload_detection_state'])
    if log.isEnabledFor(logging.INFO):
        log.info('Completed underload detection')

    if log.isEnabledFor(logging.INFO):
        log.info('Started overload detection')
    overload, state['overload_detection_state'] = overload_detection(
        host_cpu_utilization, state['overload_detection_state'])
    if log.isEnabledFor(logging.INFO):
        log.info('Completed overload detection')

    if underload:
        if log.isEnabledFor(logging.INFO):
            log.info('Underload detected')
        try:
            r = requests.put('http://' + config['global_manager_host'] +
                             ':' + config['global_manager_port'],
                             {'username': state['hashed_username'],
                              'password': state['hashed_password'],
                              'time': time.time(),
                              'host': state['hostname'],
                              'reason': 0})
            if log.isEnabledFor(logging.INFO):
                log.info('Received response: [%s] %s',
                         r.status_code, r.content)
        except requests.exceptions.ConnectionError:
            log.exception('Exception at underload request:')
    else:
        if overload:
            if log.isEnabledFor(logging.INFO):
                log.info('Overload detected')

            log.info('Started VM selection')
            vm_uuids, state['vm_selection_state'] = vm_selection(
                vm_cpu_mhz, vm_ram, state['vm_selection_state'])
            log.info('Completed VM selection')

            if log.isEnabledFor(logging.INFO):
                log.info('Selected VMs to migrate: %s', str(vm_uuids))
            try:
                r = requests.put('http://' + config['global_manager_host'] +
                                 ':' + config['global_manager_port'],
                                 {'username': state['hashed_username'],
                                  'password': state['hashed_password'],
                                  'time': time.time(),
                                  'host': state['hostname'],
                                  'reason': 1,
                                  'vm_uuids': ','.join(vm_uuids)})
                if log.isEnabledFor(logging.INFO):
                    log.info('Received response: [%s] %s',
                             r.status_code, r.content)
            except requests.exceptions.ConnectionError:
                log.exception('Exception at overload request:')
        else:
            if log.isEnabledFor(logging.INFO):
                log.info('No underload or overload detected')

    if log.isEnabledFor(logging.INFO):
        log.info('Completed an iteration')

    return state
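
# The local manager treats the configured algorithms as factories: each
# factory is called once via call_function_by_name() with
# [time_step, migration_time, params] and must return a closure that takes
# the utilization history plus a state dict and returns a (decision, state)
# pair. A minimal sketch of that contract, assuming a simple static
# threshold; the factory name and the 0.8 default are illustrative
# assumptions, not taken from the configuration above.
def threshold_overload_detection_factory(time_step, migration_time, params):
    # time_step and migration_time are part of the factory signature but
    # unused in this trivial sketch.
    threshold = params.get('threshold', 0.8)

    def overload_detection(utilization_history, state):
        # Overloaded if the most recent CPU utilization exceeds the threshold.
        overloaded = bool(utilization_history) and \
            utilization_history[-1] > threshold
        return overloaded, state

    return overload_detection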
def execute(config, state):
    """ Execute a data collection iteration.

    1. Read the names of the files from the <local_data_directory>/vm
       directory to determine the list of VMs running on the host at the
       last data collection.

    2. Call the Nova API to obtain the list of VMs that are currently
       active on the host.

    3. Compare the old and new lists of VMs and determine the newly added
       or removed VMs.

    4. Delete the files from the <local_data_directory>/vm directory
       corresponding to the VMs that have been removed from the host.

    5. Fetch the latest data_collector_data_length data values from the
       central database for each newly added VM using the database
       connection information specified in the sql_connection option and
       save the data in the <local_data_directory>/vm directory.

    6. Call the Libvirt API to obtain the CPU time for each VM active on
       the host.

    7. Transform the data obtained from the Libvirt API into the average
       MHz according to the frequency of the host's CPU and time interval
       from the previous data collection.

    8. Store the converted data in the <local_data_directory>/vm directory
       in separate files for each VM, and submit the data to the central
       database.

    :param config: A config dictionary.
    :type config: dict(str: *)

    :param state: A state dictionary.
    :type state: dict(str: *)

    :return: The updated state dictionary.
    :rtype: dict(str: *)
    """
    log.info('Started an iteration')
    vm_path = common.build_local_vm_path(config['local_data_directory'])
    host_path = common.build_local_host_path(config['local_data_directory'])
    data_length = int(config['data_collector_data_length'])

    vms_previous = get_previous_vms(vm_path)
    vms_current = get_current_vms(state['vir_connection'])

    vms_added = get_added_vms(vms_previous, vms_current.keys())
    added_vm_data = dict()

    if vms_added:
        if log.isEnabledFor(logging.DEBUG):
            log.debug('Added VMs: %s', str(vms_added))
        for i, vm in enumerate(vms_added):
            if vms_current[vm] != libvirt.VIR_DOMAIN_RUNNING:
                del vms_added[i]
                del vms_current[vm]
                if log.isEnabledFor(logging.DEBUG):
                    log.debug('Added VM %s skipped as migrating in', vm)

        added_vm_data = fetch_remote_data(state['db'], data_length, vms_added)
        if log.isEnabledFor(logging.DEBUG):
            log.debug('Fetched remote data: %s', str(added_vm_data))
        write_vm_data_locally(vm_path, added_vm_data, data_length)

    vms_removed = get_removed_vms(vms_previous, vms_current.keys())
    if vms_removed:
        if log.isEnabledFor(logging.DEBUG):
            log.debug('Removed VMs: %s', str(vms_removed))
        cleanup_local_vm_data(vm_path, vms_removed)
        for vm in vms_removed:
            del state['previous_cpu_time'][vm]
            del state['previous_cpu_mhz'][vm]

    log.info('Started VM data collection')
    current_time = time.time()
    (cpu_time, cpu_mhz) = get_cpu_mhz(state['vir_connection'],
                                      state['physical_core_mhz'],
                                      state['previous_cpu_time'],
                                      state['previous_time'],
                                      current_time,
                                      vms_current.keys(),
                                      state['previous_cpu_mhz'],
                                      added_vm_data)
    log.info('Completed VM data collection')

    log.info('Started host data collection')
    (host_cpu_time_total,
     host_cpu_time_busy,
     host_cpu_mhz) = get_host_cpu_mhz(state['physical_cpu_mhz'],
                                      state['previous_host_cpu_time_total'],
                                      state['previous_host_cpu_time_busy'])
    log.info('Completed host data collection')

    if state['previous_time'] > 0:
        append_vm_data_locally(vm_path, cpu_mhz, data_length)
        append_vm_data_remotely(state['db'], cpu_mhz)

        total_vms_cpu_mhz = sum(cpu_mhz.values())
        host_cpu_mhz_hypervisor = host_cpu_mhz - total_vms_cpu_mhz
        if host_cpu_mhz_hypervisor < 0:
            host_cpu_mhz_hypervisor = 0
        total_cpu_mhz = total_vms_cpu_mhz + host_cpu_mhz_hypervisor

        append_host_data_locally(host_path, host_cpu_mhz_hypervisor,
                                 data_length)
        append_host_data_remotely(state['db'], state['hostname'],
                                  host_cpu_mhz_hypervisor)

        if log.isEnabledFor(logging.DEBUG):
            log.debug('Collected VM CPU MHz: %s', str(cpu_mhz))
            log.debug('Collected total VMs CPU MHz: %s',
                      str(total_vms_cpu_mhz))
            log.debug('Collected hypervisor CPU MHz: %s',
                      str(host_cpu_mhz_hypervisor))
            log.debug('Collected host CPU MHz: %s', str(host_cpu_mhz))
            log.debug('Collected total CPU MHz: %s', str(total_cpu_mhz))

        state['previous_overload'] = log_host_overload(
            state['db'],
            state['host_cpu_overload_threshold'],
            state['hostname'],
            state['previous_overload'],
            state['physical_cpu_mhz'],
            total_cpu_mhz)

    state['previous_time'] = current_time
    state['previous_cpu_time'] = cpu_time
    state['previous_cpu_mhz'] = cpu_mhz
    state['previous_host_cpu_time_total'] = host_cpu_time_total
    state['previous_host_cpu_time_busy'] = host_cpu_time_busy

    log.info('Completed an iteration')

    return state
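
# Steps 6-7 of the docstring above describe converting the cumulative CPU
# time reported by libvirt (nanoseconds) into an average MHz figure for the
# interval between two collections. A sketch of that conversion; the
# function name and the nanosecond unit are assumptions drawn from the
# docstring, not necessarily the project's exact implementation.
def calculate_cpu_mhz_sketch(core_mhz, previous_time, current_time,
                             previous_cpu_time, current_cpu_time):
    # Fraction of one core consumed during the interval, scaled by the
    # core frequency to obtain the average MHz used by the VM.
    cpu_seconds = (current_cpu_time - previous_cpu_time) / 1e9
    return int(core_mhz * cpu_seconds / (current_time - previous_time))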