def test_build(self, get_os_version_mock, get_os_type_mock):
    """Verify Hardware.get() and Hardware.osdisks() report sane, consistent
    mount data when the OS is mocked as SUSE 11.

    Uses assertIsNotNone/assertGreaterEqual/assertGreater/assertEqual instead
    of the original assertTrue(x != None) style so failures report the actual
    offending value.
    """
    get_os_type_mock.return_value = "suse"
    get_os_version_mock.return_value = "11"
    hardware = Hardware()
    result = hardware.get()
    osdisks = hardware.osdisks()

    for dev_item in result['mounts']:
        self.assertGreaterEqual(dev_item['available'], 0)
        self.assertGreaterEqual(dev_item['used'], 0)
        self.assertIsNotNone(dev_item['percent'])
        self.assertIsNotNone(dev_item['device'])
        self.assertIsNotNone(dev_item['mountpoint'])
        self.assertIsNotNone(dev_item['type'])
        self.assertGreater(dev_item['size'], 0)

    for os_disk_item in osdisks:
        self.assertGreaterEqual(os_disk_item['available'], 0)
        self.assertGreaterEqual(os_disk_item['used'], 0)
        self.assertIsNotNone(os_disk_item['percent'])
        self.assertIsNotNone(os_disk_item['device'])
        self.assertIsNotNone(os_disk_item['mountpoint'])
        self.assertIsNotNone(os_disk_item['type'])
        self.assertGreater(os_disk_item['size'], 0)

    # get()'s 'mounts' must mirror osdisks() one-to-one.
    self.assertEqual(len(result['mounts']), len(osdisks))
def test_osdisks_parsing(self, shell_call_mock, isfile_mock, chk_writable_mount_mock):
    """osdisks() must drop mounts that are plain files (docker bind-mounts of
    /etc/resolv.conf etc.), read-only mounts, and virtual filesystems,
    keeping only the real root mount from the mocked `df` output.

    Replaces the deprecated assertEquals alias with assertEqual.
    """
    df_output = \
"""Filesystem Type 1024-blocks Used Available Capacity Mounted on
/dev/mapper/docker-253:0-4980899-d45c264d37ab18c8ed14f890f4d59ac2b81e1c52919eb36a79419787209515f3 xfs 31447040 1282384 30164656 5% /
tmpfs tmpfs 32938336 4 32938332 1% /dev
tmpfs tmpfs 32938336 0 32938336 0% /sys/fs/cgroup
/dev/mapper/fedora-root ext4 224161316 12849696 199901804 7% /etc/resolv.conf
/dev/mapper/fedora-root ext4 224161316 12849696 199901804 7% /etc/hostname
/dev/mapper/fedora-root ext4 224161316 12849696 199901804 7% /etc/hosts
shm tmpfs 65536 0 65536 0% /dev/shm
/dev/mapper/fedora-root ext4 224161316 12849696 199901804 7% /run/secrets
"""

    def isfile_side_effect(path):
        # These mountpoints are regular files (docker bind-mounts), not dirs.
        assume_files = ["/etc/resolv.conf", "/etc/hostname", "/etc/hosts"]
        return path in assume_files

    def chk_writable_mount_side_effect(path):
        # /run/secrets is mounted read-only and must be filtered out.
        assume_read_only = ["/run/secrets"]
        return path not in assume_read_only

    isfile_mock.side_effect = isfile_side_effect
    chk_writable_mount_mock.side_effect = chk_writable_mount_side_effect
    shell_call_mock.return_value = (0, df_output, '')

    result = Hardware.osdisks()

    self.assertEqual(1, len(result))

    expected_mounts_left = ["/"]
    mounts_left = [item["mountpoint"] for item in result]

    self.assertEqual(expected_mounts_left, mounts_left)
def test_osdisks_blacklist(self, shell_call_mock, isfile_mock, chk_writable_mount_mock):
    """osdisks() must also drop any mountpoint listed in the agent's
    'ignore_mount_points' config, including its sub-directories, on top of
    the file/read-only/virtual-fs filtering.

    Replaces the deprecated assertEquals alias with assertEqual.
    """
    df_output = \
"""Filesystem Type 1024-blocks Used Available Capacity Mounted on
/dev/mapper/docker-253:0-4980899-d45c264d37ab18c8ed14f890f4d59ac2b81e1c52919eb36a79419787209515f3 xfs 31447040 1282384 30164656 5% /
tmpfs tmpfs 32938336 4 32938332 1% /dev
tmpfs tmpfs 32938336 0 32938336 0% /sys/fs/cgroup
/dev/mapper/fedora-root ext4 224161316 12849696 199901804 7% /etc/resolv.conf
/dev/mapper/fedora-root ext4 224161316 12849696 199901804 7% /etc/hostname
/dev/mapper/fedora-root ext4 224161316 12849696 199901804 7% /etc/hosts
shm tmpfs 65536 0 65536 0% /dev/shm
/dev/mapper/fedora-root ext4 224161316 12849696 199901804 7% /run/secrets
/dev/mapper/fedora-root ext4 224161316 12849696 199901804 7% /mnt/blacklisted_mount
/dev/mapper/fedora-root ext4 224161316 12849696 199901804 7% /mnt/blacklisted_mount/sub-dir
"""

    def isfile_side_effect(path):
        # These mountpoints are regular files (docker bind-mounts), not dirs.
        assume_files = ["/etc/resolv.conf", "/etc/hostname", "/etc/hosts"]
        return path in assume_files

    def chk_writable_mount_side_effect(path):
        # /run/secrets is mounted read-only and must be filtered out.
        assume_read_only = ["/run/secrets"]
        return path not in assume_read_only

    isfile_mock.side_effect = isfile_side_effect
    chk_writable_mount_mock.side_effect = chk_writable_mount_side_effect

    config_dict = {
        "agent": {
            "ignore_mount_points": "/mnt/blacklisted_mount"
        }
    }

    shell_call_mock.return_value = (0, df_output, '')

    def conf_get(section, key, default=""):
        if section in config_dict and key in config_dict[section]:
            return config_dict[section][key]
        return default

    def has_option(section, key):
        return section in config_dict and key in config_dict[section]

    # Fake just the config-object surface that osdisks() touches.
    conf = Mock()
    attr = {
        'get.side_effect': conf_get,
        'has_option.side_effect': has_option
    }
    conf.configure_mock(**attr)

    result = Hardware.osdisks(conf)

    self.assertEqual(1, len(result))

    expected_mounts_left = ["/"]
    mounts_left = [item["mountpoint"] for item in result]

    self.assertEqual(expected_mounts_left, mounts_left)
class HostStatusReporter(threading.Thread):
    """
    The thread reports host status to server if it changed from previous
    report every 'host_status_report_interval' seconds.
    """

    def __init__(self, initializer_module):
        self.initializer_module = initializer_module
        self.report_interval = initializer_module.config.host_status_report_interval
        self.stop_event = initializer_module.stop_event
        self.config = initializer_module.config
        self.host_info = HostInfo(initializer_module.config)
        # Last report acknowledged by the server; used to suppress duplicates.
        self.last_report = {}
        self.server_responses_listener = initializer_module.server_responses_listener
        self.hardware = Hardware(config=initializer_module.config, cache_info=False)
        threading.Thread.__init__(self)

    def run(self):
        """Main loop: while the agent is registered, send the host status
        report whenever it differs from the last acknowledged one.
        """
        while not self.stop_event.is_set():
            try:
                if self.initializer_module.is_registered:
                    report = self.get_report()
                    if self.initializer_module.is_registered and not Utils.are_dicts_equal(
                            report, self.last_report,
                            keys_to_skip=["agentTimeStampAtReporting"]):
                        correlation_id = self.initializer_module.connection.send(
                            message=report,
                            destination=Constants.HOST_STATUS_REPORTS_ENDPOINT)
                        # FIX: bind 'report' as a default argument. The success
                        # callback fires asynchronously; a late-binding closure
                        # would see whatever value the loop-local 'report' holds
                        # at call time, not the report that was actually sent.
                        self.server_responses_listener.listener_functions_on_success[correlation_id] = \
                            lambda headers, message, report=report: self.save_last_report(report)
            except ConnectionIsAlreadyClosed:
                # server and agent disconnected during sending data. Not an issue
                pass
            except Exception:
                # FIX: was a bare 'except:', which also swallowed SystemExit and
                # KeyboardInterrupt; keep the loop alive only on real errors.
                logger.exception("Exception in HostStatusReporter. Re-running it")

            self.stop_event.wait(self.report_interval)

        logger.info("HostStatusReporter has successfully finished")

    def save_last_report(self, report):
        # Called from the server-response listener once the send is acknowledged.
        self.last_report = report

    def get_report(self):
        """Build the host status report: agent environment plus mount info."""
        host_info_dict = {}
        self.host_info.register(host_info_dict)
        report = {
            'agentEnv': host_info_dict,
            'mounts': self.hardware.osdisks(),
        }
        return report

    def clean_cache(self):
        # Forces the next report to be sent even if unchanged.
        self.last_report = {}
def build(self, id='-1', state_interval=-1, componentsMapped=False):
    """Build a heartbeat message for the server.

    Includes command/status reports from the action queue, the recovery
    report, and — every `state_interval`-th heartbeat — the full host state
    (agentEnv + mounts). Alerts are appended when a collector is attached.

    Removed: an unused `global` declaration and a dead `pass` statement.

    :param id: response id of the heartbeat (stringified int); id == 0 means
               first heartbeat, for which componentsMapped is forced False.
    :param state_interval: send host state every N heartbeats; <= 0 disables.
    :param componentsMapped: whether the server already has components mapped.
    :return: the heartbeat dict.
    """
    timestamp = int(time.time() * 1000)
    queueResult = self.actionQueue.result()

    nodeStatus = {"status": "HEALTHY", "cause": "NONE"}

    heartbeat = {
        'responseId': int(id),
        'timestamp': timestamp,
        'hostname': hostname(self.config),
        'nodeStatus': nodeStatus
    }

    rec_status = self.actionQueue.controller.recovery_manager.get_recovery_status()
    heartbeat['recoveryReport'] = rec_status

    commandsInProgress = False
    if not self.actionQueue.commandQueue.empty():
        commandsInProgress = True

    if len(queueResult) != 0:
        heartbeat['reports'] = queueResult['reports']
        heartbeat['componentStatus'] = queueResult['componentStatus']
        if len(heartbeat['reports']) > 0:
            # There may be IN_PROGRESS tasks
            commandsInProgress = True

    # For first request/heartbeat assume no components are mapped
    if int(id) == 0:
        componentsMapped = False

    logger.info("Building Heartbeat: {responseId = %s, timestamp = %s, commandsInProgress = %s, componentsMapped = %s}",
                str(id), str(timestamp), repr(commandsInProgress), repr(componentsMapped))

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Heartbeat: %s", pformat(heartbeat))

    hostInfo = HostInfo(self.config)
    if (int(id) >= 0) and state_interval > 0 and (int(id) % state_interval) == 0:
        nodeInfo = {}
        # for now, just do the same work as registration
        # this must be the last step before returning heartbeat
        hostInfo.register(nodeInfo, componentsMapped, commandsInProgress)
        heartbeat['agentEnv'] = nodeInfo
        mounts = Hardware.osdisks(self.config)
        heartbeat['mounts'] = mounts

        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("agentEnv: %s", str(nodeInfo))
            logger.debug("mounts: %s", str(mounts))

    if self.collector is not None:
        heartbeat['alerts'] = self.collector.alerts()

    return heartbeat
def build(self, id='-1', state_interval=-1, componentsMapped=False):
    """Build a heartbeat message for the server.

    Includes command/status reports from the action queue and — every
    `state_interval`-th heartbeat — the full host state (agentEnv + mounts).
    Alerts are appended when a collector is attached.

    Removed a dead `pass` statement and an unused `global` declaration.

    :param id: response id of the heartbeat (stringified int); id == 0 means
               first heartbeat, for which componentsMapped is forced False.
    :param state_interval: send host state every N heartbeats; <= 0 disables.
    :param componentsMapped: whether the server already has components mapped.
    :return: the heartbeat dict.
    """
    timestamp = int(time.time() * 1000)
    queueResult = self.actionQueue.result()

    nodeStatus = {"status": "HEALTHY", "cause": "NONE"}

    heartbeat = {
        'responseId': int(id),
        'timestamp': timestamp,
        'hostname': hostname(self.config),
        'nodeStatus': nodeStatus
    }

    commandsInProgress = False
    if not self.actionQueue.commandQueue.empty():
        commandsInProgress = True

    if len(queueResult) != 0:
        heartbeat['reports'] = queueResult['reports']
        heartbeat['componentStatus'] = queueResult['componentStatus']
        if len(heartbeat['reports']) > 0:
            # There may be IN_PROGRESS tasks
            commandsInProgress = True

    # For first request/heartbeat assume no components are mapped
    if int(id) == 0:
        componentsMapped = False

    logger.info(
        "Building Heartbeat: {responseId = %s, timestamp = %s, commandsInProgress = %s, componentsMapped = %s}",
        str(id), str(timestamp), repr(commandsInProgress), repr(componentsMapped))

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Heartbeat: %s", pformat(heartbeat))

    hostInfo = HostInfo(self.config)
    if (int(id) >= 0) and state_interval > 0 and (int(id) % state_interval) == 0:
        nodeInfo = {}
        # for now, just do the same work as registration
        # this must be the last step before returning heartbeat
        hostInfo.register(nodeInfo, componentsMapped, commandsInProgress)
        heartbeat['agentEnv'] = nodeInfo
        # NOTE(review): a sibling variant of this builder passes self.config to
        # osdisks() so mount blacklisting applies — confirm whether this file's
        # Hardware.osdisks accepts a config argument before changing the call.
        mounts = Hardware.osdisks()
        heartbeat['mounts'] = mounts

        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("agentEnv: %s", str(nodeInfo))
            logger.debug("mounts: %s", str(mounts))

    if self.collector is not None:
        heartbeat['alerts'] = self.collector.alerts()

    return heartbeat
def test_build(self):
    """Verify Hardware.get() and Hardware.osdisks() report sane, consistent
    mount data.

    Uses assertIsNotNone/assertGreaterEqual/assertGreater/assertEqual instead
    of the original assertTrue(x != None) style so failures report the actual
    offending value.
    """
    hardware = Hardware()
    result = hardware.get()
    osdisks = hardware.osdisks()

    for dev_item in result['mounts']:
        self.assertGreaterEqual(dev_item['available'], 0)
        self.assertGreaterEqual(dev_item['used'], 0)
        self.assertIsNotNone(dev_item['percent'])
        self.assertIsNotNone(dev_item['device'])
        self.assertIsNotNone(dev_item['mountpoint'])
        self.assertGreater(dev_item['size'], 0)

    for os_disk_item in osdisks:
        self.assertGreaterEqual(os_disk_item['available'], 0)
        self.assertGreaterEqual(os_disk_item['used'], 0)
        self.assertIsNotNone(os_disk_item['percent'])
        self.assertIsNotNone(os_disk_item['device'])
        self.assertIsNotNone(os_disk_item['mountpoint'])
        self.assertGreater(os_disk_item['size'], 0)

    # get()'s 'mounts' must mirror osdisks() one-to-one.
    self.assertEqual(len(result['mounts']), len(osdisks))
def test_build(self):
    """Verify Hardware.get() and Hardware.osdisks() report sane, consistent
    mount data when constructed with the default AmbariConfig.

    Uses assertIsNotNone/assertGreaterEqual/assertGreater/assertEqual instead
    of the original assertTrue(x != None) style so failures report the actual
    offending value.
    """
    hardware = Hardware(AmbariConfig().getConfig())
    result = hardware.get()
    osdisks = hardware.osdisks()

    for dev_item in result['mounts']:
        self.assertGreaterEqual(dev_item['available'], 0)
        self.assertGreaterEqual(dev_item['used'], 0)
        self.assertIsNotNone(dev_item['percent'])
        self.assertIsNotNone(dev_item['device'])
        self.assertIsNotNone(dev_item['mountpoint'])
        self.assertIsNotNone(dev_item['type'])
        self.assertGreater(dev_item['size'], 0)

    for os_disk_item in osdisks:
        self.assertGreaterEqual(os_disk_item['available'], 0)
        self.assertGreaterEqual(os_disk_item['used'], 0)
        self.assertIsNotNone(os_disk_item['percent'])
        self.assertIsNotNone(os_disk_item['device'])
        self.assertIsNotNone(os_disk_item['mountpoint'])
        self.assertIsNotNone(os_disk_item['type'])
        self.assertGreater(os_disk_item['size'], 0)

    # get()'s 'mounts' must mirror osdisks() one-to-one.
    self.assertEqual(len(result['mounts']), len(osdisks))
def test_osdisks_remote(self, communicate_mock, popen_mock, get_os_version_mock, get_os_type_mock):
    """Check the exact `df` command line osdisks() launches for every
    combination of the remote-mounts config flags on SUSE 11.
    """
    get_os_type_mock.return_value = "suse"
    get_os_version_mock.return_value = "11"

    def expect_popen(cmd):
        # stdout=-1 is subprocess.PIPE's numeric value.
        popen_mock.assert_called_with(cmd, stdout=-1)

    # No config at all: default 10s timeout, remote mounts included.
    Hardware.osdisks()
    expect_popen(['timeout', '10', "df", "-kPT"])

    # Empty config behaves the same as no config.
    config = AmbariConfig()
    Hardware.osdisks(config)
    expect_popen(['timeout', '10', "df", "-kPT"])

    config.add_section(AmbariConfig.AMBARI_PROPERTIES_CATEGORY)

    # check_remote_mounts=true: still no "-l" (remote mounts scanned).
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "true")
    Hardware.osdisks(config)
    expect_popen(['timeout', '10', "df", "-kPT"])

    # check_remote_mounts=false: "-l" restricts df to local filesystems.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "false")
    Hardware.osdisks(config)
    expect_popen(['timeout', '10', "df", "-kPT", "-l"])

    # Timeout of 0 is ignored; default 10 stays in effect.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "0")
    Hardware.osdisks(config)
    expect_popen(['timeout', '10', "df", "-kPT", "-l"])

    # Positive timeouts are honored verbatim.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "1")
    Hardware.osdisks(config)
    expect_popen(["timeout", "1", "df", "-kPT", "-l"])

    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "2")
    Hardware.osdisks(config)
    expect_popen(["timeout", "2", "df", "-kPT", "-l"])
def test_osdisks_remote(self, shell_call_mock, get_os_version_mock, get_os_type_mock):
    """Check the exact shell.call invocation osdisks() makes for every
    combination of the remote-mounts config flags on SUSE 11.

    The seven near-identical assert_called_with blocks are folded into one
    local helper so each scenario is a single readable line and the expected
    command is built in exactly one place.
    """
    get_os_type_mock.return_value = "suse"
    get_os_version_mock.return_value = "11"

    def assert_df_called(timeout, local):
        # Expected command: timeout-wrapped df, with "-l" when restricted
        # to local filesystems.
        cmd = ['timeout', str(timeout), "df", "-kPT"]
        if local:
            cmd.append("-l")
        shell_call_mock.assert_called_with(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            timeout=timeout, quiet=True)

    # No config at all: default 10s timeout, remote mounts included.
    Hardware.osdisks()
    assert_df_called(10, local=False)

    # Empty config behaves the same as no config.
    config = AmbariConfig()
    Hardware.osdisks(config)
    assert_df_called(10, local=False)

    config.add_section(AmbariConfig.AMBARI_PROPERTIES_CATEGORY)

    # check_remote_mounts=true: still no "-l" (remote mounts scanned).
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "true")
    Hardware.osdisks(config)
    assert_df_called(10, local=False)

    # check_remote_mounts=false: "-l" restricts df to local filesystems.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "false")
    Hardware.osdisks(config)
    assert_df_called(10, local=True)

    # Timeout of 0 is ignored; default 10 stays in effect.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "0")
    Hardware.osdisks(config)
    assert_df_called(10, local=True)

    # Positive timeouts are honored verbatim.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "1")
    Hardware.osdisks(config)
    assert_df_called(1, local=True)

    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "2")
    Hardware.osdisks(config)
    assert_df_called(2, local=True)
def test_osdisks_remote(self, communicate_mock, popen_mock, get_os_version_mock, get_os_type_mock):
    """Check the exact `df` command line osdisks() launches for every
    combination of the remote-mounts config flags on SUSE 11.
    """
    get_os_type_mock.return_value = "suse"
    get_os_version_mock.return_value = "11"

    # stdout=-1 is subprocess.PIPE's numeric value.
    PIPE = -1
    DF_ALL = ['timeout', '10', "df", "-kPT"]
    DF_LOCAL = ['timeout', '10', "df", "-kPT", "-l"]

    # No config at all: default 10s timeout, remote mounts included.
    Hardware.osdisks()
    popen_mock.assert_called_with(DF_ALL, stdout=PIPE)

    # Empty config behaves the same as no config.
    config = AmbariConfig()
    Hardware.osdisks(config)
    popen_mock.assert_called_with(DF_ALL, stdout=PIPE)

    config.add_section(AmbariConfig.AMBARI_PROPERTIES_CATEGORY)

    # check_remote_mounts=true: still no "-l" (remote mounts scanned).
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "true")
    Hardware.osdisks(config)
    popen_mock.assert_called_with(DF_ALL, stdout=PIPE)

    # check_remote_mounts=false: "-l" restricts df to local filesystems.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "false")
    Hardware.osdisks(config)
    popen_mock.assert_called_with(DF_LOCAL, stdout=PIPE)

    # Timeout of 0 is ignored; default 10 stays in effect.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "0")
    Hardware.osdisks(config)
    popen_mock.assert_called_with(DF_LOCAL, stdout=PIPE)

    # Positive timeouts are honored verbatim.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "1")
    Hardware.osdisks(config)
    popen_mock.assert_called_with(["timeout", "1", "df", "-kPT", "-l"], stdout=PIPE)

    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "2")
    Hardware.osdisks(config)
    popen_mock.assert_called_with(["timeout", "2", "df", "-kPT", "-l"], stdout=PIPE)
def build(self, id='-1', add_state=False, componentsMapped=False):
    """Build a heartbeat message for the server.

    Includes command/status reports from the action queue, the recovery
    report/timestamp, and — when `add_state` is set — the full host state
    (agentEnv + mounts). Alerts are appended when a collector is attached.

    Removed: an unused `global` declaration and a dead `pass` statement.

    :param id: response id of the heartbeat (stringified int); id == 0 means
               first heartbeat, for which componentsMapped is forced False.
    :param add_state: when True, attach host info/state to the heartbeat.
    :param componentsMapped: whether the server already has components mapped.
    :return: the heartbeat dict.
    """
    timestamp = int(time.time() * 1000)
    queueResult = self.actionQueue.result()
    recovery_timestamp = self.actionQueue.controller.recovery_manager.recovery_timestamp

    nodeStatus = {"status": "HEALTHY", "cause": "NONE"}

    heartbeat = {
        'responseId': int(id),
        'timestamp': timestamp,
        'hostname': hostname(self.config),
        'nodeStatus': nodeStatus,
        'recoveryTimestamp': recovery_timestamp
    }

    rec_status = self.actionQueue.controller.recovery_manager.get_recovery_status()
    heartbeat['recoveryReport'] = rec_status

    commandsInProgress = False
    if not self.actionQueue.commandQueue.empty():
        commandsInProgress = True

    if len(queueResult) != 0:
        heartbeat['reports'] = queueResult['reports']
        heartbeat['componentStatus'] = queueResult['componentStatus']
        if len(heartbeat['reports']) > 0:
            # There may be IN_PROGRESS tasks
            commandsInProgress = True

    # For first request/heartbeat assume no components are mapped
    if int(id) == 0:
        componentsMapped = False

    logger.debug(
        "Building Heartbeat: {responseId = %s, timestamp = %s, "
        "commandsInProgress = %s, componentsMapped = %s,"
        "recoveryTimestamp = %s}", str(id), str(timestamp),
        repr(commandsInProgress), repr(componentsMapped),
        str(recovery_timestamp))

    logger.debug("Heartbeat: %s", pformat(heartbeat))

    hostInfo = HostInfo(self.config)
    if add_state:
        logger.info("Adding host info/state to heartbeat message.")
        nodeInfo = {}
        # for now, just do the same work as registration
        # this must be the last step before returning heartbeat
        hostInfo.register(nodeInfo, componentsMapped, commandsInProgress)
        heartbeat['agentEnv'] = nodeInfo
        mounts = Hardware.osdisks(self.config)
        heartbeat['mounts'] = mounts

        logger.debug("agentEnv: %s", str(nodeInfo))
        logger.debug("mounts: %s", str(mounts))

    if self.collector is not None:
        heartbeat['alerts'] = self.collector.alerts()

    return heartbeat