def test_read_write_component(self):
    """Round-trip per-component config tags through ActualConfigHandler.

    Writes tags for component 'FOO', verifies they read back, verifies an
    unwritten component ('GOO') reads as None, and verifies that rewriting
    the overall actual config does not clobber the component-level file.
    Cleans up both files written under the agent prefix directory.
    """
    config = AmbariConfig().getConfig()
    tmpdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tmpdir)
    tags1 = { "global": "version1", "core-site": "version2" }
    handler = ActualConfigHandler(config, {})
    handler.write_actual(tags1)
    handler.write_actual_component('FOO', tags1)
    output1 = handler.read_actual_component('FOO')
    # 'GOO' was never written, so it must come back as None.
    output2 = handler.read_actual_component('GOO')
    self.assertEquals(tags1, output1)
    self.assertEquals(None, output2)
    tags2 = { "global": "version1", "core-site": "version2" }
    handler.write_actual(tags2)
    output3 = handler.read_actual()
    # Component-level tags must survive a write_actual() of new overall tags.
    output4 = handler.read_actual_component('FOO')
    self.assertEquals(tags2, output3)
    self.assertEquals(tags1, output4)
    os.remove(os.path.join(tmpdir, "FOO_" + ActualConfigHandler.CONFIG_NAME))
    os.remove(os.path.join(tmpdir, ActualConfigHandler.CONFIG_NAME))
def test_osdisks_remote(self, communicate_mock, popen_mock, get_os_version_mock, get_os_type_mock):
    """Verify the `df` command line built by Hardware.osdisks().

    Checks the defaults (remote mounts included, 10 s timeout), the effect of
    CHECK_REMOTE_MOUNTS_KEY (false adds `-l`, local filesystems only), and
    CHECK_REMOTE_MOUNTS_TIMEOUT_KEY (0 falls back to the 10 s default;
    positive values are passed to `timeout` verbatim).
    """
    get_os_type_mock.return_value = "suse"
    get_os_version_mock.return_value = "11"
    # No config argument at all: defaults apply.
    Hardware.osdisks()
    popen_mock.assert_called_with(['timeout', '10', "df","-kPT"], stdout=-1)
    config = AmbariConfig()
    # Config present but without the agent properties section: same defaults.
    Hardware.osdisks(config)
    popen_mock.assert_called_with(['timeout', '10', "df","-kPT"], stdout=-1)
    config.add_section(AmbariConfig.AMBARI_PROPERTIES_CATEGORY)
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "true")
    Hardware.osdisks(config)
    popen_mock.assert_called_with(['timeout', '10', "df","-kPT"], stdout=-1)
    # Disabling remote-mount checks adds "-l" (local filesystems only).
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "false")
    Hardware.osdisks(config)
    popen_mock.assert_called_with(['timeout', '10', "df","-kPT", "-l"], stdout=-1)
    # A timeout of 0 is rejected in favor of the 10-second default.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "0")
    Hardware.osdisks(config)
    popen_mock.assert_called_with(['timeout', '10', "df","-kPT","-l"], stdout=-1)
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "1")
    Hardware.osdisks(config)
    popen_mock.assert_called_with(["timeout","1","df","-kPT","-l"], stdout=-1)
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "2")
    Hardware.osdisks(config)
    popen_mock.assert_called_with(["timeout","2","df","-kPT","-l"], stdout=-1)
def test_status_command_without_globals_section(self, stopped_method, read_stack_version_method):
    """A STATUS_COMMAND whose 'configurations' lacks a 'global' section must
    still yield a well-formed ('STATUS_COMMAND', result-dict) tuple."""
    config = AmbariConfig().getConfig()
    config.set('agent', 'prefix', TestStackVersionsFileHandler.dummyVersionsFile)
    queue = ActionQueue(config)
    statusCommand = {
        "serviceName" : 'HDFS',
        "commandType" : "STATUS_COMMAND",
        "clusterName" : "",
        "componentName" : "DATANODE",
        'configurations':{}
    }
    queue.stopped = stopped_method
    # Loop twice, then report stopped so queue.run() terminates.
    stopped_method.side_effect = [False, False, True, True, True]
    read_stack_version_method.return_value="1.3.0"
    queue.IDLE_SLEEP_TIME = 0.001
    queue.put(statusCommand)
    queue.run()
    returned_result = queue.resultQueue.get()
    returned_result[1]['status'] = 'INSTALLED' # Patch live value
    self.assertEquals(returned_result, ('STATUS_COMMAND',
                                        {'clusterName': '',
                                         'componentName': 'DATANODE',
                                         'msg': '',
                                         'serviceName': 'HDFS',
                                         'stackVersion': '1.3.0',
                                         'status': 'INSTALLED'}))
def test_registration_build(self, get_os_version_mock, get_os_type_mock, run_os_cmd_mock):
    """Register.build() must produce a 9-field registration payload with a
    hardware profile, host names, timestamp, agent env/version, ping port
    and prefix — version read from the 'version' file under the prefix."""
    config = AmbariConfig().getConfig()
    tmpdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tmpdir)
    config.set('agent', 'current_ping_port', '33777')
    get_os_type_mock.return_value = "suse"
    get_os_version_mock.return_value = "11"
    run_os_cmd_mock.return_value = (3, "", "")
    # The agent version is read back from this file via the prefix setting.
    ver_file = os.path.join(tmpdir, "version")
    with open(ver_file, "w") as text_file:
        text_file.write("1.3.0")
    register = Register(config)
    data = register.build(1)
    #print ("Register: " + pprint.pformat(data))
    self.assertEquals(len(data['hardwareProfile']) > 0, True, "hardwareProfile should contain content")
    self.assertEquals(data['hostname'] != "", True, "hostname should not be empty")
    self.assertEquals(data['publicHostname'] != "", True, "publicHostname should not be empty")
    self.assertEquals(data['responseId'], 1)
    self.assertEquals(data['timestamp'] > 1353678475465L, True, "timestamp should not be empty")
    self.assertEquals(len(data['agentEnv']) > 0, True, "agentEnv should not be empty")
    self.assertEquals(data['agentVersion'], '1.3.0', "agentVersion should not be empty")
    print data['agentEnv']['umask']
    self.assertEquals(not data['agentEnv']['umask']== "", True, "agents umask should not be empty")
    self.assertEquals(data['currentPingPort'] == 33777, True, "current ping port should be 33777")
    self.assertEquals(data['prefix'], config.get('agent', 'prefix'), 'The prefix path does not match')
    self.assertEquals(len(data), 9)
    os.remove(ver_file)
def test_build(self, read_actual_component_mock): for component in LiveStatus.COMPONENTS: config = AmbariConfig().getConfig() config.set('agent', 'prefix', "ambari_agent" + os.sep + "dummy_files") livestatus = LiveStatus('', component['serviceName'], component['componentName'], {}, config, {}) livestatus.versionsHandler.versionsFilePath = "ambari_agent" + os.sep + "dummy_files" + os.sep + "dummy_current_stack" result = livestatus.build() print "LiveStatus of {0}: {1}".format(component['serviceName'], str(result)) self.assertEquals(len(result) > 0, True, 'Livestatus should not be empty') if component['componentName'] == 'GANGLIA_SERVER': self.assertEquals(result['stackVersion'],'{"stackName":"HDP","stackVersion":"1.2.2"}', 'Livestatus should contain component stack version') # Test build status for CLIENT component (in LiveStatus.CLIENT_COMPONENTS) read_actual_component_mock.return_value = "some tags" livestatus = LiveStatus('c1', 'HDFS', 'HDFS_CLIENT', { }, config, {}) result = livestatus.build() self.assertTrue(len(result) > 0, 'Livestatus should not be empty') self.assertTrue(result.has_key('configurationTags')) # Test build status with forsed_component_status ## Alive livestatus = LiveStatus('c1', 'HDFS', 'HDFS_CLIENT', { }, config, {}) result = livestatus.build(forsed_component_status = LiveStatus.LIVE_STATUS) self.assertTrue(len(result) > 0, 'Livestatus should not be empty') self.assertTrue(result['status'], LiveStatus.LIVE_STATUS) ## Dead livestatus = LiveStatus('c1', 'HDFS', 'HDFS_CLIENT', { }, config, {}) result = livestatus.build(forsed_component_status = LiveStatus.DEAD_STATUS) self.assertTrue(len(result) > 0, 'Livestatus should not be empty') self.assertTrue(result['status'], LiveStatus.DEAD_STATUS) livestatus = LiveStatus('c1', 'TEZ', 'TEZ_CLIENT', { }, config, {}) result = livestatus.build(forsed_component_status = LiveStatus.LIVE_STATUS) self.assertTrue(len(result) > 0, 'Livestatus should not be empty') self.assertTrue(result['status'], 
LiveStatus.LIVE_STATUS)
def test_add_reg_listener_to_controller(self, FileCache_mock):
    """Constructing a CustomServiceOrchestrator must append a registration
    listener to the controller's registration_listeners list."""
    FileCache_mock.return_value = None
    controller_mock = MagicMock()
    agent_config = AmbariConfig().getConfig()
    agent_config.set('agent', 'prefix', tempfile.gettempdir())
    CustomServiceOrchestrator(agent_config, controller_mock)
    self.assertTrue(controller_mock.registration_listeners.append.called)
def test_server_hostname(self):
    """server_hostname() must honor the [server] 'hostname' setting."""
    hostname.cached_server_hostname = None  # drop any cached lookup
    config = AmbariConfig()
    original_hostname = config.get('server', 'hostname')
    config.set('server', 'hostname', 'ambari-host')
    self.assertEquals('ambari-host', hostname.server_hostname(config),
                      "hostname should equal the socket-based hostname")
    # Restore the default so later tests see an untouched configuration.
    config.set('server', 'hostname', original_hostname)
def test_dump_command_to_json(self, FileCache_mock, unlink_mock, isfile_mock, hostname_mock, decompress_cluster_host_info_mock):
    """Check CustomServiceOrchestrator.dump_command_to_json().

    An EXECUTION_COMMAND is dumped to command-<taskId>.json with its
    clusterHostInfo decompressed; a STATUS_COMMAND is dumped to
    status_command.json without decompression.  On non-Windows platforms the
    file must be private (mode 0600).  Also checks side effects on the
    command dict (public_hostname, agentConfigParams) and that the previous
    dump is unlinked.
    """
    FileCache_mock.return_value = None
    hostname_mock.return_value = "test.hst"
    command = {
        'commandType': 'EXECUTION_COMMAND',
        'role': u'DATANODE',
        'roleCommand': u'INSTALL',
        'commandId': '1-1',
        'taskId': 3,
        'clusterName': u'cc',
        'serviceName': u'HDFS',
        'configurations':{'global' : {}},
        'configurationTags':{'global' : { 'tag': 'v1' }},
        # Compressed form: index lists and "port:host-indexes" encoding.
        'clusterHostInfo':{'namenode_host' : ['1'],
                           'slave_hosts' : ['0', '1'],
                           'all_hosts' : ['h1.hortonworks.com', 'h2.hortonworks.com'],
                           'all_ping_ports': ['8670:0,1']},
        'hostLevelParams':{}
    }
    decompress_cluster_host_info_mock.return_value = {'namenode_host' : ['h2.hortonworks.com'],
                                                      'slave_hosts' : ['h1.hortonworks.com', 'h2.hortonworks.com'],
                                                      'all_hosts' : ['h1.hortonworks.com', 'h2.hortonworks.com'],
                                                      'all_ping_ports': ['8670', '8670']}
    config = AmbariConfig()
    tempdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tempdir)
    dummy_controller = MagicMock()
    orchestrator = CustomServiceOrchestrator(config, dummy_controller)
    isfile_mock.return_value = True
    # Test dumping EXECUTION_COMMAND
    json_file = orchestrator.dump_command_to_json(command)
    self.assertTrue(os.path.exists(json_file))
    self.assertTrue(os.path.getsize(json_file) > 0)
    if get_platform() != PLATFORM_WINDOWS:
        self.assertEqual(oct(os.stat(json_file).st_mode & 0777), '0600')
    self.assertTrue(json_file.endswith("command-3.json"))
    self.assertTrue(decompress_cluster_host_info_mock.called)
    os.unlink(json_file)
    # Test dumping STATUS_COMMAND
    command['commandType']='STATUS_COMMAND'
    decompress_cluster_host_info_mock.reset_mock()
    json_file = orchestrator.dump_command_to_json(command)
    self.assertTrue(os.path.exists(json_file))
    self.assertTrue(os.path.getsize(json_file) > 0)
    if get_platform() != PLATFORM_WINDOWS:
        self.assertEqual(oct(os.stat(json_file).st_mode & 0777), '0600')
    self.assertTrue(json_file.endswith("status_command.json"))
    # Status commands must not trigger clusterHostInfo decompression.
    self.assertFalse(decompress_cluster_host_info_mock.called)
    os.unlink(json_file)
    # Testing side effect of dump_command_to_json
    self.assertEquals(command['public_hostname'], "test.hst")
    self.assertEquals(command['agentConfigParams']['agent']['parallel_execution'], 0)
    self.assertTrue(unlink_mock.called)
def run_simulation():
    """Drive a Controller instance against canned server responses.

    Stubs Controller.sendRequest with send_stub, which replays the
    module-level `responces` queue (repeating the last one, after a 30 s
    pause, when the queue runs dry) and rewrites each response's responseId
    from the shared `responseId` counter.  Runs the controller loop with
    very short heartbeat intervals so the simulation progresses quickly.
    """
    Controller.logger = MagicMock()
    sendRequest_method = MagicMock()
    tmpfile = tempfile.gettempdir()
    config = AmbariConfig().getConfig()
    config.set('agent', 'prefix', tmpfile)
    scriptsDir = os.path.join(os.getcwd(), os.pardir,os.pardir, os.pardir, 'main', 'upgrade_stack')
    config.set('stack', 'upgradeScriptsDir', scriptsDir)
    # The controller reads its agent version from this file under the prefix.
    ver_file = os.path.join(tmpfile, "version")
    with open(ver_file, "w") as text_file:
        text_file.write(agent_version)
    controller = Controller.Controller(config)
    controller.sendRequest = sendRequest_method
    controller.netutil.HEARTBEAT_IDDLE_INTERVAL_SEC = 0.1
    controller.netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC = 0.1
    controller.range = 1
    # Preload the canned responses for send_stub to replay in order.
    for responce in responces:
        queue.put(responce)
    def send_stub(url, data):
        # Stand-in for Controller.sendRequest: log the outgoing payload,
        # pick the next canned response, and patch in a fresh responseId.
        logger.info("Controller sends data to %s :" % url)
        logger.info(pprint.pformat(data))
        if not queue.empty():
            responce = queue.get()
        else:
            responce = responces[-1]
            logger.info("There is no predefined responce available, sleeping for 30 sec")
            time.sleep(30)
        responce = json.loads(responce)
        responseId.inc()
        responce["responseId"] = responseId.val()
        responce = json.dumps(responce)
        logger.info("Returning data to Controller:" + responce)
        return responce
    sendRequest_method.side_effect = send_stub
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - \
%(message)s")
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    logger.info("Starting")
    controller.start()
    controller.actionQueue.IDLE_SLEEP_TIME = 0.1
    controller.run()
def test_server_hostnames(self):
    """server_hostnames() must return the configured server host as a list."""
    hostname.cached_server_hostnames = []  # drop any cached lookup
    config = AmbariConfig()
    original_hostname = config.get('server', 'hostname')
    config.set('server', 'hostname', 'ambari-host')
    server_hostnames = hostname.server_hostnames(config)
    self.assertEquals(['ambari-host'], server_hostnames,
                      "expected host name ['ambari-host']; got {0}".format(server_hostnames))
    # Restore the default so later tests see an untouched configuration.
    config.set('server', 'hostname', original_hostname)
def test_ambari_config_get(self):
    """AmbariConfig.get(): built-in default, explicit override, and
    stripping of surrounding whitespace."""
    config = AmbariConfig()
    # Built-in default value
    self.assertEqual("/tmp/ambari-agent", config.get("security", "keysdir"))
    # Explicitly set value wins over the default
    config.set("security", "keysdir", "/tmp/non-default-path")
    self.assertEqual("/tmp/non-default-path", config.get("security", "keysdir"))
    # Leading/trailing whitespace must be stripped on read
    config.set("security", "keysdir", " /tmp/non-stripped")
    self.assertEqual("/tmp/non-stripped", config.get("security", "keysdir"))
def test_process_command(self, execute_status_command_mock, execute_command_mock, print_exc_mock):
    """ActionQueue.process_command() dispatch and error logging.

    Unknown command types are ignored; EXECUTION_COMMAND goes to
    execute_command; STATUS_COMMAND goes to execute_status_command; and an
    exception raised by either handler is logged via traceback.print_exc
    rather than propagated.
    """
    dummy_controller = MagicMock()
    config = AmbariConfig()
    config.set('agent', 'tolerate_download_failures', "true")
    actionQueue = ActionQueue(config, dummy_controller)
    execution_command = {
        'commandType' : ActionQueue.EXECUTION_COMMAND,
    }
    status_command = {
        'commandType' : ActionQueue.STATUS_COMMAND,
    }
    wrong_command = {
        'commandType' : "SOME_WRONG_COMMAND",
    }
    # Try wrong command
    actionQueue.process_command(wrong_command)
    self.assertFalse(execute_command_mock.called)
    self.assertFalse(execute_status_command_mock.called)
    self.assertFalse(print_exc_mock.called)
    execute_command_mock.reset_mock()
    execute_status_command_mock.reset_mock()
    print_exc_mock.reset_mock()
    # Try normal execution
    actionQueue.process_command(execution_command)
    self.assertTrue(execute_command_mock.called)
    self.assertFalse(execute_status_command_mock.called)
    self.assertFalse(print_exc_mock.called)
    execute_command_mock.reset_mock()
    execute_status_command_mock.reset_mock()
    print_exc_mock.reset_mock()
    actionQueue.process_command(status_command)
    self.assertFalse(execute_command_mock.called)
    self.assertTrue(execute_status_command_mock.called)
    self.assertFalse(print_exc_mock.called)
    execute_command_mock.reset_mock()
    execute_status_command_mock.reset_mock()
    print_exc_mock.reset_mock()
    # Try exception to check proper logging
    def side_effect(self):
        raise Exception("TerribleException")
    execute_command_mock.side_effect = side_effect
    actionQueue.process_command(execution_command)
    self.assertTrue(print_exc_mock.called)
    print_exc_mock.reset_mock()
    execute_status_command_mock.side_effect = side_effect
    actionQueue.process_command(execution_command)
    self.assertTrue(print_exc_mock.called)
def test_read_write(self):
    """Tags written with write_actual() must read back unchanged."""
    config = AmbariConfig().getConfig()
    prefix_dir = tempfile.gettempdir()
    config.set('agent', 'prefix', prefix_dir)
    tags = {"global": "version1", "core-site": "version2"}
    handler = ActualConfigHandler(config, tags)
    handler.write_actual(tags)
    self.assertEquals(tags, handler.read_actual())
    # Remove the file the handler wrote under the prefix directory.
    os.remove(os.path.join(prefix_dir, ActualConfigHandler.CONFIG_NAME))
def test_read_agent_version(self, get_os_version_mock, get_os_type_mock):
    """read_agent_version() must return the contents of the 'version' file
    found under the configured agent prefix directory."""
    config = AmbariConfig().getConfig()
    prefix_dir = tempfile.gettempdir()
    config.set('agent', 'prefix', prefix_dir)
    config.set('agent', 'current_ping_port', '33777')
    expected_version = "1.3.0"
    version_path = os.path.join(prefix_dir, "version")
    with open(version_path, "w") as version_file:
        version_file.write(expected_version)
    actual_version = self.controller.read_agent_version(config)
    os.remove(version_path)
    self.assertEqual(expected_version, actual_version)
def test_command_in_progress(self):
    """Track an EXECUTION_COMMAND through the ActionQueue lifecycle.

    Uses a FakeExecutor gated by two threading events to sample
    actionQueue.result() before the command starts, while it is running
    (IN_PROGRESS with the executor's dummy output), and after completion
    (COMPLETED with the executor's returned output).
    """
    config = AmbariConfig().getConfig()
    tmpfile = tempfile.gettempdir()
    config.set('agent', 'prefix', tmpfile)
    actionQueue = ActionQueue(config)
    actionQueue.IDLE_SLEEP_TIME = 0.01
    executor_started_event = threading.Event()
    end_executor_event = threading.Event()
    actionQueue.puppetExecutor = FakeExecutor(executor_started_event, end_executor_event)
    before_start_result = actionQueue.result()
    command = {
        'commandId': 17,
        'role' : "role",
        'taskId' : "taskId",
        'clusterName' : "clusterName",
        'serviceName' : "serviceName",
        'status' : 'IN_PROGRESS',
        'hostname' : "localhost.localdomain",
        'hostLevelParams': "hostLevelParams",
        'clusterHostInfo': "clusterHostInfo",
        'roleCommand': "roleCommand",
        'configurations': "configurations",
        'commandType': "EXECUTION_COMMAND",
        'configurations':{'global' : {}}
    }
    actionQueue.put(command)
    actionQueue.start()
    # Wait for the fake executor to begin, then sample the live result.
    executor_started_event.wait()
    #print ("ii: " + pprint.pformat(actionQueue.commandInProgress))
    in_progress_result = actionQueue.result()
    # Release the executor and let the queue finish.
    end_executor_event.set()
    actionQueue.stop()
    actionQueue.join()
    after_start_result = actionQueue.result()
    self.assertEquals(len(before_start_result['componentStatus']), 0)
    self.assertEquals(len(before_start_result['reports']), 0)
    self.assertEquals(len(in_progress_result['componentStatus']), 0)
    self.assertEquals(len(in_progress_result['reports']), 1)
    self.assertEquals(in_progress_result['reports'][0]['status'], "IN_PROGRESS")
    self.assertEquals(in_progress_result['reports'][0]['stdout'], "Dummy output")
    self.assertEquals(in_progress_result['reports'][0]['exitCode'], 777)
    self.assertEquals(in_progress_result['reports'][0]['stderr'], 'Dummy err')
    self.assertEquals(len(after_start_result['componentStatus']), 0)
    self.assertEquals(len(after_start_result['reports']), 1)
    self.assertEquals(after_start_result['reports'][0]['status'], "COMPLETED")
    self.assertEquals(after_start_result['reports'][0]['stdout'], "returned stdout")
    self.assertEquals(after_start_result['reports'][0]['exitCode'], 0)
    self.assertEquals(after_start_result['reports'][0]['stderr'], 'returned stderr')
def test_build(self):
    """LiveStatus.build() must return a non-empty status for every known
    component, including the stack version for GANGLIA_SERVER."""
    for component in LiveStatus.COMPONENTS:
        config = AmbariConfig().getConfig()
        config.set('agent', 'prefix', "dummy_files")
        livestatus = LiveStatus('', component['serviceName'], component['componentName'], {}, config)
        # Point the versions handler at the fixture stack-version file.
        livestatus.versionsHandler.versionsFilePath = os.path.join("dummy_files","dummy_current_stack")
        result = livestatus.build()
        print "LiveStatus of {0}: {1}".format(component['serviceName'], str(result))
        self.assertEquals(len(result) > 0, True, 'Livestatus should not be empty')
        if component['componentName'] == 'GANGLIA_SERVER':
            self.assertEquals(result['stackVersion'],
                              '{"stackName":"HDP","stackVersion":"1.2.2"}',
                              'Livestatus should contain component stack version')
def test_read_empty(self):
    """read_actual() must return None when the config file exists but is
    empty."""
    config = AmbariConfig().getConfig()
    tmpdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tmpdir)
    handler = ActualConfigHandler(config, {})
    # FIX: use a context manager so the handle is closed even if the write
    # raises (the original left an unclosed file object on failure).
    with open(os.path.join(tmpdir, ActualConfigHandler.CONFIG_NAME), 'w') as conf_file:
        conf_file.write("")
    output = handler.read_actual()
    self.assertEquals(None, output)
    os.remove(os.path.join(tmpdir, ActualConfigHandler.CONFIG_NAME))
def test_update_log_level(self, basicConfig_mock, setLevel_mock): config = AmbariConfig().getConfig() # Testing with default setup (config file does not contain loglevel entry) # Log level should not be changed config.set('agent', 'loglevel', None) main.update_log_level(config) self.assertFalse(setLevel_mock.called) setLevel_mock.reset_mock() # Testing debug mode config.set('agent', 'loglevel', 'DEBUG') main.update_log_level(config) setLevel_mock.assert_called_with(logging.DEBUG) setLevel_mock.reset_mock() # Testing any other mode config.set('agent', 'loglevel', 'INFO') main.update_log_level(config) setLevel_mock.assert_called_with(logging.INFO) setLevel_mock.reset_mock() config.set('agent', 'loglevel', 'WRONG') main.update_log_level(config) setLevel_mock.assert_called_with(logging.INFO)
def test_configtags(self):
    """An EXECUTION_COMMAND with configurationTags must complete with the
    tags echoed in its report and a config.json written under the prefix."""
    config = AmbariConfig().getConfig()
    tmpfile = tempfile.gettempdir()
    config.set('agent', 'prefix', tmpfile)
    actionQueue = ActionQueue(config)
    actionQueue.IDLE_SLEEP_TIME = 0.01
    executor_started_event = threading.Event()
    end_executor_event = threading.Event()
    actionQueue.puppetExecutor = FakeExecutor(executor_started_event, end_executor_event)
    command = {
        'commandId': 17,
        'role' : "role",
        'taskId' : "taskId",
        'clusterName' : "clusterName",
        'serviceName' : "serviceName",
        'status' : 'IN_PROGRESS',
        'hostname' : "localhost.localdomain",
        'hostLevelParams': "hostLevelParams",
        'clusterHostInfo': "clusterHostInfo",
        'roleCommand': "roleCommand",
        'configurations': "configurations",
        'commandType': "EXECUTION_COMMAND",
        'configurations':{'global' : {}},
        'configurationTags':{'global' : { 'tag': 'v1' }}
    }
    actionQueue.put(command)
    actionQueue.start()
    # Let the fake executor start, then release it and drain the queue.
    executor_started_event.wait()
    end_executor_event.set()
    actionQueue.stop()
    actionQueue.join()
    after_start_result = actionQueue.result()
    configname = os.path.join(tmpfile, 'config.json')
    self.assertEquals(len(after_start_result['componentStatus']), 0)
    self.assertEquals(len(after_start_result['reports']), 1)
    self.assertEquals(after_start_result['reports'][0]['status'], "COMPLETED")
    self.assertEquals(after_start_result['reports'][0]['stdout'], "returned stdout")
    self.assertEquals(after_start_result['reports'][0]['exitCode'], 0)
    self.assertEquals(after_start_result['reports'][0]['stderr'], 'returned stderr')
    self.assertEquals(len(after_start_result['reports'][0]['configurationTags']), 1)
    self.assertEquals(True, os.path.isfile(configname))
    os.remove(configname)
def test_write_empty_client_components(self, write_file_mock):
    """write_client_components() with an empty clients-to-update list must
    neither change the cached component tags nor write any file."""
    config = AmbariConfig().getConfig()
    tmpdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tmpdir)
    tags0 = {"global": "version0", "core-site": "version0"}
    tags1 = {"global": "version1", "core-site": "version2"}
    tags2 = {"global": "version33", "core-site": "version33"}
    clientsToUpdateConfigs1 = []  # no clients selected for update
    configTags = {'HDFS_CLIENT': tags0, 'HBASE_CLIENT': tags1}
    handler = ActualConfigHandler(config, configTags)
    self.assertEquals(tags0, handler.read_actual_component('HDFS_CLIENT'))
    self.assertEquals(tags1, handler.read_actual_component('HBASE_CLIENT'))
    handler.write_client_components('HDFS', tags2, clientsToUpdateConfigs1)
    # Nothing was selected, so the original tags must still be in place.
    self.assertEquals(tags0, handler.read_actual_component('HDFS_CLIENT'))
    self.assertEquals(tags1, handler.read_actual_component('HBASE_CLIENT'))
    self.assertFalse(write_file_mock.called)
def test_unregistration_build(self): config = AmbariConfig().getConfig() tmpdir = tempfile.gettempdir() config.set('agent', 'prefix', tmpdir) ver_file = os.path.join(tmpdir, "version") with open(ver_file, "w") as text_file: text_file.write("1.3.0") register = Unregister(config) data = register.build(1) self.assertEquals(data['hostname'] != "", True, "hostname should not be empty") self.assertEquals(data['responseId'], 1) self.assertEquals(data['timestamp'] > 1353678475465L, True, "timestamp should not be empty") self.assertEquals(data['agentVersion'], '1.3.0', "agentVersion should not be empty") self.assertEquals(len(data), 4) os.remove(ver_file)
def test_read_actual_component_inmemory(self, read_file_mock, write_file_mock):
    """read_actual_component() must serve written components from memory and
    hit the file (read_file) exactly once per unknown component."""
    config = AmbariConfig().getConfig()
    tmpdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tmpdir)
    tags1 = { "global": "version1", "core-site": "version2" }
    read_file_mock.return_value = tags1
    handler = ActualConfigHandler(config, {})
    handler.write_actual_component('NAMENODE', tags1)
    self.assertTrue(write_file_mock.called)
    # NAMENODE was just written, so the in-memory copy is used — no file read.
    self.assertEquals(tags1, handler.read_actual_component('NAMENODE'))
    self.assertFalse(read_file_mock.called)
    # DATANODE is unknown: first access reads the file once...
    self.assertEquals(tags1, handler.read_actual_component('DATANODE'))
    self.assertTrue(read_file_mock.called)
    self.assertEquals(1, read_file_mock.call_count)
    # ...and subsequent accesses are served from the cache.
    self.assertEquals(tags1, handler.read_actual_component('DATANODE'))
    self.assertEquals(1, read_file_mock.call_count)
def test_server_hostname_override(self):
    """A [server] hostname_script must override the server hostname with the
    script's stdout."""
    hostname.cached_server_hostname = None
    # Create an executable temp script that echoes the override hostname.
    fd = tempfile.mkstemp(text=True)
    tmpname = fd[1]
    os.close(fd[0])
    os.chmod(tmpname, os.stat(tmpname).st_mode | stat.S_IXUSR)
    tmpfile = file(tmpname, "w+")
    config = AmbariConfig()
    try:
        tmpfile.write("#!/bin/sh\n\necho 'test.example.com'")
        tmpfile.close()
        config.set('server', 'hostname_script', tmpname)
        self.assertEquals(hostname.server_hostname(config), 'test.example.com',
                          "expected hostname 'test.example.com'")
    finally:
        # Always remove the script and the override, even on failure.
        os.remove(tmpname)
        config.remove_option('server', 'hostname_script')
    pass
def test_build_predefined(self, getStatus_mock, read_actual_component_mock):
    """
    Tests that if live status is defined (using default parameter),
    then no StatusCheck is executed
    """
    # FIX: the triple-quoted summary used to sit *after* the first statement,
    # where it is a dead no-op expression; moved to the top so it is a real
    # docstring. Behavior is unchanged.
    read_actual_component_mock.return_value = "actual_component"
    config = AmbariConfig().getConfig()
    config.set('agent', 'prefix', "ambari_agent" + os.sep + "dummy_files")
    livestatus = LiveStatus('', 'SOME_UNKNOWN_SERVICE',
                            'SOME_UNKNOWN_COMPONENT', {}, config, {})
    livestatus.versionsHandler.versionsFilePath = "ambari_agent" + \
        os.sep + "dummy_files" + os.sep + "dummy_current_stack"
    result = livestatus.build(forsed_component_status = "STARTED")
    result_str = pprint.pformat(result)
    self.assertEqual(result_str,
                     "{'clusterName': '',\n "
                     "'componentName': 'SOME_UNKNOWN_COMPONENT',\n "
                     "'configurationTags': 'actual_component',\n "
                     "'msg': '',\n 'serviceName': 'SOME_UNKNOWN_SERVICE',\n "
                     "'stackVersion': '',\n 'status': 'STARTED'}")
    # Forcing a status must short-circuit the real status check.
    self.assertFalse(getStatus_mock.called)
def test_registration_build(self, get_os_version_mock, get_os_type_mock, run_os_cmd_mock):
    """Register.build() must produce a 9-field registration payload with a
    hardware profile, host names, timestamp, agent env/version, ping port
    and prefix — version read from the 'version' file under the prefix."""
    config = AmbariConfig().getConfig()
    tmpdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tmpdir)
    config.set('agent', 'current_ping_port', '33777')
    get_os_type_mock.return_value = "suse"
    get_os_version_mock.return_value = "11"
    run_os_cmd_mock.return_value = (3, "", "")
    # The agent version is read back from this file via the prefix setting.
    ver_file = os.path.join(tmpdir, "version")
    with open(ver_file, "w") as text_file:
        text_file.write("1.3.0")
    register = Register(config)
    data = register.build(1)
    #print ("Register: " + pprint.pformat(data))
    self.assertEquals(
        len(data['hardwareProfile']) > 0, True, "hardwareProfile should contain content")
    self.assertEquals(data['hostname'] != "", True, "hostname should not be empty")
    self.assertEquals(data['publicHostname'] != "", True, "publicHostname should not be empty")
    self.assertEquals(data['responseId'], 1)
    self.assertEquals(data['timestamp'] > 1353678475465L, True, "timestamp should not be empty")
    self.assertEquals(
        len(data['agentEnv']) > 0, True, "agentEnv should not be empty")
    self.assertEquals(data['agentVersion'], '1.3.0', "agentVersion should not be empty")
    print data['agentEnv']['umask']
    self.assertEquals(not data['agentEnv']['umask'] == "", True, "agents umask should not be empty")
    self.assertEquals(data['currentPingPort'] == 33777, True, "current ping port should be 33777")
    self.assertEquals(data['prefix'], config.get('agent', 'prefix'), 'The prefix path does not match')
    self.assertEquals(len(data), 9)
    os.remove(ver_file)
def test_update_log_level(self, basicConfig_mock, setLevel_mock): config = AmbariConfig().getConfig() # Testing with default setup (config file does not contain loglevel entry) # Log level should not be changed main.update_log_level(config) self.assertFalse(setLevel_mock.called) setLevel_mock.reset_mock() # Testing debug mode config.set('agent', 'loglevel', 'DEBUG') main.update_log_level(config) setLevel_mock.assert_called_with(logging.DEBUG) setLevel_mock.reset_mock() # Testing any other mode config.set('agent', 'loglevel', 'INFO') main.update_log_level(config) setLevel_mock.assert_called_with(logging.INFO) setLevel_mock.reset_mock() config.set('agent', 'loglevel', 'WRONG') main.update_log_level(config) setLevel_mock.assert_called_with(logging.INFO)
def test_build_predefined(self, getStatus_mock, read_actual_component_mock):
    """
    Tests that if live status is defined (using default parameter),
    then no StatusCheck is executed
    """
    # FIX: the triple-quoted summary used to sit *after* the first statement,
    # where it is a dead no-op expression; moved to the top so it is a real
    # docstring. Behavior is unchanged.
    read_actual_component_mock.return_value = "actual_component"
    config = AmbariConfig().getConfig()
    config.set('agent', 'prefix', "ambari_agent" + os.sep + "dummy_files")
    livestatus = LiveStatus('', 'SOME_UNKNOWN_SERVICE',
                            'SOME_UNKNOWN_COMPONENT', {}, config, {})
    livestatus.versionsHandler.versionsFilePath = "ambari_agent" + \
        os.sep + "dummy_files" + os.sep + "dummy_current_stack"
    result = livestatus.build(forsed_component_status="STARTED")
    result_str = pprint.pformat(result)
    self.assertEqual(
        result_str,
        "{'clusterName': '',\n "
        "'componentName': 'SOME_UNKNOWN_COMPONENT',\n "
        "'configurationTags': 'actual_component',\n "
        "'msg': '',\n 'serviceName': 'SOME_UNKNOWN_SERVICE',\n "
        "'stackVersion': '',\n 'status': 'STARTED'}")
    # Forcing a status must short-circuit the real status check.
    self.assertFalse(getStatus_mock.called)
def test_ambari_config_get(self):
    """AmbariConfig: get/set round-trips, whitespace stripping, and the
    open-files ulimit accessors."""
    config = AmbariConfig()
    # Built-in default value
    self.assertEqual("/tmp/ambari-agent", config.get("security", "keysdir"))
    # Explicitly set value wins over the default
    config.set("security", "keysdir", "/tmp/non-default-path")
    self.assertEqual("/tmp/non-default-path", config.get("security", "keysdir"))
    # Leading/trailing whitespace must be stripped on read
    config.set("security", "keysdir", " /tmp/non-stripped")
    self.assertEqual("/tmp/non-stripped", config.get("security", "keysdir"))
    # Default open-files ulimit is 0 (unset)
    self.assertEqual(0, config.get_ulimit_open_files())
    # A set ulimit value must round-trip through the accessors
    new_ulimit = 128000
    config.set_ulimit_open_files(new_ulimit)
    self.assertEqual(new_ulimit, config.get_ulimit_open_files())
def test_execute_python_executor(self, read_stack_version_mock, resolve_script_path_mock, get_py_executor_mock):
    """Run a background command through a real PythonExecutor and verify the
    completion callback, final command status, and the action-queue report.

    A wrapped on_background_command_complete_callback captures the condensed
    result under a lock; the test then waits briefly on a condition for the
    background task to finish before asserting.
    """
    dummy_controller = MagicMock()
    cfg = AmbariConfig()
    cfg.set('agent', 'tolerate_download_failures', 'true')
    cfg.set('agent', 'prefix', '.')
    cfg.set('agent', 'cache_dir', 'background_tasks')
    actionQueue = ActionQueue(cfg, dummy_controller)
    # Use a real PythonExecutor, but patch its output file handling.
    pyex = PythonExecutor(actionQueue.customServiceOrchestrator.tmp_dir,
                          actionQueue.customServiceOrchestrator.config)
    patch_output_file(pyex)
    get_py_executor_mock.return_value = pyex
    actionQueue.customServiceOrchestrator.dump_command_to_json = MagicMock()
    result = {}
    lock = threading.RLock()
    complete_done = threading.Condition(lock)
    def command_complete_w(process_condensed_result, handle):
        # Capture the completion payload under the lock and wake the waiter.
        with lock:
            result['command_complete'] = {
                'condensed_result': copy.copy(process_condensed_result),
                'handle': copy.copy(handle),
                'command_status': actionQueue.commandStatuses.get_command_status(handle.command['taskId'])
            }
            complete_done.notifyAll()
    actionQueue.on_background_command_complete_callback = wraped(
        actionQueue.on_background_command_complete_callback, None, command_complete_w)
    actionQueue.put([self.background_command])
    actionQueue.processBackgroundQueueSafeEmpty()
    actionQueue.processStatusCommandQueueSafeEmpty()
    # Give the background task a moment to finish and fire the callback.
    with lock:
        complete_done.wait(0.1)
        finished_status = result['command_complete']['command_status']
        self.assertEqual(finished_status['status'], ActionQueue.COMPLETED_STATUS)
        self.assertEqual(finished_status['stdout'], 'process_out')
        self.assertEqual(finished_status['stderr'], 'process_err')
        self.assertEqual(finished_status['exitCode'], 0)
    runningCommand = actionQueue.commandStatuses.current_state.get(self.background_command['taskId'])
    self.assertTrue(runningCommand is not None)
    report = actionQueue.result()
    self.assertEqual(len(report['reports']), 1)
    self.assertEqual(report['reports'][0]['stdout'], 'process_out')
def test_server_hostnames_multiple_override(self):
    """A [server] hostname_script that echoes a comma-separated list must
    yield each host as a separate entry from server_hostnames()."""
    hostname.cached_server_hostnames = []
    # Create an executable temp script that echoes three hosts.
    fd = tempfile.mkstemp(text=True)
    tmpname = fd[1]
    os.close(fd[0])
    os.chmod(tmpname, os.stat(tmpname).st_mode | stat.S_IXUSR)
    tmpfile = file(tmpname, "w+")
    config = AmbariConfig()
    try:
        tmpfile.write("#!/bin/sh\n\necho 'host1.example.com, host2.example.com, host3.example.com'")
        tmpfile.close()
        config.set('server', 'hostname_script', tmpname)
        expected_hostnames = ['host1.example.com', 'host2.example.com', 'host3.example.com']
        server_hostnames = hostname.server_hostnames(config)
        self.assertEquals(server_hostnames, expected_hostnames,
                          "expected hostnames {0}; got {1}".format(expected_hostnames, server_hostnames))
    finally:
        # Always remove the script and the override, even on failure.
        os.remove(tmpname)
        config.remove_option('server', 'hostname_script')
    pass
def test_hostname_override(self):
    """An [agent] hostname_script must override the agent hostname with the
    script's stdout."""
    hostname.cached_hostname = None
    hostname.cached_public_hostname = None
    # Create an executable temp script that echoes the override hostname.
    fd = tempfile.mkstemp(text=True)
    tmpname = fd[1]
    os.close(fd[0])
    os.chmod(tmpname, os.stat(tmpname).st_mode | stat.S_IXUSR)
    tmpfile = file(tmpname, "w+")
    config = AmbariConfig()
    try:
        tmpfile.write("#!/bin/sh\n\necho 'test.example.com'")
        tmpfile.close()
        config.set('agent', 'hostname_script', tmpname)
        self.assertEquals(hostname.hostname(config), 'test.example.com',
                          "expected hostname 'test.example.com'")
    finally:
        # Always remove the script and the override, even on failure.
        os.remove(tmpname)
        config.remove_option('agent', 'hostname_script')
    pass
def test_write_actual_component_and_client_components(self):
    """write_actual_component() updates only the named component, while
    write_client_components('HDFS', ...) updates HDFS clients only (not
    HBASE_CLIENT).  Cleans up the three files written under the prefix."""
    config = AmbariConfig().getConfig()
    tmpdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tmpdir)
    tags1 = { "global": "version1", "core-site": "version2" }
    tags2 = { "global": "version33", "core-site": "version33" }
    handler = ActualConfigHandler(config, {})
    handler.write_actual_component('HDFS_CLIENT', tags1)
    handler.write_actual_component('HBASE_CLIENT', tags1)
    self.assertEquals(tags1, handler.read_actual_component('HDFS_CLIENT'))
    self.assertEquals(tags1, handler.read_actual_component('HBASE_CLIENT'))
    # Writing DATANODE tags must not affect the client components.
    handler.write_actual_component('DATANODE', tags2)
    self.assertEquals(tags2, handler.read_actual_component('DATANODE'))
    self.assertEquals(tags1, handler.read_actual_component('HDFS_CLIENT'))
    # Only HDFS clients pick up the new tags; HBASE_CLIENT keeps the old ones.
    handler.write_client_components('HDFS', tags2)
    self.assertEquals(tags2, handler.read_actual_component('HDFS_CLIENT'))
    self.assertEquals(tags1, handler.read_actual_component('HBASE_CLIENT'))
    os.remove(os.path.join(tmpdir, "DATANODE_" + ActualConfigHandler.CONFIG_NAME))
    os.remove(os.path.join(tmpdir, "HBASE_CLIENT_" + ActualConfigHandler.CONFIG_NAME))
    os.remove(os.path.join(tmpdir, "HDFS_CLIENT_" + ActualConfigHandler.CONFIG_NAME))
def test_build(self, read_actual_component_mock):
    # LiveStatus.build() must return a non-empty status structure for every
    # known component, and honor forsed_component_status overrides.
    for component in LiveStatus.COMPONENTS:
        config = AmbariConfig().getConfig()
        config.set('agent', 'prefix', "ambari_agent" + os.sep + "dummy_files")
        livestatus = LiveStatus('', component['serviceName'], component['componentName'], {}, config)
        # Point the versions handler at a canned stack-version fixture file.
        livestatus.versionsHandler.versionsFilePath = "ambari_agent" + os.sep + "dummy_files" + os.sep + "dummy_current_stack"
        result = livestatus.build()
        print "LiveStatus of {0}: {1}".format(component['serviceName'], str(result))
        self.assertEquals(len(result) > 0, True, 'Livestatus should not be empty')
        if component['componentName'] == 'GANGLIA_SERVER':
            self.assertEquals(result['stackVersion'], '{"stackName":"HDP","stackVersion":"1.2.2"}',
                              'Livestatus should contain component stack version')
    # Test build status for CLIENT component (in LiveStatus.CLIENT_COMPONENTS)
    read_actual_component_mock.return_value = "some tags"
    livestatus = LiveStatus('c1', 'HDFS', 'HDFS_CLIENT', {}, config)
    result = livestatus.build()
    self.assertTrue(len(result) > 0, 'Livestatus should not be empty')
    self.assertTrue(result.has_key('configurationTags'))
    # Test build status with forsed_component_status
    ## Alive
    livestatus = LiveStatus('c1', 'HDFS', 'HDFS_CLIENT', {}, config)
    result = livestatus.build(forsed_component_status=LiveStatus.LIVE_STATUS)
    self.assertTrue(len(result) > 0, 'Livestatus should not be empty')
    # NOTE(review): assertTrue with two args treats the second as a message,
    # not an expected value — presumably assertEqual was intended; confirm.
    self.assertTrue(result['status'], LiveStatus.LIVE_STATUS)
    ## Dead
    livestatus = LiveStatus('c1', 'HDFS', 'HDFS_CLIENT', {}, config)
    result = livestatus.build(forsed_component_status=LiveStatus.DEAD_STATUS)
    self.assertTrue(len(result) > 0, 'Livestatus should not be empty')
    self.assertTrue(result['status'], LiveStatus.DEAD_STATUS)
def test_write_actual_component_and_client_components(self):
    """Client-component tag updates must be scoped to the requested service."""
    config = AmbariConfig().getConfig()
    tmpdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tmpdir)
    initial_tags = { "global": "version1", "core-site": "version2" }
    updated_tags = { "global": "version33", "core-site": "version33" }
    clients_to_update = ["*"]
    handler = ActualConfigHandler(config, {})
    # Seed two client components with the initial tags.
    handler.write_actual_component('HDFS_CLIENT', initial_tags)
    handler.write_actual_component('HBASE_CLIENT', initial_tags)
    self.assertEquals(initial_tags, handler.read_actual_component('HDFS_CLIENT'))
    self.assertEquals(initial_tags, handler.read_actual_component('HBASE_CLIENT'))
    # Writing a non-client component leaves the client files alone.
    handler.write_actual_component('DATANODE', updated_tags)
    self.assertEquals(updated_tags, handler.read_actual_component('DATANODE'))
    self.assertEquals(initial_tags, handler.read_actual_component('HDFS_CLIENT'))
    # Updating HDFS clients touches HDFS_CLIENT only; HBASE_CLIENT keeps old tags.
    handler.write_client_components('HDFS', updated_tags, clients_to_update)
    self.assertEquals(updated_tags, handler.read_actual_component('HDFS_CLIENT'))
    self.assertEquals(initial_tags, handler.read_actual_component('HBASE_CLIENT'))
    # Drop the tag files created in the shared temp dir.
    for component in ("DATANODE", "HBASE_CLIENT", "HDFS_CLIENT"):
        os.remove(os.path.join(tmpdir, component + "_" + ActualConfigHandler.CONFIG_NAME))
def test_registration_build(self):
    # The registration payload must carry the hardware profile, host names,
    # response id, timestamp, agent env/version, ping port and prefix —
    # and nothing else (exactly 9 fields).
    config = AmbariConfig().getConfig()
    tmpdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tmpdir)
    config.set('agent', 'current_ping_port', '33777')
    get_os_type_mock.return_value = "suse"
    get_os_version_mock.return_value = "11"
    run_os_cmd_mock.return_value = (3, "", "")
    register = Register(config)
    reference_version = '2.1.0'
    data = register.build(reference_version, 1)
    self.assertEquals(len(data['hardwareProfile']) > 0, True, "hardwareProfile should contain content")
    self.assertEquals(data['hostname'] != "", True, "hostname should not be empty")
    self.assertEquals(data['publicHostname'] != "", True, "publicHostname should not be empty")
    self.assertEquals(data['responseId'], 1)
    # Timestamp must be later than this fixed reference epoch (ms).
    self.assertEquals(data['timestamp'] > 1353678475465L, True, "timestamp should not be empty")
    self.assertEquals(len(data['agentEnv']) > 0, True, "agentEnv should not be empty")
    self.assertEquals(data['agentVersion'], reference_version, "agentVersion should not be empty")
    print data['agentEnv']['umask']
    self.assertEquals(not data['agentEnv']['umask']== "", True, "agents umask should not be empty")
    self.assertEquals(data['currentPingPort'] == 33777, True, "current ping port should be 33777")
    self.assertEquals(data['prefix'], config.get('agent', 'prefix'), 'The prefix path does not match')
    self.assertEquals(len(data), 9)
def test_osdisks_remote(self, communicate_mock, popen_mock, get_os_version_mock, get_os_type_mock):
    """osdisks() wraps df in a 10s `timeout` and adds -l unless remote-mount checking is on."""
    get_os_type_mock.return_value = "suse"
    get_os_version_mock.return_value = "11"

    def expect_df_call(argv):
        # osdisks launches df via Popen; -1 is the raw value of subprocess.PIPE.
        popen_mock.assert_called_with(argv, stdout=-1)

    # No config at all: remote mounts are included, default 10s timeout.
    Hardware.osdisks()
    expect_df_call(['timeout', '10', 'df', '-kPT'])
    # A config without the property behaves identically.
    config = AmbariConfig()
    Hardware.osdisks(config)
    expect_df_call(['timeout', '10', 'df', '-kPT'])
    config.add_section(AmbariConfig.AMBARI_PROPERTIES_CATEGORY)
    # Remote-mount checking enabled: df runs without -l.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "true")
    Hardware.osdisks(config)
    expect_df_call(['timeout', '10', 'df', '-kPT'])
    # Disabled: df is restricted to local filesystems with -l.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "false")
    Hardware.osdisks(config)
    expect_df_call(['timeout', '10', 'df', '-kPT', '-l'])
    # A zero timeout falls back to the 10s default.
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "0")
    Hardware.osdisks(config)
    expect_df_call(['timeout', '10', 'df', '-kPT', '-l'])
    # Positive timeouts are honored verbatim.
    for mount_timeout in ('1', '2'):
        config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, mount_timeout)
        Hardware.osdisks(config)
        expect_df_call(['timeout', mount_timeout, 'df', '-kPT', '-l'])
def test_server_hostnames_override(self):
    """A hostname_script echoing a single host must yield a one-element list."""
    hostname.cached_server_hostnames = []
    script_fd, script_path = tempfile.mkstemp(text=True)
    os.close(script_fd)
    # server_hostnames() executes the script, so it must be executable.
    os.chmod(script_path, os.stat(script_path).st_mode | stat.S_IXUSR)
    script = file(script_path, "w+")
    config = AmbariConfig()
    try:
        script.write("#!/bin/sh\n\necho 'test.example.com'")
        script.close()
        config.set('server', 'hostname_script', script_path)
        server_hostnames = hostname.server_hostnames(config)
        self.assertEquals(server_hostnames, ['test.example.com'],
                          "expected hostname ['test.example.com']; got {0}".format(server_hostnames))
    finally:
        # Remove the temp script and reset the config override.
        os.remove(script_path)
        config.remove_option('server', 'hostname_script')
def __test_execute_python_executor(self, resolve_script_path_mock, get_py_executor_mock):
    # (Disabled test — leading "__" keeps it out of discovery.)
    # Runs a background command through a patched PythonExecutor and checks
    # that its final status is reported and retained in current_state.
    cfg = AmbariConfig()
    cfg.set('agent', 'tolerate_download_failures', 'true')
    cfg.set('agent', 'prefix', '.')
    cfg.set('agent', 'cache_dir', 'background_tasks')
    initializer_module = InitializerModule()
    initializer_module.init()
    initializer_module.config = cfg
    # Seed the caches so ActionQueue can resolve the test cluster.
    initializer_module.metadata_cache.cache_update({CLUSTER_ID:{'clusterLevelParams':{}}}, 'abc')
    initializer_module.configurations_cache.cache_update({CLUSTER_ID:{}}, 'abc')
    initializer_module.host_level_params_cache.cache_update({CLUSTER_ID:{}}, 'abc')
    CustomServiceOrchestrator.runCommand = default_run_command
    actionQueue = ActionQueue(initializer_module)
    pyex = PythonExecutor(actionQueue.customServiceOrchestrator.tmp_dir,
                          actionQueue.customServiceOrchestrator.config)
    patch_output_file(pyex)
    get_py_executor_mock.return_value = pyex
    actionQueue.customServiceOrchestrator.dump_command_to_json = MagicMock()
    result = {}
    lock = threading.RLock()
    complete_done = threading.Condition(lock)

    def command_complete_w(process_condensed_result, handle):
        # Completion hook: capture the condensed result plus the reported
        # command status, then wake the waiting test thread.
        with lock:
            result['command_complete'] = {'condensed_result': copy.copy(process_condensed_result),
                                          'handle': copy.copy(handle),
                                          'command_status': actionQueue.commandStatuses.get_command_status(handle.command['taskId'])
                                          }
            complete_done.notifyAll()

    actionQueue.on_background_command_complete_callback = wraped(
        actionQueue.on_background_command_complete_callback, None, command_complete_w)
    actionQueue.put([self.background_command])
    actionQueue.processBackgroundQueueSafeEmpty()
    with lock:
        complete_done.wait(0.1)
        finished_status = result['command_complete']['command_status']
        self.assertEqual(finished_status['status'], ActionQueue.COMPLETED_STATUS)
        self.assertEqual(finished_status['stdout'], 'process_out')
        self.assertEqual(finished_status['stderr'], 'process_err')
        self.assertEqual(finished_status['exitCode'], 0)
    runningCommand = actionQueue.commandStatuses.current_state.get(self.background_command['taskId'])
    self.assertTrue(runningCommand is not None)
    # BUG FIX: the original assigned `report = actionQueue.result()` but then
    # asserted on an undefined name `reports` (NameError). Fetch the reports
    # the same way the other initializer_module-based tests in this file do.
    reports = actionQueue.commandStatuses.generate_report()[CLUSTER_ID]
    self.assertEqual(len(reports), 1)
    self.assertEqual(reports[0]['stdout'], 'process_out')
def test_do_not_log_execution_commands(self, command_status_dict_mock, cso_runCommand_mock, mock_log_command_output):
    # A command flagged as "no logging" must not have its stdout/stderr sent
    # to the command-output logger, while the report is still produced.
    custom_service_orchestrator_execution_result_dict = {
        'stdout': 'out',
        'stderr': 'stderr',
        'structuredOut': '',
        'exitcode': 0
    }
    cso_runCommand_mock.return_value = custom_service_orchestrator_execution_result_dict
    config = AmbariConfig()
    tempdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tempdir)
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")
    config.set('logging', 'log_command_executes', 1)
    dummy_controller = MagicMock()
    initializer_module = InitializerModule()
    initializer_module.init()
    actionQueue = ActionQueue(initializer_module)
    actionQueue.execute_command(self.datanode_restart_command_no_logging)
    reports = actionQueue.commandStatuses.generate_report()[CLUSTER_ID]
    expected = {'status': 'COMPLETED',
                'stderr': 'stderr',
                'stdout': 'out\n\nCommand completed successfully!\n',
                'clusterId': CLUSTER_ID,
                'structuredOut': '""',
                'roleCommand': u'CUSTOM_COMMAND',
                'serviceName': u'HDFS',
                'role': u'DATANODE',
                'actionId': '1-1',
                'taskId': 9,
                'exitCode': 0}
    # Agent caches configurationTags if custom_command RESTART completed
    # NOTE(review): mock's assert_not_called() takes no call arguments —
    # presumably assert_any_call / assert_has_calls semantics were intended;
    # verify this assertion actually exercises anything.
    mock_log_command_output.assert_not_called(
        [call("out\n\nCommand completed successfully!\n", "9"), call("stderr", "9")], any_order=True)
    self.assertEqual(len(reports), 1)
    self.assertEqual(expected, reports[0])
def test_do_not_log_execution_commands(self, status_update_callback_mock, command_status_dict_mock, cso_runCommand_mock, mock_log_command_output):
    # Legacy (controller-based) variant: a "no logging" command must not have
    # its output forwarded to the command-output logger.
    custom_service_orchestrator_execution_result_dict = {
        'stdout': 'out',
        'stderr': 'stderr',
        'structuredOut': '',
        'exitcode': 0
    }
    cso_runCommand_mock.return_value = custom_service_orchestrator_execution_result_dict
    config = AmbariConfig()
    tempdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tempdir)
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")
    config.set('logging', 'log_command_executes', 1)
    dummy_controller = MagicMock()
    actionQueue = ActionQueue(config, dummy_controller)
    actionQueue.execute_command(self.datanode_restart_command_no_logging)
    report = actionQueue.result()
    expected = {'status': 'COMPLETED',
                'configurationTags': {'global': {'tag': 'v123'}},
                'stderr': 'stderr',
                'stdout': 'out\n\nCommand completed successfully!\n',
                'clusterName': u'cc',
                'structuredOut': '""',
                'roleCommand': u'CUSTOM_COMMAND',
                'serviceName': u'HDFS',
                'role': u'DATANODE',
                'actionId': '1-1',
                'taskId': 9,
                'customCommand': 'RESTART',
                'exitCode': 0}
    # Agent caches configurationTags if custom_command RESTART completed
    # NOTE(review): assert_not_called() takes no call arguments in mock —
    # confirm this check is meaningful (see twin test above).
    mock_log_command_output.assert_not_called(
        [call("out\n\nCommand completed successfully!\n", "9"), call("stderr", "9")], any_order=True)
    self.assertEqual(len(report['reports']), 1)
    self.assertEqual(expected, report['reports'][0])
def test_execute_python_executor(self, read_stack_version_mock, resolve_script_path_mock, get_py_executor_mock):
    # Runs a background command through a patched PythonExecutor and checks
    # that the completed status is reported and kept in current_state.
    dummy_controller = MagicMock()
    cfg = AmbariConfig()
    cfg.set('agent', 'tolerate_download_failures', 'true')
    cfg.set('agent', 'prefix', '.')
    cfg.set('agent', 'cache_dir', 'background_tasks')
    actionQueue = ActionQueue(cfg, dummy_controller)
    pyex = PythonExecutor(actionQueue.customServiceOrchestrator.tmp_dir, actionQueue.customServiceOrchestrator.config)
    patch_output_file(pyex)
    get_py_executor_mock.return_value = pyex
    actionQueue.customServiceOrchestrator.dump_command_to_json = MagicMock()
    result = {}
    lock = threading.RLock()
    complete_done = threading.Condition(lock)
    # Completion hook: capture the condensed result and the reported command
    # status, then wake the test thread waiting on the condition.
    def command_complete_w(process_condensed_result, handle):
        with lock:
            result['command_complete'] = {'condensed_result' : copy.copy(process_condensed_result),
                                          'handle' : copy.copy(handle),
                                          'command_status' : actionQueue.commandStatuses.get_command_status(handle.command['taskId'])
                                          }
            complete_done.notifyAll()
    actionQueue.on_background_command_complete_callback = wraped(actionQueue.on_background_command_complete_callback, None, command_complete_w)
    actionQueue.put([self.background_command])
    actionQueue.processBackgroundQueueSafeEmpty();
    actionQueue.processStatusCommandQueueSafeEmpty();
    with lock:
        complete_done.wait(0.1)
        finished_status = result['command_complete']['command_status']
        self.assertEqual(finished_status['status'], ActionQueue.COMPLETED_STATUS)
        self.assertEqual(finished_status['stdout'], 'process_out')
        self.assertEqual(finished_status['stderr'], 'process_err')
        self.assertEqual(finished_status['exitCode'], 0)
    # The finished command must still be tracked by its task id.
    runningCommand = actionQueue.commandStatuses.current_state.get(self.background_command['taskId'])
    self.assertTrue(runningCommand is not None)
    report = actionQueue.result()
    self.assertEqual(len(report['reports']),1)
    self.assertEqual(report['reports'][0]['stdout'],'process_out')
def test_log_execution_commands(self, status_update_callback_mock, command_status_dict_mock, cso_runCommand_mock):
    # With log_command_executes enabled, a RESTART custom command must
    # complete and report its configuration tags.
    custom_service_orchestrator_execution_result_dict = {
        'stdout': 'out',
        'stderr': 'stderr',
        'structuredOut' : '',
        'exitcode' : 0
    }
    cso_runCommand_mock.return_value = custom_service_orchestrator_execution_result_dict
    config = AmbariConfig()
    tempdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tempdir)
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")
    config.set('logging', 'log_command_executes', 1)
    dummy_controller = MagicMock()
    actionQueue = ActionQueue(config, dummy_controller)
    actionQueue.execute_command(self.datanode_restart_command)
    report = actionQueue.result()
    expected = {'status': 'COMPLETED',
                'configurationTags': {'global': {'tag': 'v123'}},
                'stderr': 'stderr',
                'stdout': 'out',
                'clusterName': u'cc',
                'structuredOut': '""',
                'roleCommand': u'CUSTOM_COMMAND',
                'serviceName': u'HDFS',
                'role': u'DATANODE',
                'actionId': '1-1',
                'taskId': 9,
                'customCommand': 'RESTART',
                'exitCode': 0}
    # Agent caches configurationTags if custom_command RESTART completed
    self.assertEqual(len(report['reports']), 1)
    self.assertEqual(expected, report['reports'][0])
def test_ambari_config_get_command_file_retention_policy(self):
    """command_file_retention_policy must default to "keep" for unset or invalid values."""
    config = AmbariConfig()
    # An unset property yields the default, "keep".
    if config.has_option("agent", AmbariConfig.COMMAND_FILE_RETENTION_POLICY_PROPERTY):
        config.remove_option("agent", AmbariConfig.COMMAND_FILE_RETENTION_POLICY_PROPERTY)
    self.assertEqual(config.command_file_retention_policy,
                     AmbariConfig.COMMAND_FILE_RETENTION_POLICY_KEEP)
    # Every recognized policy value is returned verbatim.
    for policy in (AmbariConfig.COMMAND_FILE_RETENTION_POLICY_KEEP,
                   AmbariConfig.COMMAND_FILE_RETENTION_POLICY_REMOVE,
                   AmbariConfig.COMMAND_FILE_RETENTION_POLICY_REMOVE_ON_SUCCESS):
        config.set("agent", AmbariConfig.COMMAND_FILE_RETENTION_POLICY_PROPERTY, policy)
        self.assertEqual(config.command_file_retention_policy, policy)
    # Invalid value yields, "keep"
    config.set("agent", AmbariConfig.COMMAND_FILE_RETENTION_POLICY_PROPERTY, "invalid_value")
    self.assertEqual(config.command_file_retention_policy,
                     AmbariConfig.COMMAND_FILE_RETENTION_POLICY_KEEP)
def test_store_config_tags_on_install_client_command(self, command_status_dict_mock, cso_runCommand_mock):
    # An INSTALL command for a client component must run through ActionQueue
    # with the command's configuration tags available.
    custom_service_orchestrator_execution_result_dict = {
        'stdout': 'out',
        'stderr': 'stderr',
        'structuredOut' : '',
        'exitcode' : 0
    }
    cso_runCommand_mock.return_value = custom_service_orchestrator_execution_result_dict
    tez_client_install_command = {
        'commandType': 'EXECUTION_COMMAND',
        'role': u'TEZ_CLIENT',
        'roleCommand': u'INSTALL',
        'commandId': '1-1',
        'taskId': 9,
        'clusterName': u'cc',
        'serviceName': u'TEZ',
        'configurations': {'global' : {}},
        'configurationTags': {'global' : { 'tag': 'v123' }},
        'hostLevelParams': {},
        'clusterId': CLUSTER_ID,
    }
    # Register TEZ_CLIENT as the only known client component for this test.
    LiveStatus.CLIENT_COMPONENTS = ({'serviceName': 'TEZ', 'componentName': 'TEZ_CLIENT'},)
    config = AmbariConfig()
    tempdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tempdir)
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")
    dummy_controller = MagicMock()
    initializer_module = InitializerModule()
    initializer_module.init()
    actionQueue = ActionQueue(initializer_module)
    actionQueue.execute_command(tez_client_install_command)
def test_refresh_queues_custom_command(self, command_status_dict_mock, cso_runCommand_mock):
    """The REFRESHQUEUES custom command must complete and produce a single report."""
    run_command_result = {
        'stdout': 'out',
        'stderr': 'stderr',
        'structuredOut': '',
        'exitcode': 0
    }
    cso_runCommand_mock.return_value = run_command_result
    agent_config = AmbariConfig()
    agent_prefix = tempfile.gettempdir()
    agent_config.set('agent', 'prefix', agent_prefix)
    agent_config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    agent_config.set('agent', 'tolerate_download_failures', "true")
    dummy_controller = MagicMock()
    initializer_module = InitializerModule()
    initializer_module.init()
    actionQueue = ActionQueue(initializer_module)
    actionQueue.execute_command(self.yarn_refresh_queues_custom_command)
    reports = actionQueue.commandStatuses.generate_report()[CLUSTER_ID]
    # The report must reflect the orchestrator result verbatim, with the
    # success banner appended to stdout.
    expected = {'status': 'COMPLETED',
                'stderr': 'stderr',
                'stdout': 'out\n\nCommand completed successfully!\n',
                'clusterId': CLUSTER_ID,
                'structuredOut': '""',
                'roleCommand': u'CUSTOM_COMMAND',
                'serviceName': u'YARN',
                'role': u'RESOURCEMANAGER',
                'actionId': '1-1',
                'taskId': 9,
                'exitCode': 0}
    self.assertEqual(len(reports), 1)
    self.assertEqual(expected, reports[0])
def test_store_configuration_tags_no_clients(self, status_update_callback_mock, command_status_dict_mock, cso_runCommand_mock, write_client_components_mock):
    # A RESTART custom command that requests no client updates must complete
    # without calling write_client_components.
    custom_service_orchestrator_execution_result_dict = {
        'stdout': 'out',
        'stderr': 'stderr',
        'structuredOut' : '',
        'exitcode' : 0
    }
    cso_runCommand_mock.return_value = custom_service_orchestrator_execution_result_dict
    config = AmbariConfig().getConfig()
    tempdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tempdir)
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")
    dummy_controller = MagicMock()
    actionQueue = ActionQueue(config, dummy_controller)
    actionQueue.execute_command(self.datanode_restart_command_no_clients_update)
    report = actionQueue.result()
    expected = {'status': 'COMPLETED',
                'configurationTags': {'global': {'tag': 'v123'}},
                'stderr': 'stderr',
                'stdout': 'out',
                'clusterName': u'cc',
                'structuredOut': '""',
                'roleCommand': u'CUSTOM_COMMAND',
                'serviceName': u'HDFS',
                'role': u'DATANODE',
                'actionId': '1-1',
                'taskId': 9,
                'customCommand': 'RESTART',
                'exitCode': 0}
    # Agent caches configurationTags if custom_command RESTART completed
    self.assertEqual(len(report['reports']), 1)
    self.assertEqual(expected, report['reports'][0])
    # No clients were requested, so no client tag files may be written.
    self.assertFalse(write_client_components_mock.called)
def test_cancel_backgound_command(self, read_stack_version_mock, resolve_hook_script_path_mock, resolve_script_path_mock, FileCache_mock, kill_process_with_children_mock, get_py_executor_mock):
    # Cancelling a running background command must kill its process tree
    # (pid 33, as patched by patch_output_file) and leave it FAILED.
    FileCache_mock.return_value = None
    FileCache_mock.cache_dir = MagicMock()
    resolve_hook_script_path_mock.return_value = None
    # shell.kill_process_with_children = MagicMock()
    dummy_controller = MagicMock()
    cfg = AmbariConfig()
    cfg.set('agent', 'tolerate_download_failures', 'true')
    cfg.set('agent', 'prefix', '.')
    cfg.set('agent', 'cache_dir', 'background_tasks')
    actionQueue = ActionQueue(cfg, dummy_controller)
    dummy_controller.actionQueue = actionQueue
    orchestrator = CustomServiceOrchestrator(cfg, dummy_controller)
    orchestrator.file_cache = MagicMock()
    # Stub: any (service, command) resolves to an empty base dir.
    def f(a, b):
        return ""
    orchestrator.file_cache.get_service_base_dir = f
    actionQueue.customServiceOrchestrator = orchestrator
    import TestActionQueue
    import copy
    pyex = PythonExecutor(actionQueue.customServiceOrchestrator.tmp_dir, actionQueue.customServiceOrchestrator.config)
    TestActionQueue.patch_output_file(pyex)
    pyex.prepare_process_result = MagicMock()
    get_py_executor_mock.return_value = pyex
    orchestrator.dump_command_to_json = MagicMock()
    lock = threading.RLock()
    complete_done = threading.Condition(lock)
    complete_was_called = {}
    # Completion hook: record that completion fired, then block (up to 3s)
    # so the cancel below races against a still-"running" command.
    def command_complete_w(process_condenced_result, handle):
        with lock:
            complete_was_called['visited'] = ''
            complete_done.wait(3)
    actionQueue.on_background_command_complete_callback = TestActionQueue.wraped(
        actionQueue.on_background_command_complete_callback, command_complete_w, None)
    execute_command = copy.deepcopy(TestActionQueue.TestActionQueue.background_command)
    actionQueue.put([execute_command])
    actionQueue.processBackgroundQueueSafeEmpty()
    time.sleep(.1)
    # Cancel task 19; the patched process has pid 33.
    orchestrator.cancel_command(19, '')
    self.assertTrue(kill_process_with_children_mock.called)
    kill_process_with_children_mock.assert_called_with(33)
    # Release the completion hook, then verify it actually ran.
    with lock:
        complete_done.notifyAll()
    with lock:
        self.assertTrue(complete_was_called.has_key('visited'))
    time.sleep(.1)
    # The cancelled command must be tracked and marked FAILED.
    runningCommand = actionQueue.commandStatuses.get_command_status(19)
    self.assertTrue(runningCommand is not None)
    self.assertEqual(runningCommand['status'], ActionQueue.FAILED_STATUS)
def test_execute_command(self):
    # Drives install (success + failure) and upgrade commands through
    # ActionQueue with a frozen runCommand, checking the IN_PROGRESS and
    # final reports, and that read terminal reports are cleared.
    config = AmbariConfig()
    tempdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tempdir)
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")
    config.set('heartbeat', 'log_symbols_count', "900000")
    initializer_module = InitializerModule()
    initializer_module.init()
    initializer_module.config = config
    with patch("__builtin__.open") as open_mock:
        # Make file read calls visible: reads return a marker naming the file.
        def open_side_effect(path, mode):
            if mode == 'r':
                file_mock = MagicMock()
                file_mock.read.return_value = "Read from " + str(path)
                return file_mock
            else:
                return self.original_open(path, mode)
        open_mock.side_effect = open_side_effect
        actionQueue = ActionQueue(initializer_module)
        unfreeze_flag = threading.Event()
        python_execution_result_dict = {
            'stdout': 'out',
            'stderr': 'stderr',
            'structuredOut' : ''
        }
        # runCommand stand-in: block until the test releases unfreeze_flag,
        # then return the (mutable) shared result dict.
        def side_effect(command, tmpoutfile, tmperrfile, override_output_files=True, retry=False):
            unfreeze_flag.wait()
            return python_execution_result_dict
        def patched_aq_execute_command(command):
            # We have to perform patching for separate thread in the same thread
            with patch.object(CustomServiceOrchestrator, "runCommand") as runCommand_mock:
                runCommand_mock.side_effect = side_effect
                actionQueue.execute_command(command)
        ### Test install/start/stop command ###
        # Test successful execution with configuration tags
        python_execution_result_dict['status'] = 'COMPLETE'
        python_execution_result_dict['exitcode'] = 0
        # We call method in a separate thread
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_install_command,))
        execution_thread.start()
        # check in progress report: wait until a report appears
        while True:
            time.sleep(0.1)
            reports = actionQueue.commandStatuses.generate_report()[CLUSTER_ID]
            if len(reports) != 0:
                break
        # NOTE(review): this IN_PROGRESS `expected` is never asserted against
        # the report — presumably an assertion was dropped; confirm intent.
        expected = {'status': 'IN_PROGRESS',
                    'stderr': 'Read from {0}'.format(os.path.join(tempdir, "errors-3.txt")),
                    'stdout': 'Read from {0}'.format(os.path.join(tempdir, "output-3.txt")),
                    'structuredOut' : 'Read from {0}'.format(os.path.join(tempdir, "structured-out-3.json")),
                    'clusterId': CLUSTER_ID,
                    'roleCommand': u'INSTALL',
                    'serviceName': u'HDFS',
                    'role': u'DATANODE',
                    'actionId': '1-1',
                    'taskId': 3,
                    'exitCode': 777}
        # Continue command execution
        unfreeze_flag.set()
        # wait until the command leaves IN_PROGRESS
        while reports[0]['status'] == 'IN_PROGRESS':
            time.sleep(0.1)
            reports = actionQueue.commandStatuses.generate_report()[CLUSTER_ID]
        # check report
        expected = {'status': 'COMPLETED',
                    'stderr': 'stderr',
                    'stdout': 'out\n\nCommand completed successfully!\n',
                    'clusterId': CLUSTER_ID,
                    'structuredOut': '""',
                    'roleCommand': u'INSTALL',
                    'serviceName': u'HDFS',
                    'role': u'DATANODE',
                    'actionId': '1-1',
                    'taskId': 3,
                    'exitCode': 0}
        self.assertEqual(len(reports), 1)
        self.assertEqual(reports[0], expected)
        # now should not have reports (read complete/failed reports are deleted)
        actionQueue.commandStatuses.clear_reported_reports({CLUSTER_ID: reports})
        reports = actionQueue.commandStatuses.generate_report()[CLUSTER_ID]
        self.assertEqual(len(reports), 0)
        #
        # Test failed execution
        python_execution_result_dict['status'] = 'FAILED'
        python_execution_result_dict['exitcode'] = 13
        # We call method in a separate thread
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_install_command,))
        execution_thread.start()
        unfreeze_flag.set()
        # wait until a terminal report is available
        reports = actionQueue.commandStatuses.generate_report()[CLUSTER_ID]
        while len(reports) == 0 or \
                reports[0]['status'] == 'IN_PROGRESS':
            time.sleep(0.1)
            reports = actionQueue.commandStatuses.generate_report()[CLUSTER_ID]
        actionQueue.commandStatuses.clear_reported_reports({CLUSTER_ID: reports})
        # check report
        expected = {'status': 'FAILED',
                    'stderr': 'stderr',
                    'stdout': 'out\n\nCommand completed successfully!\n\n\nCommand failed after 1 tries\n',
                    'clusterId': CLUSTER_ID,
                    'structuredOut': '""',
                    'roleCommand': u'INSTALL',
                    'serviceName': u'HDFS',
                    'role': u'DATANODE',
                    'actionId': '1-1',
                    'taskId': 3,
                    'exitCode': 13}
        self.assertEqual(len(reports), 1)
        self.assertEqual(reports[0], expected)
        # now should not have reports (read complete/failed reports are deleted)
        actionQueue.commandStatuses.clear_reported_reports({CLUSTER_ID: reports})
        reports = actionQueue.commandStatuses.generate_report()[CLUSTER_ID]
        self.assertEqual(len(reports), 0)
        ### Test upgrade command ###
        python_execution_result_dict['status'] = 'COMPLETE'
        python_execution_result_dict['exitcode'] = 0
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_upgrade_command,))
        execution_thread.start()
        unfreeze_flag.set()
        # wait until ready
        # BUG FIX: the original assigned this fetch to `report` (never read)
        # while the loop below tested the stale `reports` from the previous
        # section; assign `reports` so the loop sees fresh data immediately.
        reports = actionQueue.commandStatuses.generate_report()[CLUSTER_ID]
        while len(reports) == 0 or \
                reports[0]['status'] == 'IN_PROGRESS':
            time.sleep(0.1)
            reports = actionQueue.commandStatuses.generate_report()[CLUSTER_ID]
        actionQueue.commandStatuses.clear_reported_reports({CLUSTER_ID: reports})
        # check report
        expected = {'status': 'COMPLETED',
                    'stderr': 'stderr',
                    'stdout': 'out\n\nCommand completed successfully!\n\n\nCommand failed after 1 tries\n\n\nCommand completed successfully!\n',
                    'clusterId': CLUSTER_ID,
                    'structuredOut': '""',
                    'roleCommand': 'UPGRADE',
                    'serviceName': 'serviceName',
                    'role': 'role',
                    'actionId': 17,
                    'taskId': 'taskId',
                    'exitCode': 0}
        self.assertEqual(len(reports), 1)
        self.assertEqual(reports[0], expected)
        # now should not have reports (read complete/failed reports are deleted)
        actionQueue.commandStatuses.clear_reported_reports({CLUSTER_ID: reports})
        reports = actionQueue.commandStatuses.generate_report()[CLUSTER_ID]
        self.assertEqual(len(reports), 0)
class TestSecurity(unittest.TestCase):
    # Tests for security.CachedHTTPSConnection: lazy connect, forced reset,
    # and request forwarding/error propagation.

    @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
    def setUp(self):
        # disable stdout
        out = StringIO.StringIO()
        sys.stdout = out
        # Create config
        self.config = AmbariConfig()
        self.config.set('security', 'ssl_verify_cert', '0')
        # Instantiate CachedHTTPSConnection (skip connect() call)
        with patch.object(security.VerifiedHTTPSConnection, "connect"):
            self.cachedHTTPSConnection = security.CachedHTTPSConnection(self.config, "example.com")

    def tearDown(self):
        # enable stdout
        sys.stdout = sys.__stdout__

    ### CachedHTTPSConnection ###

    @patch.object(security.VerifiedHTTPSConnection, "connect")
    def test_CachedHTTPSConnection_connect(self, vhc_connect_mock):
        # connect() must establish the underlying connection only once.
        self.config.set('server', 'hostname', 'dummy.server.hostname')
        self.config.set('server', 'secured_url_port', '443')
        # Testing not connected case
        self.cachedHTTPSConnection.connected = False
        self.cachedHTTPSConnection.connect()
        self.assertTrue(vhc_connect_mock.called)
        vhc_connect_mock.reset_mock()
        # Testing already connected case
        self.cachedHTTPSConnection.connect()
        self.assertFalse(vhc_connect_mock.called)

    @patch.object(security.CachedHTTPSConnection, "connect")
    def test_forceClear(self, connect_mock):
        # forceClear() must replace the cached httpsconn instance.
        # Testing if httpsconn instance changed
        old = self.cachedHTTPSConnection.httpsconn
        self.cachedHTTPSConnection.forceClear()
        self.assertNotEqual(old, self.cachedHTTPSConnection.httpsconn)

    @patch.object(security.CachedHTTPSConnection, "connect")
    def test_request(self, connect_mock):
        # request() must forward method/url/body/headers to the underlying
        # connection and return the response body.
        httpsconn_mock = MagicMock(create = True)
        self.cachedHTTPSConnection.httpsconn = httpsconn_mock
        dummy_request = MagicMock(create = True)
        dummy_request.get_method.return_value = "dummy_get_method"
        dummy_request.get_full_url.return_value = "dummy_full_url"
        dummy_request.get_data.return_value = "dummy_get_data"
        dummy_request.headers = "dummy_headers"
        responce_mock = MagicMock(create = True)
        responce_mock.read.return_value = "dummy responce"
        httpsconn_mock.getresponse.return_value = responce_mock
        # Testing normal case
        responce = self.cachedHTTPSConnection.request(dummy_request)
        self.assertEqual(responce, responce_mock.read.return_value)
        httpsconn_mock.request.assert_called_once_with(
            dummy_request.get_method.return_value,
            dummy_request.get_full_url.return_value,
            dummy_request.get_data.return_value,
            dummy_request.headers)
        # Testing case of exception
        try:
            def side_eff():
                raise Exception("Dummy exception")
            httpsconn_mock.read.side_effect = side_eff
            responce = self.cachedHTTPSConnection.request(dummy_request)
            self.fail("Should raise IOError")
        except Exception, err:
            # Expected
            pass
def test_osdisks_remote(self, shell_call_mock, get_os_version_mock, get_os_type_mock):
  """Verify that Hardware.osdisks() builds the df command line according to
  the remote-mount properties in the agent configuration: the -l (local
  filesystems only) switch and the configured timeout value.
  """
  get_os_type_mock.return_value = "suse"
  get_os_version_mock.return_value = "11"

  def assert_df_call(expected_timeout, local_only):
    # Every osdisks() invocation is expected to run df under `timeout`
    # through the mocked shell call, with piped output and quiet mode.
    expected_cmd = ['timeout', str(expected_timeout), "df", "-kPT"]
    if local_only:
      expected_cmd.append("-l")
    shell_call_mock.assert_called_with(
      expected_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
      timeout=expected_timeout, quiet=True)

  default_timeout = 10

  # Without any config remote mounts are scanned and the default timeout is used.
  Hardware.osdisks()
  assert_df_call(default_timeout, False)

  # A config without the remote-mount properties behaves the same way.
  config = AmbariConfig()
  Hardware.osdisks(config)
  assert_df_call(default_timeout, False)

  config.add_section(AmbariConfig.AMBARI_PROPERTIES_CATEGORY)

  # Explicitly enabling remote-mount checking keeps the command unchanged.
  config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "true")
  Hardware.osdisks(config)
  assert_df_call(default_timeout, False)

  # Disabling remote mounts adds df's -l switch.
  config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "false")
  Hardware.osdisks(config)
  assert_df_call(default_timeout, True)

  # A zero timeout is rejected and the default is used instead.
  config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "0")
  Hardware.osdisks(config)
  assert_df_call(default_timeout, True)

  # Positive timeouts are propagated verbatim to the command line.
  for configured_timeout in (1, 2):
    config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY,
               Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, str(configured_timeout))
    Hardware.osdisks(config)
    assert_df_call(configured_timeout, True)
class TestSecurity(unittest.TestCase):
  """Tests for ambari_agent.security: VerifiedHTTPSConnection (one-way and
  two-way SSL handshakes) and the CachedHTTPSConnection wrapper around it.
  """

  @patch.object(OSCheck, "os_distribution", new=MagicMock(return_value=os_distro_value))
  def setUp(self):
    # disable stdout
    out = StringIO.StringIO()
    sys.stdout = out
    # Create config
    self.config = AmbariConfig()
    self.config.set('security', 'ssl_verify_cert', '0')
    # Instantiate CachedHTTPSConnection (skip connect() call)
    with patch.object(security.VerifiedHTTPSConnection, "connect"):
      self.cachedHTTPSConnection = security.CachedHTTPSConnection(
        self.config, "example.com")

  def tearDown(self):
    # enable stdout
    sys.stdout = sys.__stdout__

  ### VerifiedHTTPSConnection ###

  @patch.object(security.CertificateManager, "initSecurity")
  @patch("socket.create_connection")
  @patch("ssl.wrap_socket")
  def test_VerifiedHTTPSConnection_connect(self, wrap_socket_mock,
                                           create_connection_mock,
                                           init_security_mock):
    """A plain connect() must wrap the socket with SSL."""
    init_security_mock.return_value = None
    self.config.set('security', 'keysdir', '/dummy-keysdir')
    connection = security.VerifiedHTTPSConnection(
      "example.com", self.config.get('server', 'secured_url_port'), self.config)
    connection._tunnel_host = False
    connection.sock = None
    connection.connect()
    self.assertTrue(wrap_socket_mock.called)

  ### VerifiedHTTPSConnection with no certificates creation
  @patch.object(security.CertificateManager, "initSecurity")
  @patch("socket.create_connection")
  @patch("ssl.wrap_socket")
  def test_Verified_HTTPSConnection_non_secure_connect(
      self, wrap_socket_mock, create_connection_mock, init_security_mock):
    """One-way SSL connect must not trigger certificate setup (initSecurity)."""
    connection = security.VerifiedHTTPSConnection(
      "example.com", self.config.get('server', 'secured_url_port'), self.config)
    connection._tunnel_host = False
    connection.sock = None
    connection.connect()
    self.assertFalse(init_security_mock.called)

  ### VerifiedHTTPSConnection with two-way SSL authentication enabled
  @patch.object(security.CertificateManager, "initSecurity")
  @patch("socket.create_connection")
  @patch("ssl.wrap_socket")
  def test_Verified_HTTPSConnection_two_way_ssl_connect(
      self, wrap_socket_mock, create_connection_mock, init_security_mock):
    """When the SSL handshake fails and two-way SSL is configured, connect()
    must fall back to creating agent certificates via initSecurity."""
    # Force the first wrap_socket attempt to fail with an SSL error.
    wrap_socket_mock.side_effect = ssl.SSLError()
    connection = security.VerifiedHTTPSConnection(
      "example.com", self.config.get('server', 'secured_url_port'), self.config)
    self.config.isTwoWaySSLConnection = MagicMock(return_value=True)
    connection._tunnel_host = False
    connection.sock = None
    try:
      connection.connect()
    except ssl.SSLError:
      # The retry may re-raise since wrap_socket always fails here.
      pass
    self.assertTrue(init_security_mock.called)

  ### CachedHTTPSConnection ###
  @patch.object(security.VerifiedHTTPSConnection, "connect")
  def test_CachedHTTPSConnection_connect(self, vhc_connect_mock):
    """connect() delegates to VerifiedHTTPSConnection.connect only once."""
    self.config.set('server', 'hostname', 'dummy.server.hostname')
    self.config.set('server', 'secured_url_port', '443')
    # Testing not connected case
    self.cachedHTTPSConnection.connected = False
    self.cachedHTTPSConnection.connect()
    self.assertTrue(vhc_connect_mock.called)
    vhc_connect_mock.reset_mock()
    # Testing already connected case: connect() must be a no-op
    self.cachedHTTPSConnection.connect()
    self.assertFalse(vhc_connect_mock.called)

  @patch.object(security.CachedHTTPSConnection, "connect")
  def test_forceClear(self, connect_mock):
    """forceClear() must replace the underlying httpsconn instance."""
    # Testing if httpsconn instance changed
    old = self.cachedHTTPSConnection.httpsconn
    self.cachedHTTPSConnection.forceClear()
    self.assertNotEqual(old, self.cachedHTTPSConnection.httpsconn)

  @patch.object(security.CachedHTTPSConnection, "connect")
  def test_request(self, connect_mock):
    """request() forwards method/url/data/headers and returns the response body."""
    httpsconn_mock = MagicMock(create=True)
    self.cachedHTTPSConnection.httpsconn = httpsconn_mock

    dummy_request = MagicMock(create=True)
    dummy_request.get_method.return_value = "dummy_get_method"
    dummy_request.get_full_url.return_value = "dummy_full_url"
    dummy_request.get_data.return_value = "dummy_get_data"
    dummy_request.headers = "dummy_headers"

    responce_mock = MagicMock(create=True)
    responce_mock.read.return_value = "dummy responce"
    httpsconn_mock.getresponse.return_value = responce_mock

    # Testing normal case
    responce = self.cachedHTTPSConnection.request(dummy_request)

    self.assertEqual(responce, responce_mock.read.return_value)
    httpsconn_mock.request.assert_called_once_with(
      dummy_request.get_method.return_value,
      dummy_request.get_full_url.return_value,
      dummy_request.get_data.return_value,
      dummy_request.headers)

    # Testing case of exception
    # NOTE(review): side_effect is attached to httpsconn_mock.read, but the
    # normal-case path above shows request() reads via getresponse().read() —
    # so this side effect is likely never triggered. Worse, the broad
    # `except Exception` also swallows the AssertionError raised by
    # self.fail(), so this branch passes regardless of behavior. Confirm the
    # intended mock target (probably responce_mock.read) and narrow the
    # except clause.
    try:
      def side_eff():
        raise Exception("Dummy exception")

      httpsconn_mock.read.side_effect = side_eff
      responce = self.cachedHTTPSConnection.request(dummy_request)
      self.fail("Should raise IOError")
    except Exception, err:
      # Expected
      pass
def test_auto_execute_command(self, status_update_callback_mock, open_mock):
  """Run an auto-start (recovery) command through ActionQueue in a worker
  thread and check that neither a successful nor a failed auto-start
  execution produces an entry in the result report.

  Synchronization: runCommand is patched to block on `unfreeze_flag`
  (a threading.Event), so the test can observe the in-progress state
  before letting the command finish.
  """
  # Make file read calls visible
  def open_side_effect(file, mode):
    if mode == 'r':
      file_mock = MagicMock()
      file_mock.read.return_value = "Read from " + str(file)
      return file_mock
    else:
      return self.original_open(file, mode)
  open_mock.side_effect = open_side_effect

  config = AmbariConfig()
  tempdir = tempfile.gettempdir()
  config.set('agent', 'prefix', tempdir)
  config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
  config.set('agent', 'tolerate_download_failures', "true")
  dummy_controller = MagicMock()
  dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp())
  dummy_controller.recovery_manager.update_config(5, 5, 1, 11, True, False, "", -1)

  actionQueue = ActionQueue(config, dummy_controller)
  unfreeze_flag = threading.Event()
  python_execution_result_dict = {
    'stdout': 'out',
    'stderr': 'stderr',
    'structuredOut' : ''
  }

  def side_effect(command, tmpoutfile, tmperrfile, override_output_files=True, retry=False):
    # Block the worker until the main thread releases it.
    unfreeze_flag.wait()
    return python_execution_result_dict

  def patched_aq_execute_command(command):
    # We have to perform patching for separate thread in the same thread
    with patch.object(CustomServiceOrchestrator, "runCommand") as runCommand_mock:
      runCommand_mock.side_effect = side_effect
      actionQueue.process_command(command)

  ## Test successful execution
  python_execution_result_dict['status'] = 'COMPLETE'
  python_execution_result_dict['exitcode'] = 0
  self.assertFalse(actionQueue.tasks_in_progress_or_pending())
  # We call method in a separate thread
  execution_thread = Thread(target = patched_aq_execute_command ,
                            args = (self.datanode_auto_start_command, ))
  execution_thread.start()

  # check in progress report
  # wait until ready
  while True:
    time.sleep(0.1)
    if actionQueue.tasks_in_progress_or_pending():
      break

  # Continue command execution
  unfreeze_flag.set()

  # wait until ready
  check_queue = True
  while check_queue:
    report = actionQueue.result()
    if not actionQueue.tasks_in_progress_or_pending():
      break
    time.sleep(0.1)

  # Auto-start commands must not be reported back to the server.
  self.assertEqual(len(report['reports']), 0)

  ## Test failed execution
  python_execution_result_dict['status'] = 'FAILED'
  python_execution_result_dict['exitcode'] = 13

  # We call method in a separate thread
  execution_thread = Thread(target = patched_aq_execute_command ,
                            args = (self.datanode_auto_start_command, ))
  execution_thread.start()

  unfreeze_flag.set()

  # check in progress report
  # wait until ready
  # NOTE(review): check_queue is never set to False; both loops exit via
  # break, so `while True:` would express the intent more directly.
  while check_queue:
    report = actionQueue.result()
    if not actionQueue.tasks_in_progress_or_pending():
      break
    time.sleep(0.1)

  # Failed auto-start commands are not reported either.
  self.assertEqual(len(report['reports']), 0)
class TestCustomServiceOrchestrator(TestCase):
  """Tests for ambari_agent.CustomServiceOrchestrator: command JSON dumping,
  script path resolution, runCommand dispatch/hooks, command cancellation
  (foreground and background) and component status requests.
  """

  def setUp(self):
    # disable stdout
    out = StringIO.StringIO()
    sys.stdout = out
    # generate sample config
    tmpdir = tempfile.gettempdir()
    # NOTE(review): exec_tmp_dir is computed but never used in this class.
    exec_tmp_dir = os.path.join(tmpdir, 'tmp')
    self.config = AmbariConfig()
    self.config.config = ConfigParser.RawConfigParser()
    self.config.add_section('agent')
    self.config.set('agent', 'prefix', tmpdir)
    self.config.set('agent', 'cache_dir', "/cachedir")
    self.config.add_section('python')
    self.config.set('python', 'custom_actions_dir', tmpdir)

  @patch.object(OSCheck, "os_distribution", new=MagicMock(return_value=os_distro_value))
  @patch.object(FileCache, "__init__")
  def test_add_reg_listener_to_controller(self, FileCache_mock):
    """Constructing the orchestrator registers it with the controller."""
    FileCache_mock.return_value = None
    dummy_controller = MagicMock()
    config = AmbariConfig()
    tempdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tempdir)
    CustomServiceOrchestrator(config, dummy_controller)
    self.assertTrue(dummy_controller.registration_listeners.append.called)

  @patch.object(OSCheck, "os_distribution", new=MagicMock(return_value=os_distro_value))
  @patch.object(CustomServiceOrchestrator, 'decompressClusterHostInfo')
  @patch("ambari_agent.hostname.public_hostname")
  @patch("os.path.isfile")
  @patch("os.unlink")
  @patch.object(FileCache, "__init__")
  def test_dump_command_to_json(self, FileCache_mock, unlink_mock,
                                isfile_mock, hostname_mock,
                                decompress_cluster_host_info_mock):
    """dump_command_to_json() writes an owner-only (0600) JSON file named
    command-<taskId>.json for execution commands and status_command.json for
    status commands; cluster host info is decompressed only for the former."""
    FileCache_mock.return_value = None
    hostname_mock.return_value = "test.hst"
    command = {
      'commandType': 'EXECUTION_COMMAND',
      'role': u'DATANODE',
      'roleCommand': u'INSTALL',
      'commandId': '1-1',
      'taskId': 3,
      'clusterName': u'cc',
      'serviceName': u'HDFS',
      'configurations': {'global': {}},
      'configurationTags': {'global': {'tag': 'v1'}},
      'clusterHostInfo': {
        'namenode_host': ['1'],
        'slave_hosts': ['0', '1'],
        'all_hosts': ['h1.hortonworks.com', 'h2.hortonworks.com'],
        'all_ping_ports': ['8670:0,1']
      },
      'hostLevelParams': {}
    }
    decompress_cluster_host_info_mock.return_value = {
      'namenode_host': ['h2.hortonworks.com'],
      'slave_hosts': ['h1.hortonworks.com', 'h2.hortonworks.com'],
      'all_hosts': ['h1.hortonworks.com', 'h2.hortonworks.com'],
      'all_ping_ports': ['8670', '8670']
    }
    config = AmbariConfig()
    tempdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tempdir)
    dummy_controller = MagicMock()
    orchestrator = CustomServiceOrchestrator(config, dummy_controller)
    isfile_mock.return_value = True
    # Test dumping EXECUTION_COMMAND
    json_file = orchestrator.dump_command_to_json(command)
    self.assertTrue(os.path.exists(json_file))
    self.assertTrue(os.path.getsize(json_file) > 0)
    if get_platform() != PLATFORM_WINDOWS:
      # The dumped command may contain credentials: owner read/write only.
      self.assertEqual(oct(os.stat(json_file).st_mode & 0777), '0600')
    self.assertTrue(json_file.endswith("command-3.json"))
    self.assertTrue(decompress_cluster_host_info_mock.called)
    os.unlink(json_file)
    # Test dumping STATUS_COMMAND
    command['commandType'] = 'STATUS_COMMAND'
    decompress_cluster_host_info_mock.reset_mock()
    json_file = orchestrator.dump_command_to_json(command)
    self.assertTrue(os.path.exists(json_file))
    self.assertTrue(os.path.getsize(json_file) > 0)
    if get_platform() != PLATFORM_WINDOWS:
      self.assertEqual(oct(os.stat(json_file).st_mode & 0777), '0600')
    self.assertTrue(json_file.endswith("status_command.json"))
    self.assertFalse(decompress_cluster_host_info_mock.called)
    os.unlink(json_file)
    # Testing side effect of dump_command_to_json: the command dict itself
    # is enriched with the public hostname and agent config parameters.
    self.assertEquals(command['public_hostname'], "test.hst")
    self.assertEquals(
      command['agentConfigParams']['agent']['parallel_execution'], 0)
    self.assertTrue(unlink_mock.called)

  @patch.object(OSCheck, "os_distribution", new=MagicMock(return_value=os_distro_value))
  @patch("ambari_agent.hostname.public_hostname")
  @patch("os.path.isfile")
  @patch("os.unlink")
  @patch.object(FileCache, "__init__")
  def test_dump_command_to_json_with_retry(self, FileCache_mock, unlink_mock,
                                           isfile_mock, hostname_mock):
    """Dumping the same command twice (retry=True on the second call) must
    produce the same command-<taskId>.json file both times."""
    FileCache_mock.return_value = None
    hostname_mock.return_value = "test.hst"
    command = {
      'commandType': 'EXECUTION_COMMAND',
      'role': u'DATANODE',
      'roleCommand': u'INSTALL',
      'commandId': '1-1',
      'taskId': 3,
      'clusterName': u'cc',
      'serviceName': u'HDFS',
      'configurations': {'global': {}},
      'configurationTags': {'global': {'tag': 'v1'}},
      'clusterHostInfo': {
        'namenode_host': ['1'],
        'slave_hosts': ['0', '1'],
        'all_racks': [u'/default-rack:0'],
        'ambari_server_host': 'a.b.c',
        'ambari_server_port': '123',
        'ambari_server_use_ssl': 'false',
        'all_ipv4_ips': [u'192.168.12.101:0'],
        'all_hosts': ['h1.hortonworks.com', 'h2.hortonworks.com'],
        'all_ping_ports': ['8670:0,1']
      },
      'hostLevelParams': {}
    }
    config = AmbariConfig()
    tempdir = tempfile.gettempdir()
    config.set('agent', 'prefix', tempdir)
    dummy_controller = MagicMock()
    orchestrator = CustomServiceOrchestrator(config, dummy_controller)
    isfile_mock.return_value = True
    # Test dumping EXECUTION_COMMAND
    json_file = orchestrator.dump_command_to_json(command)
    self.assertTrue(os.path.exists(json_file))
    self.assertTrue(os.path.getsize(json_file) > 0)
    if get_platform() != PLATFORM_WINDOWS:
      self.assertEqual(oct(os.stat(json_file).st_mode & 0777), '0600')
    self.assertTrue(json_file.endswith("command-3.json"))
    os.unlink(json_file)
    # Test dumping STATUS_COMMAND
    json_file = orchestrator.dump_command_to_json(command, True)
    self.assertTrue(os.path.exists(json_file))
    self.assertTrue(os.path.getsize(json_file) > 0)
    if get_platform() != PLATFORM_WINDOWS:
      self.assertEqual(oct(os.stat(json_file).st_mode & 0777), '0600')
    self.assertTrue(json_file.endswith("command-3.json"))
    os.unlink(json_file)
    # Testing side effect of dump_command_to_json
    self.assertEquals(command['public_hostname'], "test.hst")
    self.assertEquals(
      command['agentConfigParams']['agent']['parallel_execution'], 0)
    self.assertTrue(unlink_mock.called)

  @patch.object(OSCheck, "os_distribution", new=MagicMock(return_value=os_distro_value))
  @patch("os.path.exists")
  @patch.object(FileCache, "__init__")
  def test_resolve_script_path(self, FileCache_mock, exists_mock):
    """resolve_script_path() joins base dir and script, raising AgentException
    when the resulting path does not exist."""
    FileCache_mock.return_value = None
    dummy_controller = MagicMock()
    config = AmbariConfig()
    orchestrator = CustomServiceOrchestrator(config, dummy_controller)
    # Testing existing path
    exists_mock.return_value = True
    path = orchestrator.\
      resolve_script_path(os.path.join("HBASE", "package"),
                          os.path.join("scripts", "hbase_master.py"))
    self.assertEqual(
      os.path.join("HBASE", "package", "scripts", "hbase_master.py"), path)
    # Testing not existing path
    exists_mock.return_value = False
    try:
      orchestrator.resolve_script_path(
        "/HBASE", os.path.join("scripts", "hbase_master.py"))
      self.fail('ExpectedException not thrown')
    except AgentException:
      pass  # Expected

  @patch.object(FileCache, "get_custom_resources_subdir")
  @patch.object(CustomServiceOrchestrator, "resolve_script_path")
  @patch.object(CustomServiceOrchestrator, "resolve_hook_script_path")
  @patch.object(FileCache, "get_host_scripts_base_dir")
  @patch.object(FileCache, "get_service_base_dir")
  @patch.object(FileCache, "get_hook_base_dir")
  @patch.object(CustomServiceOrchestrator, "dump_command_to_json")
  @patch.object(PythonExecutor, "run_file")
  @patch.object(FileCache, "__init__")
  def test_runCommand(
      self, FileCache_mock, run_file_mock, dump_command_to_json_mock,
      get_hook_base_dir_mock, get_service_base_dir_mock,
      get_host_scripts_base_dir_mock, resolve_hook_script_path_mock,
      resolve_script_path_mock, get_custom_resources_subdir_mock):
    """runCommand(): normal run executes pre-hook, script and post-hook
    (3 run_file calls); status commands suppress tracebacks in stderr;
    forced_command_name overrides the script; unknown script types fail."""
    FileCache_mock.return_value = None
    command = {
      'commandType': 'EXECUTION_COMMAND',
      'role': 'REGION_SERVER',
      'hostLevelParams': {
        'stack_name': 'HDP',
        'stack_version': '2.0.7',
        'jdk_location': 'some_location'
      },
      'commandParams': {
        'script_type': 'PYTHON',
        'script': 'scripts/hbase_regionserver.py',
        'command_timeout': '600',
        'service_package_folder': 'HBASE'
      },
      'taskId': '3',
      'roleCommand': 'INSTALL'
    }
    get_host_scripts_base_dir_mock.return_value = "/host_scripts"
    get_service_base_dir_mock.return_value = "/basedir/"
    resolve_script_path_mock.return_value = "/basedir/scriptpath"
    resolve_hook_script_path_mock.return_value = \
      ('/hooks_dir/prefix-command/scripts/hook.py', '/hooks_dir/prefix-command')
    dummy_controller = MagicMock()
    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)
    unix_process_id = 111
    orchestrator.commands_in_progress = {command['taskId']: unix_process_id}
    get_hook_base_dir_mock.return_value = "/hooks/"
    # normal run case
    run_file_mock.return_value = {
      'stdout': 'sss',
      'stderr': 'eee',
      'exitcode': 0,
    }
    ret = orchestrator.runCommand(command, "out.txt", "err.txt")
    self.assertEqual(ret['exitcode'], 0)
    self.assertTrue(run_file_mock.called)
    # pre-hook + script + post-hook
    self.assertEqual(run_file_mock.call_count, 3)

    run_file_mock.reset_mock()

    # running a status command
    def return_traceback(*args, **kwargs):
      return {
        'stderr': traceback.format_exc(),
        'stdout': '',
        'exitcode': 0,
      }
    run_file_mock.side_effect = return_traceback

    status_command = dict(command)
    status_command['commandType'] = 'STATUS_COMMAND'
    del status_command['taskId']
    del status_command['roleCommand']
    ret = orchestrator.runCommand(status_command, "out.txt", "err.txt")
    # With no active exception, format_exc() yields 'None\n' — status
    # commands must not surface a traceback.
    self.assertEqual('None\n', ret['stderr'])

    run_file_mock.reset_mock()

    # Case when we force another command
    run_file_mock.return_value = {
      'stdout': 'sss',
      'stderr': 'eee',
      'exitcode': 0,
    }
    ret = orchestrator.runCommand(
      command, "out.txt", "err.txt",
      forced_command_name=CustomServiceOrchestrator.SCRIPT_TYPE_PYTHON)
    ## Check that override_output_files was true only during first call
    print run_file_mock
    self.assertEquals(run_file_mock.call_args_list[0][0][8], True)
    self.assertEquals(run_file_mock.call_args_list[1][0][8], False)
    self.assertEquals(run_file_mock.call_args_list[2][0][8], False)
    ## Check that forced_command_name was taken into account
    self.assertEqual(run_file_mock.call_args_list[0][0][1][0],
                     CustomServiceOrchestrator.SCRIPT_TYPE_PYTHON)

    run_file_mock.reset_mock()

    # unknown script type case
    command['commandParams']['script_type'] = "SOME_TYPE"
    ret = orchestrator.runCommand(command, "out.txt", "err.txt")
    self.assertEqual(ret['exitcode'], 1)
    self.assertFalse(run_file_mock.called)
    self.assertTrue("Unknown script type" in ret['stdout'])
    #By default returns empty dictionary
    self.assertEqual(ret['structuredOut'], '{}')
    pass

  @patch("ambari_commons.shell.kill_process_with_children")
  @patch.object(CustomServiceOrchestrator, "resolve_script_path")
  @patch.object(CustomServiceOrchestrator, "resolve_hook_script_path")
  @patch.object(FileCache, "get_host_scripts_base_dir")
  @patch.object(FileCache, "get_service_base_dir")
  @patch.object(FileCache, "get_hook_base_dir")
  @patch.object(CustomServiceOrchestrator, "dump_command_to_json")
  @patch.object(PythonExecutor, "run_file")
  @patch.object(FileCache, "__init__")
  def test_cancel_command(
      self, FileCache_mock, run_file_mock, dump_command_to_json_mock,
      get_hook_base_dir_mock, get_service_base_dir_mock,
      get_host_scripts_base_dir_mock, resolve_hook_script_path_mock,
      resolve_script_path_mock, kill_process_with_children_mock):
    """Cancelling a running command kills its process tree, appends the
    abort reason to stdout/stderr and removes it from commands_in_progress."""
    FileCache_mock.return_value = None
    command = {
      'role': 'REGION_SERVER',
      'hostLevelParams': {
        'stack_name': 'HDP',
        'stack_version': '2.0.7',
        'jdk_location': 'some_location'
      },
      'commandParams': {
        'script_type': 'PYTHON',
        'script': 'scripts/hbase_regionserver.py',
        'command_timeout': '600',
        'service_package_folder': 'HBASE'
      },
      'taskId': '3',
      'roleCommand': 'INSTALL'
    }
    get_host_scripts_base_dir_mock.return_value = "/host_scripts"
    get_service_base_dir_mock.return_value = "/basedir/"
    resolve_script_path_mock.return_value = "/basedir/scriptpath"
    resolve_hook_script_path_mock.return_value = \
      ('/hooks_dir/prefix-command/scripts/hook.py', '/hooks_dir/prefix-command')
    dummy_controller = MagicMock()
    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)
    unix_process_id = 111
    orchestrator.commands_in_progress = {command['taskId']: unix_process_id}
    get_hook_base_dir_mock.return_value = "/hooks/"
    run_file_mock_return_value = {
      'stdout': 'killed',
      'stderr': 'killed',
      'exitcode': 1,
    }

    def side_effect(*args, **kwargs):
      # Keep the command "running" long enough for cancel_command to hit it.
      time.sleep(0.2)
      return run_file_mock_return_value
    run_file_mock.side_effect = side_effect

    _, out = tempfile.mkstemp()
    _, err = tempfile.mkstemp()
    pool = ThreadPool(processes=1)
    async_result = pool.apply_async(orchestrator.runCommand, (command, out, err))

    time.sleep(0.1)
    orchestrator.cancel_command(command['taskId'], 'reason')

    ret = async_result.get()
    self.assertEqual(ret['exitcode'], 1)
    self.assertEquals(ret['stdout'], 'killed\nCommand aborted. Reason: \'reason\'')
    self.assertEquals(ret['stderr'], 'killed\nCommand aborted. Reason: \'reason\'')
    self.assertTrue(kill_process_with_children_mock.called)
    self.assertFalse(
      command['taskId'] in orchestrator.commands_in_progress.keys())
    self.assertTrue(os.path.exists(out))
    self.assertTrue(os.path.exists(err))
    # Best-effort cleanup of the temp files.
    try:
      os.remove(out)
      os.remove(err)
    except:
      pass

  @patch.object(OSCheck, "os_distribution", new=MagicMock(return_value=os_distro_value))
  @patch.object(CustomServiceOrchestrator, "get_py_executor")
  @patch("ambari_commons.shell.kill_process_with_children")
  @patch.object(FileCache, "__init__")
  @patch.object(CustomServiceOrchestrator, "resolve_script_path")
  @patch.object(CustomServiceOrchestrator, "resolve_hook_script_path")
  def test_cancel_backgound_command(self, resolve_hook_script_path_mock,
                                    resolve_script_path_mock, FileCache_mock,
                                    kill_process_with_children_mock,
                                    get_py_executor_mock):
    """Cancelling a background command (taskId 19, pid 33 from
    TestActionQueue.background_command) kills its process tree and marks the
    command status FAILED in the ActionQueue."""
    FileCache_mock.return_value = None
    FileCache_mock.cache_dir = MagicMock()
    resolve_hook_script_path_mock.return_value = None
    dummy_controller = MagicMock()
    cfg = AmbariConfig()
    cfg.set('agent', 'tolerate_download_failures', 'true')
    cfg.set('agent', 'prefix', '.')
    cfg.set('agent', 'cache_dir', 'background_tasks')
    actionQueue = ActionQueue(cfg, dummy_controller)
    dummy_controller.actionQueue = actionQueue
    orchestrator = CustomServiceOrchestrator(cfg, dummy_controller)
    orchestrator.file_cache = MagicMock()

    def f(a, b):
      return ""
    orchestrator.file_cache.get_service_base_dir = f
    actionQueue.customServiceOrchestrator = orchestrator

    import TestActionQueue
    import copy

    pyex = PythonExecutor(actionQueue.customServiceOrchestrator.tmp_dir,
                          actionQueue.customServiceOrchestrator.config)
    TestActionQueue.patch_output_file(pyex)
    pyex.prepare_process_result = MagicMock()
    get_py_executor_mock.return_value = pyex
    orchestrator.dump_command_to_json = MagicMock()

    # Condition used to park the completion callback until the test has
    # asserted the cancellation effects.
    lock = threading.RLock()
    complete_done = threading.Condition(lock)
    complete_was_called = {}

    def command_complete_w(process_condenced_result, handle):
      with lock:
        complete_was_called['visited'] = ''
        complete_done.wait(3)

    actionQueue.on_background_command_complete_callback = TestActionQueue.wraped(
      actionQueue.on_background_command_complete_callback, command_complete_w, None)
    execute_command = copy.deepcopy(
      TestActionQueue.TestActionQueue.background_command)
    actionQueue.put([execute_command])
    actionQueue.processBackgroundQueueSafeEmpty()

    time.sleep(.1)

    orchestrator.cancel_command(19, 'reason')
    self.assertTrue(kill_process_with_children_mock.called)
    kill_process_with_children_mock.assert_called_with(33)

    with lock:
      complete_done.notifyAll()

    with lock:
      self.assertTrue(complete_was_called.has_key('visited'))

    time.sleep(.1)

    runningCommand = actionQueue.commandStatuses.get_command_status(19)
    self.assertTrue(runningCommand is not None)
    self.assertEqual(runningCommand['status'], ActionQueue.FAILED_STATUS)

  @patch.object(AmbariConfig, "get")
  @patch.object(CustomServiceOrchestrator, "dump_command_to_json")
  @patch.object(PythonExecutor, "run_file")
  @patch.object(FileCache, "__init__")
  @patch.object(FileCache, "get_custom_actions_base_dir")
  def test_runCommand_custom_action(self, get_custom_actions_base_dir_mock,
                                    FileCache_mock, run_file_mock,
                                    dump_command_to_json_mock,
                                    ambari_config_get):
    """Custom actions (ACTIONEXECUTE) run the script without hooks."""
    ambari_config_get.return_value = "0"
    FileCache_mock.return_value = None
    get_custom_actions_base_dir_mock.return_value = "some path"
    _, script = tempfile.mkstemp()
    command = {
      'role': 'any',
      'commandParams': {
        'script_type': 'PYTHON',
        'script': 'some_custom_action.py',
        'command_timeout': '600',
        'jdk_location': 'some_location'
      },
      'taskId': '3',
      'roleCommand': 'ACTIONEXECUTE'
    }
    dummy_controller = MagicMock()
    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)
    unix_process_id = 111
    orchestrator.commands_in_progress = {command['taskId']: unix_process_id}
    # normal run case
    run_file_mock.return_value = {
      'stdout': 'sss',
      'stderr': 'eee',
      'exitcode': 0,
    }
    ret = orchestrator.runCommand(command, "out.txt", "err.txt")
    self.assertEqual(ret['exitcode'], 0)
    self.assertTrue(run_file_mock.called)
    # Hoooks are not supported for custom actions,
    # that's why run_file() should be called only once
    self.assertEqual(run_file_mock.call_count, 1)

  @patch("os.path.isfile")
  @patch.object(FileCache, "__init__")
  def test_resolve_hook_script_path(self, FileCache_mock, isfile_mock):
    """resolve_hook_script_path() returns (script path, hook base dir) when
    the hook script exists, None otherwise or for a None hooks dir."""
    FileCache_mock.return_value = None
    dummy_controller = MagicMock()
    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)
    # Testing None param
    res1 = orchestrator.resolve_hook_script_path(None, "prefix", "command",
                                                 "script_type")
    self.assertEqual(res1, None)
    # Testing existing hook script
    isfile_mock.return_value = True
    res2 = orchestrator.resolve_hook_script_path("hooks_dir", "prefix",
                                                 "command", "script_type")
    self.assertEqual(
      res2,
      (os.path.join('hooks_dir', 'prefix-command', 'scripts', 'hook.py'),
       os.path.join('hooks_dir', 'prefix-command')))
    # Testing not existing hook script
    isfile_mock.return_value = False
    res3 = orchestrator.resolve_hook_script_path("hooks_dir", "prefix",
                                                 "command", "script_type")
    self.assertEqual(res3, None)

  @patch.object(CustomServiceOrchestrator, "runCommand")
  @patch.object(FileCache, "__init__")
  def test_requestComponentStatus(self, FileCache_mock, runCommand_mock):
    """requestComponentStatus() returns runCommand's result unchanged for
    both alive (exitcode 0) and dead (non-zero) components."""
    FileCache_mock.return_value = None
    status_command = {
      "serviceName": 'HDFS',
      "commandType": "STATUS_COMMAND",
      "clusterName": "",
      "componentName": "DATANODE",
      'configurations': {}
    }
    dummy_controller = MagicMock()
    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)
    # Test alive case
    runCommand_mock.return_value = {"exitcode": 0}
    status = orchestrator.requestComponentStatus(status_command)
    self.assertEqual(runCommand_mock.return_value, status)
    # Test dead case
    runCommand_mock.return_value = {"exitcode": 1}
    status = orchestrator.requestComponentStatus(status_command)
    self.assertEqual(runCommand_mock.return_value, status)

  @patch.object(CustomServiceOrchestrator, "get_py_executor")
  @patch.object(CustomServiceOrchestrator, "dump_command_to_json")
  @patch.object(FileCache, "__init__")
  @patch.object(FileCache, "get_custom_actions_base_dir")
  def test_runCommand_background_action(self, get_custom_actions_base_dir_mock,
                                        FileCache_mock,
                                        dump_command_to_json_mock,
                                        get_py_executor_mock):
    """BACKGROUND_EXECUTION_COMMAND returns immediately with the in-progress
    marker exit code 777 while the script keeps running."""
    FileCache_mock.return_value = None
    get_custom_actions_base_dir_mock.return_value = "some path"
    _, script = tempfile.mkstemp()
    command = {
      'role': 'any',
      'commandParams': {
        'script_type': 'PYTHON',
        'script': 'some_custom_action.py',
        'command_timeout': '600',
        'jdk_location': 'some_location'
      },
      'taskId': '13',
      'roleCommand': 'ACTIONEXECUTE',
      'commandType': 'BACKGROUND_EXECUTION_COMMAND',
      '__handle': BackgroundCommandExecutionHandle({'taskId': '13'}, 13,
                                                   MagicMock(), MagicMock())
    }
    dummy_controller = MagicMock()
    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)

    import TestActionQueue
    pyex = PythonExecutor(orchestrator.tmp_dir, orchestrator.config)
    TestActionQueue.patch_output_file(pyex)
    pyex.condenseOutput = MagicMock()
    get_py_executor_mock.return_value = pyex
    orchestrator.dump_command_to_json = MagicMock()

    ret = orchestrator.runCommand(command, "out.txt", "err.txt")
    self.assertEqual(ret['exitcode'], 777)

  def tearDown(self):
    # enable stdout
    sys.stdout = sys.__stdout__
def test_dump_command_to_json(self, FileCache_mock, unlink_mock,
                              isfile_mock, hostname_mock,
                              decompress_cluster_host_info_mock):
  """dump_command_to_json() writes an owner-only (0600) JSON file named
  command-<taskId>.json for execution commands and status_command.json for
  status commands; cluster host info is decompressed only for the former.
  As a side effect the command dict gains 'public_hostname'.
  """
  FileCache_mock.return_value = None
  hostname_mock.return_value = "test.hst"
  command = {
    'commandType': 'EXECUTION_COMMAND',
    'role': u'DATANODE',
    'roleCommand': u'INSTALL',
    'commandId': '1-1',
    'taskId': 3,
    'clusterName': u'cc',
    'serviceName': u'HDFS',
    'configurations': {'global': {}},
    'configurationTags': {'global': {'tag': 'v1'}},
    'clusterHostInfo': {
      'namenode_host': ['1'],
      'slave_hosts': ['0', '1'],
      'all_hosts': ['h1.hortonworks.com', 'h2.hortonworks.com'],
      'all_ping_ports': ['8670:0,1']
    },
    'hostLevelParams': {}
  }
  decompress_cluster_host_info_mock.return_value = {
    'namenode_host': ['h2.hortonworks.com'],
    'slave_hosts': ['h1.hortonworks.com', 'h2.hortonworks.com'],
    'all_hosts': ['h1.hortonworks.com', 'h2.hortonworks.com'],
    'all_ping_ports': ['8670', '8670']
  }
  config = AmbariConfig().getConfig()
  tempdir = tempfile.gettempdir()
  config.set('agent', 'prefix', tempdir)
  dummy_controller = MagicMock()
  orchestrator = CustomServiceOrchestrator(config, dummy_controller)
  isfile_mock.return_value = True
  # Test dumping EXECUTION_COMMAND
  json_file = orchestrator.dump_command_to_json(command)
  self.assertTrue(os.path.exists(json_file))
  self.assertTrue(os.path.getsize(json_file) > 0)
  if get_platform() != PLATFORM_WINDOWS:
    # The dumped command may contain credentials: owner read/write only.
    self.assertEqual(oct(os.stat(json_file).st_mode & 0777), '0600')
  self.assertTrue(json_file.endswith("command-3.json"))
  self.assertTrue(decompress_cluster_host_info_mock.called)
  os.unlink(json_file)
  # Test dumping STATUS_COMMAND
  command['commandType'] = 'STATUS_COMMAND'
  decompress_cluster_host_info_mock.reset_mock()
  json_file = orchestrator.dump_command_to_json(command)
  self.assertTrue(os.path.exists(json_file))
  self.assertTrue(os.path.getsize(json_file) > 0)
  if get_platform() != PLATFORM_WINDOWS:
    self.assertEqual(oct(os.stat(json_file).st_mode & 0777), '0600')
  self.assertTrue(json_file.endswith("status_command.json"))
  self.assertFalse(decompress_cluster_host_info_mock.called)
  os.unlink(json_file)
  # Testing side effect of dump_command_to_json
  self.assertEquals(command['public_hostname'], "test.hst")
  self.assertTrue(unlink_mock.called)
def test_execute_command(self, runCommand_mock, status_update_callback_mock,
                         open_mock):
  """
  End-to-end check of ActionQueue.execute_command() across three scenarios:
  (1) successful install command — first reported as IN_PROGRESS with output
  read from the on-disk temp files, then as COMPLETED with configurationTags;
  (2) failed install command — reported as FAILED with the failing exit code;
  (3) upgrade command — reported as COMPLETED.  In every case a terminal
  report must disappear from the next result() call once it has been read.
  """
  # Make file read calls visible: reads return a marker string naming the
  # file, so assertions can tell WHICH temp file a report was built from;
  # writes still go through the real open().
  def open_side_effect(file, mode):
    if mode == 'r':
      file_mock = MagicMock()
      file_mock.read.return_value = "Read from " + str(file)
      return file_mock
    else:
      return self.original_open(file, mode)
  open_mock.side_effect = open_side_effect

  config = AmbariConfig().getConfig()
  tempdir = tempfile.gettempdir()
  config.set('agent', 'prefix', tempdir)
  config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
  config.set('agent', 'tolerate_download_failures', "true")
  dummy_controller = MagicMock()
  actionQueue = ActionQueue(config, dummy_controller)

  # The mocked runCommand blocks on this event, so the test can observe the
  # IN_PROGRESS state deterministically before letting the command finish.
  unfreeze_flag = threading.Event()
  python_execution_result_dict = {
    'stdout': 'out',
    'stderr': 'stderr',
    'structuredOut' : ''
  }
  def side_effect(command, tmpoutfile, tmperrfile):
    unfreeze_flag.wait()
    return python_execution_result_dict
  def patched_aq_execute_command(command):
    # We have to perform patching for separate thread in the same thread
    runCommand_mock.side_effect = side_effect
    actionQueue.execute_command(command)

  ### Test install/start/stop command ###
  ## Test successful execution with configuration tags
  python_execution_result_dict['status'] = 'COMPLETE'
  python_execution_result_dict['exitcode'] = 0
  # We call method in a separate thread
  execution_thread = Thread(target = patched_aq_execute_command ,
                            args = (self.datanode_install_command, ))
  execution_thread.start()

  # check in progress report
  # wait until ready
  while True:
    time.sleep(0.1)
    report = actionQueue.result()
    if len(report['reports']) != 0:
      break
  # stdout/stderr/structuredOut come from the marker-producing reads above;
  # exitCode 777 is the in-progress placeholder value.
  expected = {'status': 'IN_PROGRESS',
              'stderr': 'Read from {0}/errors-3.txt'.format(tempdir),
              'stdout': 'Read from {0}/output-3.txt'.format(tempdir),
              'structuredOut' : 'Read from {0}/structured-out-3.json'.format(tempdir),
              'clusterName': u'cc',
              'roleCommand': u'INSTALL',
              'serviceName': u'HDFS',
              'role': u'DATANODE',
              'actionId': '1-1',
              'taskId': 3,
              'exitCode': 777}
  self.assertEqual(report['reports'][0], expected)

  # Continue command execution
  unfreeze_flag.set()
  # wait until ready
  while report['reports'][0]['status'] == 'IN_PROGRESS':
    time.sleep(0.1)
    report = actionQueue.result()

  # check report: COMPLETED result carries the configurationTags and the
  # config snapshot written to config.json under the agent prefix.
  configname = os.path.join(tempdir, 'config.json')
  expected = {'status': 'COMPLETED',
              'stderr': 'stderr',
              'stdout': 'out',
              'clusterName': u'cc',
              'structuredOut': '""',
              'roleCommand': u'INSTALL',
              'serviceName': u'HDFS',
              'role': u'DATANODE',
              'actionId': '1-1',
              'taskId': 3,
              'configurationTags': {'global': {'tag': 'v1'}},
              'exitCode': 0}
  self.assertEqual(len(report['reports']), 1)
  self.assertEqual(report['reports'][0], expected)
  self.assertTrue(os.path.isfile(configname))
  # Check that we had 2 status update calls ( IN_PROGRESS and COMPLETE)
  self.assertEqual(status_update_callback_mock.call_count, 2)
  os.remove(configname)
  # now should not have reports (read complete/failed reports are deleted)
  report = actionQueue.result()
  self.assertEqual(len(report['reports']), 0)

  ## Test failed execution
  python_execution_result_dict['status'] = 'FAILED'
  python_execution_result_dict['exitcode'] = 13
  # We call method in a separate thread
  execution_thread = Thread(target = patched_aq_execute_command ,
                            args = (self.datanode_install_command, ))
  execution_thread.start()
  unfreeze_flag.set()
  # check in progress report
  # wait until ready
  report = actionQueue.result()
  while len(report['reports']) == 0 or \
      report['reports'][0]['status'] == 'IN_PROGRESS':
    time.sleep(0.1)
    report = actionQueue.result()
  # check report: FAILED status with the mocked non-zero exit code.
  expected = {'status': 'FAILED',
              'stderr': 'stderr',
              'stdout': 'out',
              'clusterName': u'cc',
              'structuredOut': '""',
              'roleCommand': u'INSTALL',
              'serviceName': u'HDFS',
              'role': u'DATANODE',
              'actionId': '1-1',
              'taskId': 3,
              'exitCode': 13}
  self.assertEqual(len(report['reports']), 1)
  self.assertEqual(report['reports'][0], expected)
  # now should not have reports (read complete/failed reports are deleted)
  report = actionQueue.result()
  self.assertEqual(len(report['reports']), 0)

  ### Test upgrade command ###
  python_execution_result_dict['status'] = 'COMPLETE'
  python_execution_result_dict['exitcode'] = 0
  execution_thread = Thread(target = patched_aq_execute_command ,
                            args = (self.datanode_upgrade_command, ))
  execution_thread.start()
  unfreeze_flag.set()
  # wait until ready
  report = actionQueue.result()
  while len(report['reports']) == 0 or \
      report['reports'][0]['status'] == 'IN_PROGRESS':
    time.sleep(0.1)
    report = actionQueue.result()
  # check report: identity fields come from datanode_upgrade_command.
  expected = {'status': 'COMPLETED',
              'stderr': 'stderr',
              'stdout': 'out',
              'clusterName': 'clusterName',
              'structuredOut': '""',
              'roleCommand': 'UPGRADE',
              'serviceName': 'serviceName',
              'role': 'role',
              'actionId': 17,
              'taskId': 'taskId',
              'exitCode': 0}
  self.assertEqual(len(report['reports']), 1)
  self.assertEqual(report['reports'][0], expected)
  # now should not have reports (read complete/failed reports are deleted)
  report = actionQueue.result()
  self.assertEqual(len(report['reports']), 0)
def test_command_in_progress(self):
  """
  While the (fake) puppet executor is still running a command,
  ActionQueue.result() must expose a single IN_PROGRESS report with the
  executor's partial output; once the executor finishes, the same report
  must switch to COMPLETED with the final output and exit code.
  """
  config = AmbariConfig().getConfig()
  tmpfile = tempfile.gettempdir()
  config.set('agent', 'prefix', tmpfile)
  actionQueue = ActionQueue(config)
  actionQueue.IDLE_SLEEP_TIME = 0.01
  # FakeExecutor signals executor_started_event when it begins and blocks
  # until end_executor_event is set, letting the test sample both states.
  executor_started_event = threading.Event()
  end_executor_event = threading.Event()
  actionQueue.puppetExecutor = FakeExecutor(executor_started_event,
                                            end_executor_event)
  before_start_result = actionQueue.result()
  command = {
    'commandId': 17,
    'role': "role",
    'taskId': "taskId",
    'clusterName': "clusterName",
    'serviceName': "serviceName",
    'status': 'IN_PROGRESS',
    'hostname': "localhost.localdomain",
    'hostLevelParams': "hostLevelParams",
    'clusterHostInfo': "clusterHostInfo",
    'roleCommand': "roleCommand",
    'commandType': "EXECUTION_COMMAND",
    # BUG FIX: the original dict listed 'configurations' twice; the first
    # (string) entry was dead code silently overwritten by this dict value.
    'configurations': {'global': {}}
  }
  actionQueue.put(command)
  actionQueue.start()
  # Block until the fake executor reports it is running the command.
  executor_started_event.wait()
  in_progress_result = actionQueue.result()
  end_executor_event.set()
  actionQueue.stop()
  actionQueue.join()
  after_start_result = actionQueue.result()

  # Before the command runs there is nothing to report.
  self.assertEquals(len(before_start_result['componentStatus']), 0)
  self.assertEquals(len(before_start_result['reports']), 0)
  # Mid-flight: one IN_PROGRESS report carrying the executor's interim
  # output and the 777 in-progress placeholder exit code.
  self.assertEquals(len(in_progress_result['componentStatus']), 0)
  self.assertEquals(len(in_progress_result['reports']), 1)
  self.assertEquals(in_progress_result['reports'][0]['status'], "IN_PROGRESS")
  self.assertEquals(in_progress_result['reports'][0]['stdout'], "Dummy output")
  self.assertEquals(in_progress_result['reports'][0]['exitCode'], 777)
  self.assertEquals(in_progress_result['reports'][0]['stderr'], 'Dummy err')
  # After completion: the report switches to COMPLETED with the fake
  # executor's final stdout/stderr and a zero exit code.
  self.assertEquals(len(after_start_result['componentStatus']), 0)
  self.assertEquals(len(after_start_result['reports']), 1)
  self.assertEquals(after_start_result['reports'][0]['status'], "COMPLETED")
  self.assertEquals(after_start_result['reports'][0]['stdout'], "returned stdout")
  self.assertEquals(after_start_result['reports'][0]['exitCode'], 0)
  self.assertEquals(after_start_result['reports'][0]['stderr'], 'returned stderr')