class TestManifestGenerator(TestCase): def setUp(self): # disable stdout out = StringIO.StringIO() sys.stdout = out self.dir = tempfile.mkdtemp() self.config = AmbariConfig() jsonCommand = file('../../main/python/ambari_agent/test.json').read() self.parsedJson = json.loads(jsonCommand) def tearDown(self): shutil.rmtree(self.dir) # enable stdout sys.stdout = sys.__stdout__ def testWriteImports(self): tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1] print tmpFileName tmpFile = file(tmpFileName, 'r+') manifestGenerator.writeImports(tmpFile, '../../main/puppet/modules', self.config.getImports()) tmpFile.seek(0) print tmpFile.read() tmpFile.close() pass @patch.object(manifestGenerator, 'writeImports') @patch.object(manifestGenerator, 'writeNodes') @patch.object(manifestGenerator, 'writeParams') @patch.object(manifestGenerator, 'writeTasks') def testGenerateManifest(self, writeTasksMock, writeParamsMock, writeNodesMock, writeImportsMock): tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1] manifestGenerator.generateManifest(self.parsedJson, tmpFileName, '../../main/puppet/modules', self.config.getConfig()) self.assertTrue(writeParamsMock.called) self.assertTrue(writeNodesMock.called) self.assertTrue(writeImportsMock.called) self.assertTrue(writeTasksMock.called) print file(tmpFileName).read() pass
def test_build(self):
    """Heartbeat.build should run against a freshly constructed queue."""
    tests_dir = os.path.dirname(os.path.realpath(__file__))
    # Point the agent at the real servicesToPidNames.dict shipped in main/.
    dict_path = os.path.join(tests_dir, '..', '..', 'main', 'python',
                             'ambari_agent', 'servicesToPidNames.dict')
    AmbariConfig.config.set('services', 'serviceToPidMapFile', dict_path)
    queue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
    heartbeat = Heartbeat(queue)
    result = heartbeat.build(100)
def test_no_mapping(self, register_mock, result_mock):
    """The second positional argument handed to the register mock appears
    to track the heartbeat id: True for id 10, False for id 0 — both builds
    pass componentsMapped=True."""
    result_mock.return_value = {
        'reports': [{'status': 'IN_PROGRESS',
                     'stderr': 'Read from /tmp/errors-3.txt',
                     'stdout': 'Read from /tmp/output-3.txt',
                     'clusterName': u'cc',
                     'roleCommand': u'INSTALL',
                     'serviceName': u'HDFS',
                     'role': u'DATANODE',
                     'actionId': '1-1',
                     'taskId': 3,
                     'exitCode': 777}],
        'componentStatus': [{'status': 'HEALTHY',
                             'componentName': 'NAMENODE'}]
    }
    queue = ActionQueue(AmbariConfig.AmbariConfig().getConfig(),
                        'dummy_controller')
    heartbeat = Heartbeat(queue)

    heartbeat.build(id=10, state_interval=1, componentsMapped=True)
    self.assertEqual(register_mock.call_args_list[0][0][1], True)

    register_mock.reset_mock()

    heartbeat.build(id=0, state_interval=1, componentsMapped=True)
    self.assertEqual(register_mock.call_args_list[0][0][1], False)
def test_heartbeat_host_check_no_cmd(self, register_mock):
    """With nothing queued and nothing running, both positional flags
    passed to the register mock are falsy."""
    heartbeat = Heartbeat(ActionQueue(AmbariConfig.AmbariConfig().getConfig()))
    heartbeat.build(12, 6)
    self.assertTrue(register_mock.called)
    args, kwargs = register_mock.call_args_list[0]
    self.assertFalse(args[1])
    self.assertFalse(args[2])
def setUp(self):
    """Build a CertificateManager backed by a throwaway keys directory."""
    self.tmpdir = tempfile.mkdtemp()
    config = AmbariConfig.AmbariConfig()
    # Point the agent config at a fake server and the temp keys dir.
    for section, option, value in (('server', 'hostname', 'example.com'),
                                   ('server', 'url_port', '777'),
                                   ('security', 'keysdir', self.tmpdir),
                                   ('security', 'server_crt', 'ca.crt')):
        config.set(section, option, value)
    self.certMan = CertificateManager(config,
                                      config.get('server', 'hostname'))
def test_config(self):
    """
    Verify that if the config does not have a property, default values
    are used.
    """
    DataCleaner.logger.reset_mock()
    config = AmbariConfig.AmbariConfig()
    # Strip every cleanup option so the defaults must kick in.
    for option in ('data_cleanup_max_age', 'data_cleanup_interval',
                   'data_cleanup_max_size_MB'):
        config.remove_option('agent', option)
    cleaner = DataCleaner.DataCleaner(config)
    self.assertEqual(cleaner.file_max_age, 86400)
    self.assertEqual(cleaner.cleanup_interval, 3600)
    self.assertEqual(cleaner.cleanup_max_size_MB, 10000)
def test_heartbeat_no_host_check_cmd_in_queue(self, register_mock):
    """A STATUS_COMMAND sitting in the queue makes the third positional
    flag truthy while the second stays falsy."""
    queue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
    queue.commandQueue.put({
        "serviceName": 'HDFS',
        "commandType": "STATUS_COMMAND",
        "clusterName": "",
        "componentName": "DATANODE",
        'configurations': {'global': {}}
    })
    heartbeat = Heartbeat(queue)
    heartbeat.build(12, 6)
    self.assertTrue(register_mock.called)
    args, kwargs = register_mock.call_args_list[0]
    self.assertTrue(args[2])
    self.assertFalse(args[1])
def test_status_commands_does_not_stack_up(self, register_mock, Popen_mock):
    """Re-submitting the same status commands repeatedly must not grow
    the status command queue: duplicates are collapsed.

    Fix: ``assertEquals`` is a deprecated alias of ``assertEqual``
    (removed in Python 3.12) — use the canonical name.
    """
    config = AmbariConfig.AmbariConfig()
    config.set('agent', 'prefix', 'tmp')
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")
    dummy_controller = MagicMock()
    actionQueue = ActionQueue(config, dummy_controller)
    dummy_controller.statusCommandsExecutor = \
        SingleProcessStatusCommandsExecutor(config, actionQueue)
    statusCommands = [{
        "serviceName": 'HDFS',
        "commandType": "STATUS_COMMAND",
        "clusterName": "c1",
        "componentName": "DATANODE",
        "role": "DATANODE",
        'configurations': {'cluster-env': {}}
    }, {
        "serviceName": 'HDFS',
        "commandType": "STATUS_COMMAND",
        "clusterName": "c1",
        "componentName": "NAMENODE",
        "role": "NAMENODE",
        'configurations': {'cluster-env': {}}
    }]
    # Add the same two commands ten times.
    for i in range(10):
        actionQueue.put_status(statusCommands)
    # Status commands should not stack up: size should be 2, not 20.
    self.assertEqual(
        len(dummy_controller.statusCommandsExecutor.statusCommandQueue.queue),
        2)
def test_heartbeat_retries(self):
    """A controller pointed at an unreachable heartbeat URL must stay
    alive and keep retrying, never recording a successful heartbeat.

    Fixes: ``assertEquals`` is a deprecated alias of ``assertEqual``;
    ``Thread.isAlive()`` is the deprecated spelling of ``is_alive()``
    (removed in Python 3.9); manual ``os.sep`` concatenation replaced
    with ``os.path.join``.
    """
    netutil = NetUtil()
    # Shrink the intervals so the retry loop spins quickly in the test.
    netutil.HEARTBEAT_IDDLE_INTERVAL_SEC = 0.05
    netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC = 0.05
    # Building heartbeat object backed by a real action queue.
    testsPath = os.path.dirname(os.path.realpath(__file__))
    dictPath = os.path.join(testsPath, '..', '..', 'main', 'python',
                            'ambari_agent', 'servicesToPidNames.dict')
    AmbariConfig.config.set('services', 'serviceToPidMapFile', dictPath)
    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
    heartbeat = Heartbeat(actionQueue)
    # Testing controller with our heartbeat and a deliberately wrong URL.
    controller = Controller(AmbariConfig.config)
    controller.heartbeat = heartbeat
    controller.heartbeatUrl = BAD_URL
    controller.actionQueue = actionQueue
    controller.logger = self.logger
    controller.netutil = netutil
    thread = Thread(target=controller.heartbeatWithServer)
    thread.start()
    time.sleep(1)
    # The thread has to be stopped anyway, so snapshot the counters now
    # and check them later.
    threadWasAlive = thread.is_alive()
    successfull_heartbits0 = controller.DEBUG_SUCCESSFULL_HEARTBEATS
    heartbeat_retries0 = controller.DEBUG_HEARTBEAT_RETRIES
    # Stopping thread.
    controller.DEBUG_STOP_HEARTBITTING = True
    time.sleep(1)
    # Checking results captured before the thread stop.
    self.assertEqual(threadWasAlive, True, "Heartbeat should be alive now")
    self.assertEqual(successfull_heartbits0, 0,
                     "Heartbeat should not have any success")
    self.assertEqual(heartbeat_retries0 > 1, True,
                     "Heartbeat should retry connecting")
    # Checking results after the thread stop.
    self.assertEqual(thread.is_alive(), False, "Heartbeat should stop now")
    self.assertEqual(controller.DEBUG_SUCCESSFULL_HEARTBEATS, 0,
                     "Heartbeat should not have any success")
def test_heartbeat_no_host_check_cmd_in_progress(self, register_mock):
    """A command currently in progress makes the third positional flag
    truthy while the second stays falsy."""
    queue = ActionQueue(AmbariConfig.AmbariConfig().getConfig())
    queue.commandInProgress = {
        'role': "role",
        'actionId': "actionId",
        'taskId': "taskId",
        'stdout': "stdout",
        'clusterName': "clusterName",
        'stderr': 'none',
        'exitCode': 777,
        'serviceName': "serviceName",
        'status': 'IN_PROGRESS',
        'configurations': {'global': {}},
        'roleCommand': 'START'
    }
    heartbeat = Heartbeat(queue)
    heartbeat.build(12, 6)
    self.assertTrue(register_mock.called)
    args, kwargs = register_mock.call_args_list[0]
    self.assertTrue(args[2])
    self.assertFalse(args[1])
class TestManifestGenerator(TestCase): def setUp(self): # disable stdout out = StringIO.StringIO() sys.stdout = out self.dir = tempfile.mkdtemp() self.config = AmbariConfig() jsonCommand = file('../../main/python/ambari_agent/test.json').read() self.parsedJson = json.loads(jsonCommand) def tearDown(self): shutil.rmtree(self.dir) # enable stdout sys.stdout = sys.__stdout__ def testWriteImports(self): tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1] print tmpFileName tmpFile = file(tmpFileName, 'r+') manifestGenerator.writeImports(tmpFile, '../../main/puppet/modules', self.config.getImports()) tmpFile.seek(0) print tmpFile.read() tmpFile.close() pass @patch.object(manifestGenerator, 'writeImports') @patch.object(manifestGenerator, 'writeNodes') @patch.object(manifestGenerator, 'writeParams') @patch.object(manifestGenerator, 'writeTasks') def testGenerateManifest(self, writeTasksMock, writeParamsMock, writeNodesMock, writeImportsMock): tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1] self.parsedJson['roleParams'] = 'role param' manifestGenerator.generateManifest(self.parsedJson, tmpFileName, '../../main/puppet/modules', self.config.getConfig()) self.assertTrue(writeParamsMock.called) self.assertTrue(writeNodesMock.called) self.assertTrue(writeImportsMock.called) self.assertTrue(writeTasksMock.called) print file(tmpFileName).read() def raiseTypeError(): raise TypeError() writeNodesMock.side_effect = raiseTypeError manifestGenerator.generateManifest(self.parsedJson, tmpFileName, '../../main/puppet/modules', self.config.getConfig()) pass def testEscape(self): shouldBe = '\\\'\\\\' result = manifestGenerator.escape('\'\\') self.assertEqual(result, shouldBe) def test_writeNodes(self): tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1] tmpFile = file(tmpFileName, 'r+') clusterHostInfo = self.parsedJson['clusterHostInfo'] clusterHostInfo['zookeeper_hosts'] = [ "h1.hortonworks.com", "h2.hortonworks.com" ] manifestGenerator.writeNodes(tmpFile, 
clusterHostInfo) tmpFile.seek(0) print tmpFile.read() tmpFile.close() os.remove(tmpFileName) def test_writeNodes_failed(self): tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1] tmpFile = file(tmpFileName, 'r+') clusterHostInfo = self.parsedJson['clusterHostInfo'] clusterHostInfo.update({u'ZOOKEEPER': [None]}) clusterHostInfo['zookeeper_hosts'] = [ "h1.hortonworks.com", "h2.hortonworks.com" ] self.assertRaises(TypeError, manifestGenerator.writeNodes, tmpFile, clusterHostInfo) tmpFile.seek(0) print tmpFile.read() tmpFile.close() os.remove(tmpFileName) def test_writeHostAttributes(self): tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1] tmpFile = file(tmpFileName, 'r+') hostAttributes = {'HostAttr1': '1', 'HostAttr2': '2'} manifestGenerator.writeHostAttributes(tmpFile, hostAttributes) tmpFile.seek(0) print tmpFile.read() tmpFile.close() os.remove(tmpFileName) def test_writeTasks(self): tmpFileName = tempfile.mkstemp(dir=self.dir, text=True)[1] tmpFile = file(tmpFileName, 'r+') roles = [{ 'role': 'ZOOKEEPER_SERVER', 'cmd': 'NONE', 'roleParams': { 'someRoleParams': '-x' } }] clusterHostInfo = self.parsedJson['clusterHostInfo'] clusterHostInfo['zookeeper_hosts'] = [ "h1.hortonworks.com", "h2.hortonworks.com" ] manifestGenerator.writeTasks(tmpFile, roles, self.config, clusterHostInfo, "h1.hortonworks.com") tmpFile.seek(0) print tmpFile.read() tmpFile.close() os.remove(tmpFileName)
def test_build_long_result(self, result_mock):
    """Heartbeat.build must pass every report and component status from
    the action queue through to the heartbeat payload unchanged.

    Fix: ``assertEquals`` is a deprecated alias of ``assertEqual``
    (removed in Python 3.12) — use the canonical name.
    """
    actionQueue = ActionQueue(AmbariConfig.AmbariConfig().getConfig(),
                              'dummy_controller')
    result_mock.return_value = {
        'reports': [{
            'status': 'IN_PROGRESS',
            'stderr': 'Read from /tmp/errors-3.txt',
            'stdout': 'Read from /tmp/output-3.txt',
            'clusterName': u'cc',
            'roleCommand': u'INSTALL',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'taskId': 3,
            'exitCode': 777
        }, {
            'status': 'COMPLETED',
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': 'clusterName',
            'roleCommand': 'UPGRADE',
            'serviceName': 'serviceName',
            'role': 'role',
            'actionId': 17,
            'taskId': 'taskId',
            'exitCode': 0
        }, {
            'status': 'FAILED',
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': u'cc',
            'roleCommand': u'INSTALL',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'taskId': 3,
            'exitCode': 13
        }, {
            'status': 'COMPLETED',
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': u'cc',
            'configurationTags': {'global': {'tag': 'v1'}},
            'roleCommand': u'INSTALL',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'taskId': 3,
            'exitCode': 0
        }],
        'componentStatus': [
            {'status': 'HEALTHY', 'componentName': 'DATANODE'},
            {'status': 'UNHEALTHY', 'componentName': 'NAMENODE'},
        ],
    }
    heartbeat = Heartbeat(actionQueue)
    hb = heartbeat.build(10)
    # Normalize host-specific fields so the comparison is deterministic.
    hb['hostname'] = 'hostname'
    hb['timestamp'] = 'timestamp'
    expected = {
        'nodeStatus': {'status': 'HEALTHY', 'cause': 'NONE'},
        'timestamp': 'timestamp',
        'hostname': 'hostname',
        'responseId': 10,
        'reports': [{
            'status': 'IN_PROGRESS',
            'roleCommand': u'INSTALL',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'stderr': 'Read from /tmp/errors-3.txt',
            'stdout': 'Read from /tmp/output-3.txt',
            'clusterName': u'cc',
            'taskId': 3,
            'exitCode': 777
        }, {
            'status': 'COMPLETED',
            'roleCommand': 'UPGRADE',
            'serviceName': 'serviceName',
            'role': 'role',
            'actionId': 17,
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': 'clusterName',
            'taskId': 'taskId',
            'exitCode': 0
        }, {
            'status': 'FAILED',
            'roleCommand': u'INSTALL',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': u'cc',
            'taskId': 3,
            'exitCode': 13
        }, {
            'status': 'COMPLETED',
            'stdout': 'out',
            'configurationTags': {'global': {'tag': 'v1'}},
            'taskId': 3,
            'exitCode': 0,
            'roleCommand': u'INSTALL',
            'clusterName': u'cc',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'stderr': 'stderr'
        }],
        'componentStatus': [
            {'status': 'HEALTHY', 'componentName': 'DATANODE'},
            {'status': 'UNHEALTHY', 'componentName': 'NAMENODE'}
        ]
    }
    self.assertEqual(hb, expected)