Example #1
import json
import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import datetime, sys, socket
import resource_management.libraries.functions


@patch.object(resource_management.libraries.functions,
              "get_unique_id_and_date",
              new=MagicMock(return_value=''))
@patch("socket.socket")
@patch("time.time", new=MagicMock(return_value=1431110511.43))
class TestServiceCheck(RMFTestCase):
    COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
    STACK_VERSION = "2.0.6"

    def test_service_check_default(self, socket_mock):

        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR +
                           "/scripts/service_check.py",
                           classname="HiveServiceCheck",
                           command="service_check",
                           config_file="default.json",
                           hdp_stack_version=self.STACK_VERSION,
                           target=RMFTestCase.TARGET_COMMON_SERVICES)
Example #2
    def accesszone_module_mock(self, mocker):
        mocker.patch(MockAccessZoneApi.MODULE_UTILS_PATH + '.ApiException',
                     new=MockApiException)
        az_module_mock = PowerScaleAccessZone()
        az_module_mock.module = MagicMock()
        return az_module_mock
Example #3
import os

from ambari_commons import inet_utils, OSCheck
from resource_management import Script, ConfigDictionary
from mock.mock import patch
from mock.mock import MagicMock
from unittest import TestCase

from check_host import CheckHost

from only_for_platform import get_platform, not_for_platform, only_for_platform, os_distro_value, PLATFORM_WINDOWS

from ambari_agent.HostCheckReportFileHandler import HostCheckReportFileHandler


@patch.object(HostCheckReportFileHandler,
              "writeHostChecksCustomActionsFile",
              new=MagicMock())
class TestCheckHost(TestCase):
    current_dir = os.path.dirname(os.path.realpath(__file__))

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch("os.path.isfile")
    @patch.object(Script, 'get_config')
    @patch.object(Script, 'get_tmp_dir')
    @patch("resource_management.libraries.script.Script.put_structured_out")
    def testJavaHomeAvailableCheck(self, structured_out_mock, get_tmp_dir_mock,
                                   mock_config, os_isfile_mock):
        # test, java home exists
        os_isfile_mock.return_value = True
        get_tmp_dir_mock.return_value = "/tmp"
Example #4
# Imports reconstructed for this excerpt; the module paths below follow the
# ambari-agent test-suite layout and may need adjusting for other versions.
import copy
import logging
import os
import sys
import tempfile
import threading
import time
from Queue import Queue
from threading import Thread
from unittest import TestCase

from mock.mock import MagicMock, call, patch

from ambari_agent.ActionQueue import ActionQueue
from ambari_agent.ActualConfigHandler import ActualConfigHandler
from ambari_agent.AmbariConfig import AmbariConfig
from ambari_agent.CustomServiceOrchestrator import CustomServiceOrchestrator
from ambari_agent.LiveStatus import LiveStatus
from ambari_agent.PythonExecutor import PythonExecutor
from ambari_agent.RecoveryManager import RecoveryManager
from ambari_agent.StackVersionsFileHandler import StackVersionsFileHandler
from ambari_commons import OSCheck
from only_for_platform import not_for_platform, os_distro_value, PLATFORM_LINUX


class TestActionQueue(TestCase):
    def setUp(self):
        # save original open() method for later use
        self.original_open = open

    def tearDown(self):
        sys.stdout = sys.__stdout__

    logger = logging.getLogger()

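    # Canned command payloads of the kind the Ambari server dispatches to the
    # agent; the tests below feed them directly into the ActionQueue.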
    datanode_install_command = {
        'commandType': 'EXECUTION_COMMAND',
        'role': u'DATANODE',
        'roleCommand': u'INSTALL',
        'commandId': '1-1',
        'taskId': 3,
        'clusterName': u'cc',
        'serviceName': u'HDFS',
        'hostLevelParams': {},
        'configurations': {
            'global': {}
        },
        'configurationTags': {
            'global': {
                'tag': 'v1'
            }
        }
    }

    datanode_auto_start_command = {
        'commandType': 'AUTO_EXECUTION_COMMAND',
        'role': u'DATANODE',
        'roleCommand': u'START',
        'commandId': '1-1',
        'taskId': 3,
        'clusterName': u'cc',
        'serviceName': u'HDFS',
        'hostLevelParams': {},
        'configurations': {
            'global': {}
        },
        'configurationTags': {
            'global': {
                'tag': 'v1'
            }
        }
    }

    datanode_upgrade_command = {
        'commandId': 17,
        'role': "role",
        'taskId': "taskId",
        'clusterName': "clusterName",
        'serviceName': "serviceName",
        'roleCommand': 'UPGRADE',
        'hostname': "localhost.localdomain",
        'hostLevelParams': {},
        'clusterHostInfo': "clusterHostInfo",
        'commandType': "EXECUTION_COMMAND",
        'configurations': {
            'global': {}
        },
        'roleParams': {},
        'commandParams': {
            'source_stack_version': 'HDP-1.2.1',
            'target_stack_version': 'HDP-1.3.0'
        }
    }

    namenode_install_command = {
        'commandType': 'EXECUTION_COMMAND',
        'role': u'NAMENODE',
        'roleCommand': u'INSTALL',
        'commandId': '1-1',
        'taskId': 4,
        'clusterName': u'cc',
        'serviceName': u'HDFS',
        'hostLevelParams': {}
    }

    snamenode_install_command = {
        'commandType': 'EXECUTION_COMMAND',
        'role': u'SECONDARY_NAMENODE',
        'roleCommand': u'INSTALL',
        'commandId': '1-1',
        'taskId': 5,
        'clusterName': u'cc',
        'serviceName': u'HDFS',
        'hostLevelParams': {}
    }

    hbase_install_command = {
        'commandType': 'EXECUTION_COMMAND',
        'role': u'HBASE',
        'roleCommand': u'INSTALL',
        'commandId': '1-1',
        'taskId': 7,
        'clusterName': u'cc',
        'serviceName': u'HDFS',
        'hostLevelParams': {}
    }

    status_command = {
        "serviceName": 'HDFS',
        "commandType": "STATUS_COMMAND",
        "clusterName": "",
        "componentName": "DATANODE",
        'configurations': {},
        'hostLevelParams': {}
    }

    datanode_restart_command = {
        'commandType': 'EXECUTION_COMMAND',
        'role': u'DATANODE',
        'roleCommand': u'CUSTOM_COMMAND',
        'commandId': '1-1',
        'taskId': 9,
        'clusterName': u'cc',
        'serviceName': u'HDFS',
        'configurations': {
            'global': {}
        },
        'configurationTags': {
            'global': {
                'tag': 'v123'
            }
        },
        'hostLevelParams': {
            'custom_command': 'RESTART',
            'clientsToUpdateConfigs': []
        }
    }

    datanode_restart_command_no_clients_update = {
        'commandType': 'EXECUTION_COMMAND',
        'role': u'DATANODE',
        'roleCommand': u'CUSTOM_COMMAND',
        'commandId': '1-1',
        'taskId': 9,
        'clusterName': u'cc',
        'serviceName': u'HDFS',
        'configurations': {
            'global': {}
        },
        'configurationTags': {
            'global': {
                'tag': 'v123'
            }
        },
        'hostLevelParams': {
            'custom_command': 'RESTART'
        }
    }

    status_command_for_alerts = {
        "serviceName": 'FLUME',
        "commandType": "STATUS_COMMAND",
        "clusterName": "",
        "componentName": "FLUME_HANDLER",
        'configurations': {},
        'hostLevelParams': {}
    }

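    # Retry behaviour is driven by command_retry_enabled and
    # max_duration_for_retries in commandParams (exercised by the retry tests
    # further down).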
    retryable_command = {
        'commandType': 'EXECUTION_COMMAND',
        'role': 'NAMENODE',
        'roleCommand': 'INSTALL',
        'commandId': '1-1',
        'taskId': 19,
        'clusterName': 'c1',
        'serviceName': 'HDFS',
        'configurations': {
            'global': {}
        },
        'configurationTags': {
            'global': {
                'tag': 'v123'
            }
        },
        'commandParams': {
            'script_type': 'PYTHON',
            'script': 'script.py',
            'command_timeout': '600',
            'jdk_location': '.',
            'service_package_folder': '.',
            'command_retry_enabled': 'true',
            'max_duration_for_retries': '5'
        },
        'hostLevelParams': {}
    }

    background_command = {
        'commandType': 'BACKGROUND_EXECUTION_COMMAND',
        'role': 'NAMENODE',
        'roleCommand': 'CUSTOM_COMMAND',
        'commandId': '1-1',
        'taskId': 19,
        'clusterName': 'c1',
        'serviceName': 'HDFS',
        'configurations': {
            'global': {}
        },
        'configurationTags': {
            'global': {
                'tag': 'v123'
            }
        },
        'hostLevelParams': {
            'custom_command': 'REBALANCE_HDFS'
        },
        'commandParams': {
            'script_type': 'PYTHON',
            'script': 'script.py',
            'command_timeout': '600',
            'jdk_location': '.',
            'service_package_folder': '.'
        }
    }
    cancel_background_command = {
        'commandType': 'EXECUTION_COMMAND',
        'role': 'NAMENODE',
        'roleCommand': 'ACTIONEXECUTE',
        'commandId': '1-1',
        'taskId': 20,
        'clusterName': 'c1',
        'serviceName': 'HDFS',
        'configurations': {
            'global': {}
        },
        'configurationTags': {
            'global': {}
        },
        'hostLevelParams': {},
        'commandParams': {
            'script_type': 'PYTHON',
            'script': 'cancel_background_task.py',
            'before_system_hook_function': 'fetch_bg_pid_by_taskid',
            'jdk_location': '.',
            'command_timeout': '600',
            'service_package_folder': '.',
            'cancel_policy': 'SIGKILL',
            'cancel_task_id': "19",
        }
    }

    @patch.object(AmbariConfig, "get_parallel_exec_option")
    @patch.object(ActionQueue, "process_command")
    @patch.object(Queue, "get")
    @patch.object(CustomServiceOrchestrator, "__init__")
    def test_ActionQueueStartStop(self, CustomServiceOrchestrator_mock,
                                  get_mock, process_command_mock,
                                  get_parallel_exec_option_mock):
        CustomServiceOrchestrator_mock.return_value = None
        dummy_controller = MagicMock()
        config = MagicMock()
        get_parallel_exec_option_mock.return_value = 0
        config.get_parallel_exec_option = get_parallel_exec_option_mock
        actionQueue = ActionQueue(config, dummy_controller)
        actionQueue.start()
        time.sleep(0.1)
        actionQueue.stop()
        actionQueue.join()
        self.assertEqual(actionQueue.stopped(), True,
                         'Action queue is not stopped.')
        self.assertTrue(process_command_mock.call_count > 1)

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch("traceback.print_exc")
    @patch.object(ActionQueue, "execute_command")
    @patch.object(ActionQueue, "execute_status_command")
    def test_process_command(self, execute_status_command_mock,
                             execute_command_mock, print_exc_mock):
        dummy_controller = MagicMock()
        config = AmbariConfig()
        config.set('agent', 'tolerate_download_failures', "true")
        actionQueue = ActionQueue(config, dummy_controller)
        execution_command = {
            'commandType': ActionQueue.EXECUTION_COMMAND,
        }
        status_command = {
            'commandType': ActionQueue.STATUS_COMMAND,
        }
        wrong_command = {
            'commandType': "SOME_WRONG_COMMAND",
        }
        # Try wrong command
        actionQueue.process_command(wrong_command)
        self.assertFalse(execute_command_mock.called)
        self.assertFalse(execute_status_command_mock.called)
        self.assertFalse(print_exc_mock.called)

        execute_command_mock.reset_mock()
        execute_status_command_mock.reset_mock()
        print_exc_mock.reset_mock()
        # Try normal execution
        actionQueue.process_command(execution_command)
        self.assertTrue(execute_command_mock.called)
        self.assertFalse(execute_status_command_mock.called)
        self.assertFalse(print_exc_mock.called)

        execute_command_mock.reset_mock()
        execute_status_command_mock.reset_mock()
        print_exc_mock.reset_mock()

        actionQueue.process_command(status_command)
        self.assertFalse(execute_command_mock.called)
        self.assertTrue(execute_status_command_mock.called)
        self.assertFalse(print_exc_mock.called)

        execute_command_mock.reset_mock()
        execute_status_command_mock.reset_mock()
        print_exc_mock.reset_mock()

        # Try exception to check proper logging
        def side_effect(self):
            raise Exception("TerribleException")

        execute_command_mock.side_effect = side_effect
        actionQueue.process_command(execution_command)
        self.assertTrue(print_exc_mock.called)

        print_exc_mock.reset_mock()

        execute_status_command_mock.side_effect = side_effect
        actionQueue.process_command(execution_command)
        self.assertTrue(print_exc_mock.called)

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch("__builtin__.open")
    @patch.object(ActionQueue, "status_update_callback")
    def test_auto_execute_command(self, status_update_callback_mock,
                                  open_mock):
        # Make file read calls visible
        def open_side_effect(file, mode):
            if mode == 'r':
                file_mock = MagicMock()
                file_mock.read.return_value = "Read from " + str(file)
                return file_mock
            else:
                return self.original_open(file, mode)

        open_mock.side_effect = open_side_effect

        config = AmbariConfig()
        tempdir = tempfile.gettempdir()
        config.set('agent', 'prefix', tempdir)
        config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
        config.set('agent', 'tolerate_download_failures', "true")
        dummy_controller = MagicMock()
        dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp())
        dummy_controller.recovery_manager.update_config(
            5, 5, 1, 11, True, False, "")

        actionQueue = ActionQueue(config, dummy_controller)
        unfreeze_flag = threading.Event()
        python_execution_result_dict = {
            'stdout': 'out',
            'stderr': 'stderr',
            'structuredOut': ''
        }

        def side_effect(command,
                        tmpoutfile,
                        tmperrfile,
                        override_output_files=True,
                        retry=False):
            unfreeze_flag.wait()
            return python_execution_result_dict

        def patched_aq_execute_command(command):
            # We have to perform patching for separate thread in the same thread
            with patch.object(CustomServiceOrchestrator,
                              "runCommand") as runCommand_mock:
                runCommand_mock.side_effect = side_effect
                actionQueue.process_command(command)

        python_execution_result_dict['status'] = 'COMPLETE'
        python_execution_result_dict['exitcode'] = 0
        self.assertFalse(actionQueue.tasks_in_progress_or_pending())
        # We call method in a separate thread
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_auto_start_command, ))
        execution_thread.start()
        #  check in progress report
        # wait until ready
        while True:
            time.sleep(0.1)
            if actionQueue.tasks_in_progress_or_pending():
                break
        # Continue command execution
        unfreeze_flag.set()
        # wait until ready
        check_queue = True
        while check_queue:
            report = actionQueue.result()
            if not actionQueue.tasks_in_progress_or_pending():
                break
            time.sleep(0.1)

        self.assertEqual(len(report['reports']), 0)

        ## Test failed execution
        python_execution_result_dict['status'] = 'FAILED'
        python_execution_result_dict['exitcode'] = 13
        # We call method in a separate thread
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_auto_start_command, ))
        execution_thread.start()
        unfreeze_flag.set()
        #  check in progress report
        # wait until ready
        while check_queue:
            report = actionQueue.result()
            if not actionQueue.tasks_in_progress_or_pending():
                break
            time.sleep(0.1)

        self.assertEqual(len(report['reports']), 0)

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch("__builtin__.open")
    @patch.object(ActionQueue, "status_update_callback")
    def test_execute_command(self, status_update_callback_mock, open_mock):
        # Make file read calls visible
        def open_side_effect(file, mode):
            if mode == 'r':
                file_mock = MagicMock()
                file_mock.read.return_value = "Read from " + str(file)
                return file_mock
            else:
                return self.original_open(file, mode)

        open_mock.side_effect = open_side_effect

        config = AmbariConfig()
        tempdir = tempfile.gettempdir()
        config.set('agent', 'prefix', tempdir)
        config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
        config.set('agent', 'tolerate_download_failures', "true")
        dummy_controller = MagicMock()
        actionQueue = ActionQueue(config, dummy_controller)
        unfreeze_flag = threading.Event()
        python_execution_result_dict = {
            'stdout': 'out',
            'stderr': 'stderr',
            'structuredOut': ''
        }

        def side_effect(command,
                        tmpoutfile,
                        tmperrfile,
                        override_output_files=True,
                        retry=False):
            unfreeze_flag.wait()
            return python_execution_result_dict

        def patched_aq_execute_command(command):
            # We have to perform patching for separate thread in the same thread
            with patch.object(CustomServiceOrchestrator,
                              "runCommand") as runCommand_mock:
                runCommand_mock.side_effect = side_effect
                actionQueue.execute_command(command)

        ### Test install/start/stop command ###
        ## Test successful execution with configuration tags
        python_execution_result_dict['status'] = 'COMPLETE'
        python_execution_result_dict['exitcode'] = 0
        # We call method in a separate thread
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_install_command, ))
        execution_thread.start()
        #  check in progress report
        # wait until ready
        while True:
            time.sleep(0.1)
            report = actionQueue.result()
            if len(report['reports']) != 0:
                break
        expected = {
            'status':
            'IN_PROGRESS',
            'stderr':
            'Read from {0}'.format(os.path.join(tempdir, "errors-3.txt")),
            'stdout':
            'Read from {0}'.format(os.path.join(tempdir, "output-3.txt")),
            'structuredOut':
            'Read from {0}'.format(
                os.path.join(tempdir, "structured-out-3.json")),
            'clusterName':
            u'cc',
            'roleCommand':
            u'INSTALL',
            'serviceName':
            u'HDFS',
            'role':
            u'DATANODE',
            'actionId':
            '1-1',
            'taskId':
            3,
            'exitCode':
            777
        }
        self.assertEqual(report['reports'][0], expected)
        self.assertTrue(actionQueue.tasks_in_progress_or_pending())

        # Continue command execution
        unfreeze_flag.set()
        # wait until ready
        while report['reports'][0]['status'] == 'IN_PROGRESS':
            time.sleep(0.1)
            report = actionQueue.result()
        # check report
        configname = os.path.join(tempdir, 'config.json')
        expected = {
            'status': 'COMPLETED',
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': u'cc',
            'structuredOut': '""',
            'roleCommand': u'INSTALL',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'taskId': 3,
            'configurationTags': {
                'global': {
                    'tag': 'v1'
                }
            },
            'exitCode': 0
        }
        self.assertEqual(len(report['reports']), 1)
        self.assertEqual(report['reports'][0], expected)
        self.assertTrue(os.path.isfile(configname))
        # Check that we had 2 status update calls ( IN_PROGRESS and COMPLETE)
        self.assertEqual(status_update_callback_mock.call_count, 2)
        os.remove(configname)

        # now should not have reports (read complete/failed reports are deleted)
        report = actionQueue.result()
        self.assertEqual(len(report['reports']), 0)

        ## Test failed execution
        python_execution_result_dict['status'] = 'FAILED'
        python_execution_result_dict['exitcode'] = 13
        # We call method in a separate thread
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_install_command, ))
        execution_thread.start()
        unfreeze_flag.set()
        #  check in progress report
        # wait until ready
        report = actionQueue.result()
        while len(report['reports']) == 0 or \
                        report['reports'][0]['status'] == 'IN_PROGRESS':
            time.sleep(0.1)
            report = actionQueue.result()
            # check report
        expected = {
            'status': 'FAILED',
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': u'cc',
            'structuredOut': '""',
            'roleCommand': u'INSTALL',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'taskId': 3,
            'exitCode': 13
        }
        self.assertEqual(len(report['reports']), 1)
        self.assertEqual(report['reports'][0], expected)

        # now should not have reports (read complete/failed reports are deleted)
        report = actionQueue.result()
        self.assertEqual(len(report['reports']), 0)

        ### Test upgrade command ###
        python_execution_result_dict['status'] = 'COMPLETE'
        python_execution_result_dict['exitcode'] = 0
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_upgrade_command, ))
        execution_thread.start()
        unfreeze_flag.set()
        # wait until ready
        report = actionQueue.result()
        while len(report['reports']) == 0 or \
                        report['reports'][0]['status'] == 'IN_PROGRESS':
            time.sleep(0.1)
            report = actionQueue.result()
        # check report
        expected = {
            'status': 'COMPLETED',
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': 'clusterName',
            'structuredOut': '""',
            'roleCommand': 'UPGRADE',
            'serviceName': 'serviceName',
            'role': 'role',
            'actionId': 17,
            'taskId': 'taskId',
            'exitCode': 0
        }
        self.assertEqual(len(report['reports']), 1)
        self.assertEqual(report['reports'][0], expected)

        # now should not have reports (read complete/failed reports are deleted)
        report = actionQueue.result()
        self.assertEqual(len(report['reports']), 0)

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch.object(CustomServiceOrchestrator, "runCommand")
    @patch("CommandStatusDict.CommandStatusDict")
    @patch.object(ActionQueue, "status_update_callback")
    def test_store_configuration_tags(self, status_update_callback_mock,
                                      command_status_dict_mock,
                                      cso_runCommand_mock):
        custom_service_orchestrator_execution_result_dict = {
            'stdout': 'out',
            'stderr': 'stderr',
            'structuredOut': '',
            'exitcode': 0
        }
        cso_runCommand_mock.return_value = custom_service_orchestrator_execution_result_dict

        config = AmbariConfig()
        tempdir = tempfile.gettempdir()
        config.set('agent', 'prefix', tempdir)
        config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
        config.set('agent', 'tolerate_download_failures', "true")
        dummy_controller = MagicMock()
        actionQueue = ActionQueue(config, dummy_controller)
        actionQueue.execute_command(self.datanode_restart_command)
        report = actionQueue.result()
        expected = {
            'status': 'COMPLETED',
            'configurationTags': {
                'global': {
                    'tag': 'v123'
                }
            },
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': u'cc',
            'structuredOut': '""',
            'roleCommand': u'CUSTOM_COMMAND',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'taskId': 9,
            'customCommand': 'RESTART',
            'exitCode': 0
        }
        # Agent caches configurationTags if custom_command RESTART completed
        self.assertEqual(len(report['reports']), 1)
        self.assertEqual(expected, report['reports'][0])

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch.object(ActualConfigHandler, "write_client_components")
    @patch.object(CustomServiceOrchestrator, "runCommand")
    @patch("CommandStatusDict.CommandStatusDict")
    @patch.object(ActionQueue, "status_update_callback")
    def test_store_configuration_tags_no_clients(self,
                                                 status_update_callback_mock,
                                                 command_status_dict_mock,
                                                 cso_runCommand_mock,
                                                 write_client_components_mock):
        custom_service_orchestrator_execution_result_dict = {
            'stdout': 'out',
            'stderr': 'stderr',
            'structuredOut': '',
            'exitcode': 0
        }
        cso_runCommand_mock.return_value = custom_service_orchestrator_execution_result_dict

        config = AmbariConfig()
        tempdir = tempfile.gettempdir()
        config.set('agent', 'prefix', tempdir)
        config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
        config.set('agent', 'tolerate_download_failures', "true")
        dummy_controller = MagicMock()
        actionQueue = ActionQueue(config, dummy_controller)
        actionQueue.execute_command(
            self.datanode_restart_command_no_clients_update)
        report = actionQueue.result()
        expected = {
            'status': 'COMPLETED',
            'configurationTags': {
                'global': {
                    'tag': 'v123'
                }
            },
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': u'cc',
            'structuredOut': '""',
            'roleCommand': u'CUSTOM_COMMAND',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'taskId': 9,
            'customCommand': 'RESTART',
            'exitCode': 0
        }
        # Agent caches configurationTags if custom_command RESTART completed
        self.assertEqual(len(report['reports']), 1)
        self.assertEqual(expected, report['reports'][0])
        self.assertFalse(write_client_components_mock.called)

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch.object(ActionQueue, "status_update_callback")
    @patch.object(StackVersionsFileHandler, "read_stack_version")
    @patch.object(CustomServiceOrchestrator, "requestComponentStatus")
    @patch.object(CustomServiceOrchestrator, "requestComponentSecurityState")
    @patch.object(ActionQueue, "execute_command")
    @patch.object(LiveStatus, "build")
    @patch.object(CustomServiceOrchestrator, "__init__")
    def test_execute_status_command(self, CustomServiceOrchestrator_mock,
                                    build_mock, execute_command_mock,
                                    requestComponentSecurityState_mock,
                                    requestComponentStatus_mock,
                                    read_stack_version_mock,
                                    status_update_callback):
        CustomServiceOrchestrator_mock.return_value = None
        dummy_controller = MagicMock()
        actionQueue = ActionQueue(AmbariConfig(), dummy_controller)

        build_mock.return_value = {'dummy report': ''}

        dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp())

        requestComponentStatus_mock.reset_mock()
        requestComponentStatus_mock.return_value = {'exitcode': 0}

        requestComponentSecurityState_mock.reset_mock()
        requestComponentSecurityState_mock.return_value = 'UNKNOWN'

        actionQueue.execute_status_command(self.status_command)
        report = actionQueue.result()
        expected = {'dummy report': '', 'securityState': 'UNKNOWN'}

        self.assertEqual(len(report['componentStatus']), 1)
        self.assertEqual(report['componentStatus'][0], expected)
        self.assertTrue(requestComponentStatus_mock.called)

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch.object(ActionQueue, "status_update_callback")
    @patch.object(StackVersionsFileHandler, "read_stack_version")
    @patch.object(CustomServiceOrchestrator, "requestComponentStatus")
    @patch.object(CustomServiceOrchestrator, "requestComponentSecurityState")
    @patch.object(ActionQueue, "execute_command")
    @patch.object(LiveStatus, "build")
    @patch.object(CustomServiceOrchestrator, "__init__")
    def test_execute_status_command_with_alerts(
            self, CustomServiceOrchestrator_mock, build_mock,
            execute_command_mock, requestComponentSecurityState_mock,
            requestComponentStatus_mock, read_stack_version_mock,
            status_update_callback):
        CustomServiceOrchestrator_mock.return_value = None
        dummy_controller = MagicMock()
        actionQueue = ActionQueue(AmbariConfig(), dummy_controller)

        requestComponentStatus_mock.reset_mock()
        requestComponentStatus_mock.return_value = {
            'exitcode': 0,
            'stdout': 'out',
            'stderr': 'err',
            'structuredOut': {
                'alerts': [{
                    'name': 'flume_alert'
                }]
            }
        }
        build_mock.return_value = {'somestatusresult': 'aresult'}

        actionQueue.execute_status_command(self.status_command_for_alerts)

        report = actionQueue.result()

        self.assertTrue(requestComponentStatus_mock.called)
        self.assertEqual(len(report['componentStatus']), 1)
        self.assertTrue(report['componentStatus'][0].has_key('alerts'))

    @patch.object(AmbariConfig, "get_parallel_exec_option")
    @patch.object(ActionQueue, "process_command")
    @patch.object(Queue, "get")
    @patch.object(CustomServiceOrchestrator, "__init__")
    def test_reset_queue(self, CustomServiceOrchestrator_mock, get_mock,
                         process_command_mock, gpeo_mock):
        CustomServiceOrchestrator_mock.return_value = None
        dummy_controller = MagicMock()
        dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp())
        config = MagicMock()
        gpeo_mock.return_value = 0
        config.get_parallel_exec_option = gpeo_mock
        actionQueue = ActionQueue(config, dummy_controller)
        actionQueue.start()
        actionQueue.put(
            [self.datanode_install_command, self.hbase_install_command])
        self.assertEqual(2, actionQueue.commandQueue.qsize())
        self.assertTrue(actionQueue.tasks_in_progress_or_pending())
        actionQueue.reset()
        self.assertTrue(actionQueue.commandQueue.empty())
        self.assertFalse(actionQueue.tasks_in_progress_or_pending())
        time.sleep(0.1)
        actionQueue.stop()
        actionQueue.join()
        self.assertEqual(actionQueue.stopped(), True,
                         'Action queue is not stopped.')

    @patch.object(AmbariConfig, "get_parallel_exec_option")
    @patch.object(ActionQueue, "process_command")
    @patch.object(Queue, "get")
    @patch.object(CustomServiceOrchestrator, "__init__")
    def test_cancel(self, CustomServiceOrchestrator_mock, get_mock,
                    process_command_mock, gpeo_mock):
        CustomServiceOrchestrator_mock.return_value = None
        dummy_controller = MagicMock()
        config = MagicMock()
        gpeo_mock.return_value = 0
        config.get_parallel_exec_option = gpeo_mock
        actionQueue = ActionQueue(config, dummy_controller)
        actionQueue.start()
        actionQueue.put(
            [self.datanode_install_command, self.hbase_install_command])
        self.assertEqual(2, actionQueue.commandQueue.qsize())
        actionQueue.reset()
        self.assertTrue(actionQueue.commandQueue.empty())
        time.sleep(0.1)
        actionQueue.stop()
        actionQueue.join()
        self.assertEqual(actionQueue.stopped(), True,
                         'Action queue is not stopped.')

    @patch.object(AmbariConfig, "get_parallel_exec_option")
    @patch.object(ActionQueue, "process_command")
    @patch.object(CustomServiceOrchestrator, "__init__")
    def test_parallel_exec(self, CustomServiceOrchestrator_mock,
                           process_command_mock, gpeo_mock):
        CustomServiceOrchestrator_mock.return_value = None
        dummy_controller = MagicMock()
        config = MagicMock()
        gpeo_mock.return_value = 1
        config.get_parallel_exec_option = gpeo_mock
        actionQueue = ActionQueue(config, dummy_controller)
        actionQueue.put(
            [self.datanode_install_command, self.hbase_install_command])
        self.assertEqual(2, actionQueue.commandQueue.qsize())
        actionQueue.start()
        time.sleep(1)
        actionQueue.stop()
        actionQueue.join()
        self.assertEqual(actionQueue.stopped(), True,
                         'Action queue is not stopped.')
        self.assertEqual(2, process_command_mock.call_count)
        process_command_mock.assert_has_calls([
            call(self.datanode_install_command),
            call(self.hbase_install_command)
        ], any_order=True)

    @not_for_platform(PLATFORM_LINUX)
    @patch("time.sleep")
    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch.object(StackVersionsFileHandler, "read_stack_version")
    @patch.object(CustomServiceOrchestrator, "__init__")
    def test_execute_retryable_command(self, CustomServiceOrchestrator_mock,
                                       read_stack_version_mock, sleep_mock):
        CustomServiceOrchestrator_mock.return_value = None
        dummy_controller = MagicMock()
        actionQueue = ActionQueue(AmbariConfig(), dummy_controller)
        python_execution_result_dict = {
            'exitcode': 1,
            'stdout': 'out',
            'stderr': 'stderr',
            'structuredOut': '',
            'status': 'FAILED'
        }

        def side_effect(command,
                        tmpoutfile,
                        tmperrfile,
                        override_output_files=True,
                        retry=False):
            return python_execution_result_dict

        command = copy.deepcopy(self.retryable_command)
        with patch.object(CustomServiceOrchestrator,
                          "runCommand") as runCommand_mock:
            runCommand_mock.side_effect = side_effect
            actionQueue.execute_command(command)

        # assert that the python executor was started
        self.assertTrue(runCommand_mock.called)
        self.assertEqual(3, runCommand_mock.call_count)
        self.assertEqual(2, sleep_mock.call_count)
        sleep_mock.assert_has_calls([call(2), call(3)], False)
        runCommand_mock.assert_has_calls([
            call(command,
                 os.sep + 'tmp' + os.sep + 'ambari-agent' + os.sep +
                 'output-19.txt',
                 os.sep + 'tmp' + os.sep + 'ambari-agent' + os.sep +
                 'errors-19.txt',
                 override_output_files=True,
                 retry=False),
            call(command,
                 os.sep + 'tmp' + os.sep + 'ambari-agent' + os.sep +
                 'output-19.txt',
                 os.sep + 'tmp' + os.sep + 'ambari-agent' + os.sep +
                 'errors-19.txt',
                 override_output_files=False,
                 retry=True),
            call(command,
                 os.sep + 'tmp' + os.sep + 'ambari-agent' + os.sep +
                 'output-19.txt',
                 os.sep + 'tmp' + os.sep + 'ambari-agent' + os.sep +
                 'errors-19.txt',
                 override_output_files=False,
                 retry=True)
        ])

    @patch("time.time")
    @patch("time.sleep")
    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch.object(StackVersionsFileHandler, "read_stack_version")
    @patch.object(CustomServiceOrchestrator, "__init__")
    def test_execute_retryable_command_with_time_lapse(
            self, CustomServiceOrchestrator_mock, read_stack_version_mock,
            sleep_mock, time_mock):
        CustomServiceOrchestrator_mock.return_value = None
        dummy_controller = MagicMock()
        actionQueue = ActionQueue(AmbariConfig(), dummy_controller)
        python_execution_result_dict = {
            'exitcode': 1,
            'stdout': 'out',
            'stderr': 'stderr',
            'structuredOut': '',
            'status': 'FAILED'
        }

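        # An extra leading timestamp is consumed when INFO logging is enabled,
        # presumably because emitting the log record reads the patched
        # time.time() once more (assumption; not stated in the original test).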
        times_arr = [8, 10, 14, 18, 22]
        if self.logger.isEnabledFor(logging.INFO):
            times_arr.insert(0, 4)
        time_mock.side_effect = times_arr

        def side_effect(command,
                        tmpoutfile,
                        tmperrfile,
                        override_output_files=True,
                        retry=False):
            return python_execution_result_dict

        command = copy.deepcopy(self.retryable_command)
        with patch.object(CustomServiceOrchestrator,
                          "runCommand") as runCommand_mock:
            runCommand_mock.side_effect = side_effect
            actionQueue.execute_command(command)

        # assert that the python executor was started
        self.assertTrue(runCommand_mock.called)
        self.assertEqual(2, runCommand_mock.call_count)
        self.assertEqual(1, sleep_mock.call_count)
        sleep_mock.assert_has_calls([call(2)], False)
        runCommand_mock.assert_has_calls([
            call(command,
                 os.sep + 'tmp' + os.sep + 'ambari-agent' + os.sep +
                 'output-19.txt',
                 os.sep + 'tmp' + os.sep + 'ambari-agent' + os.sep +
                 'errors-19.txt',
                 override_output_files=True,
                 retry=False),
            call(command,
                 os.sep + 'tmp' + os.sep + 'ambari-agent' + os.sep +
                 'output-19.txt',
                 os.sep + 'tmp' + os.sep + 'ambari-agent' + os.sep +
                 'errors-19.txt',
                 override_output_files=False,
                 retry=True)
        ])

    #retryable_command
    @not_for_platform(PLATFORM_LINUX)
    @patch("time.sleep")
    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch.object(StackVersionsFileHandler, "read_stack_version")
    @patch.object(CustomServiceOrchestrator, "__init__")
    def test_execute_retryable_command_fail_and_succeed(
            self, CustomServiceOrchestrator_mock, read_stack_version_mock,
            sleep_mock):
        CustomServiceOrchestrator_mock.return_value = None
        dummy_controller = MagicMock()
        actionQueue = ActionQueue(AmbariConfig(), dummy_controller)
        execution_result_fail_dict = {
            'exitcode': 1,
            'stdout': 'out',
            'stderr': 'stderr',
            'structuredOut': '',
            'status': 'FAILED'
        }
        execution_result_succ_dict = {
            'exitcode': 0,
            'stdout': 'out',
            'stderr': 'stderr',
            'structuredOut': '',
            'status': 'COMPLETED'
        }

        command = copy.deepcopy(self.retryable_command)
        with patch.object(CustomServiceOrchestrator,
                          "runCommand") as runCommand_mock:
            runCommand_mock.side_effect = [
                execution_result_fail_dict, execution_result_succ_dict
            ]
            actionQueue.execute_command(command)

        # assert that the python executor was started
        self.assertTrue(runCommand_mock.called)
        self.assertEqual(2, runCommand_mock.call_count)
        self.assertEqual(1, sleep_mock.call_count)
        sleep_mock.assert_any_call(2)

    @not_for_platform(PLATFORM_LINUX)
    @patch("time.sleep")
    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch.object(StackVersionsFileHandler, "read_stack_version")
    @patch.object(CustomServiceOrchestrator, "__init__")
    def test_execute_retryable_command_succeed(self,
                                               CustomServiceOrchestrator_mock,
                                               read_stack_version_mock,
                                               sleep_mock):
        CustomServiceOrchestrator_mock.return_value = None
        dummy_controller = MagicMock()
        actionQueue = ActionQueue(AmbariConfig(), dummy_controller)
        execution_result_succ_dict = {
            'exitcode': 0,
            'stdout': 'out',
            'stderr': 'stderr',
            'structuredOut': '',
            'status': 'COMPLETED'
        }

        command = copy.deepcopy(self.retryable_command)
        with patch.object(CustomServiceOrchestrator,
                          "runCommand") as runCommand_mock:
            runCommand_mock.side_effect = [execution_result_succ_dict]
            actionQueue.execute_command(command)

        # assert that the python executor was started
        self.assertTrue(runCommand_mock.called)
        self.assertFalse(sleep_mock.called)
        self.assertEqual(1, runCommand_mock.call_count)

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch.object(StackVersionsFileHandler, "read_stack_version")
    @patch.object(CustomServiceOrchestrator, "runCommand")
    @patch.object(CustomServiceOrchestrator, "__init__")
    def test_execute_background_command(self, CustomServiceOrchestrator_mock,
                                        runCommand_mock,
                                        read_stack_version_mock):
        CustomServiceOrchestrator_mock.return_value = None
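        # runCommand is patched at class level, so assigning through the class
        # here is equivalent to runCommand_mock.return_value.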
        CustomServiceOrchestrator.runCommand.return_value = {
            'exitcode': 0,
            'stdout': 'out-11',
            'stderr': 'err-13'
        }

        dummy_controller = MagicMock()
        actionQueue = ActionQueue(AmbariConfig(), dummy_controller)

        execute_command = copy.deepcopy(self.background_command)
        actionQueue.put([execute_command])
        actionQueue.processBackgroundQueueSafeEmpty()
        actionQueue.processStatusCommandQueueSafeEmpty()

        # assert that the python executor was started
        self.assertTrue(runCommand_mock.called)
        runningCommand = actionQueue.commandStatuses.current_state.get(
            execute_command['taskId'])
        self.assertTrue(runningCommand is not None)
        self.assertEqual(runningCommand[1]['status'],
                         ActionQueue.IN_PROGRESS_STATUS)

        report = actionQueue.result()
        self.assertEqual(len(report['reports']), 1)

    @patch.object(CustomServiceOrchestrator, "get_py_executor")
    @patch.object(CustomServiceOrchestrator, "resolve_script_path")
    @patch.object(StackVersionsFileHandler, "read_stack_version")
    def test_execute_python_executor(self, read_stack_version_mock,
                                     resolve_script_path_mock,
                                     get_py_executor_mock):

        dummy_controller = MagicMock()
        cfg = AmbariConfig()
        cfg.set('agent', 'tolerate_download_failures', 'true')
        cfg.set('agent', 'prefix', '.')
        cfg.set('agent', 'cache_dir', 'background_tasks')

        actionQueue = ActionQueue(cfg, dummy_controller)
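        # patch_output_file and wraped (sic) are helper functions defined
        # elsewhere in the Ambari TestActionQueue module; they are not part of
        # this excerpt.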
        pyex = PythonExecutor(actionQueue.customServiceOrchestrator.tmp_dir,
                              actionQueue.customServiceOrchestrator.config)
        patch_output_file(pyex)
        get_py_executor_mock.return_value = pyex
        actionQueue.customServiceOrchestrator.dump_command_to_json = MagicMock(
        )

        result = {}
        lock = threading.RLock()
        complete_done = threading.Condition(lock)

        def command_complete_w(process_condensed_result, handle):
            with lock:
                result['command_complete'] = {
                    'condensed_result':
                    copy.copy(process_condensed_result),
                    'handle':
                    copy.copy(handle),
                    'command_status':
                    actionQueue.commandStatuses.get_command_status(
                        handle.command['taskId'])
                }
                complete_done.notifyAll()

        actionQueue.on_background_command_complete_callback = wraped(
            actionQueue.on_background_command_complete_callback, None,
            command_complete_w)
        actionQueue.put([self.background_command])
        actionQueue.processBackgroundQueueSafeEmpty()
        actionQueue.processStatusCommandQueueSafeEmpty()

        with lock:
            complete_done.wait(0.1)

            finished_status = result['command_complete']['command_status']
            self.assertEqual(finished_status['status'],
                             ActionQueue.COMPLETED_STATUS)
            self.assertEqual(finished_status['stdout'], 'process_out')
            self.assertEqual(finished_status['stderr'], 'process_err')
            self.assertEqual(finished_status['exitCode'], 0)

        runningCommand = actionQueue.commandStatuses.current_state.get(
            self.background_command['taskId'])
        self.assertTrue(runningCommand is not None)

        report = actionQueue.result()
        self.assertEqual(len(report['reports']), 1)
        self.assertEqual(report['reports'][0]['stdout'], 'process_out')


#    self.assertEqual(report['reports'][0]['structuredOut'],'{"a": "b."}')

    cancel_background_command = {
        "commandType": "CANCEL_COMMAND",
        "role": "AMBARI_SERVER_ACTION",
        "roleCommand": "ABORT",
        "commandId": "2--1",
        "taskId": 20,
        "clusterName": "c1",
        "serviceName": "",
        "hostname": "c6401",
        "roleParams": {
            "cancelTaskIdTargets": "13,14"
        },
    }
Example #5
import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import datetime, sys, socket
import resource_management.libraries.functions


@patch.object(resource_management.libraries.functions,
              "get_unique_id_and_date",
              new=MagicMock(return_value=''))
@patch("socket.socket", new=MagicMock())
class TestServiceCheck(RMFTestCase):
    @patch("sys.exit")
    def test_service_check_default(self, sys_exit_mock):

        self.executeScript(
            "2.0.6/services/HIVE/package/scripts/service_check.py",
            classname="HiveServiceCheck",
            command="service_check",
            config_file="default.json")
        self.assertResourceCalled(
            'File',
            '/tmp/hcatSmoke.sh',
            content=StaticFile('hcatSmoke.sh'),
            mode=0755,
Example #6
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import os

origin_exists = os.path.exists


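# Pretend every path exists except Jinja2 templates (*.j2), which are checked
# against the real filesystem via the saved origin_exists reference.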
@patch.object(os.path,
              "exists",
              new=MagicMock(side_effect=lambda *args: origin_exists(args[0])
                            if args[0][-2:] == "j2" else True))
class TestResourceManager(RMFTestCase):
    COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
    STACK_VERSION = "2.0.6"

    def test_configure_default(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR +
                           "/scripts/resourcemanager.py",
                           classname="Resourcemanager",
                           command="configure",
                           config_file="default.json",
                           hdp_stack_version=self.STACK_VERSION,
                           target=RMFTestCase.TARGET_COMMON_SERVICES)
        self.assert_configure_default()
        self.assertNoMoreResources()
Example #7
from mock.mock import MagicMock

def open_subprocess_files_win(fout, ferr, f):
    return MagicMock(), MagicMock()
Example #8
# Imports reconstructed for this excerpt.
import json
from mock.mock import MagicMock, patch
from resource_management.core.exceptions import Fail
from stacks.utils.RMFTestCase import *


class TestDatanode(RMFTestCase):
  COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
  STACK_VERSION = "2.0.6"

  CONFIG_OVERRIDES = {"serviceName":"HDFS", "role":"DATANODE"}

  def test_configure_default(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "configure",
                       config_file = "default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default()
    self.assertNoMoreResources()

  def test_start_default(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "start",
                       config_file = "default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default()
    self.assertResourceCalled('Directory', '/var/run/hadoop',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0755
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
                              owner = 'hdfs',
                              group = 'hadoop',
                              create_parents = True,
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
                              owner = 'hdfs',
                              group = 'hadoop',
                              create_parents = True,
                              )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
        action = ['delete'],
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
    )
    self.assertNoMoreResources()

  @patch('time.sleep')
  @patch("os.path.exists", new = MagicMock(return_value=False))
  @patch("resource_management.core.shell.checked_call")
  def test_stop_default(self, checked_call_mock, time_mock):
    def side_effect(arg):
      if '-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo' in arg :
        raise Fail()
      return
    checked_call_mock.side_effect = side_effect
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "stop",
                       config_file = "default.json",
                       stack_version = self.STACK_VERSION,
                       checked_call_mocks = side_effect,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        only_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid")

    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid', action = ['delete'])

    self.assertNoMoreResources()

  def test_configure_secured(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "configure",
                       config_file = "secured.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_secured()
    self.assertNoMoreResources()

  def test_start_secured(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "start",
                       config_file = "secured.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_secured()
    self.assertResourceCalled('Directory', '/var/run/hadoop',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0755
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
                              owner = 'hdfs',
                              group = 'hadoop',
                              create_parents = True,
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
                              owner = 'hdfs',
                              group = 'hadoop',
                              create_parents = True,
                              )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
        action = ['delete'],
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
    )
    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start datanode',
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
    )
    self.assertNoMoreResources()

  def test_start_secured_HDP22_root(self):
    config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/secured.json"
    with open(config_file, "r") as f:
      secured_json = json.load(f)

    secured_json['hostLevelParams']['stack_version']= '2.2'

    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "start",
                       config_dict = secured_json,
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_secured("2.2", snappy_enabled=False)
    self.assertResourceCalled('Directory', '/var/run/hadoop',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0755
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
                              owner = 'hdfs',
                              group = 'hadoop',
                              create_parents = True,
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
                              owner = 'hdfs',
                              group = 'hadoop',
                              create_parents = True,
                              )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
        action = ['delete'],
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
    )
    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode',
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
    )
    self.assertNoMoreResources()

  def test_start_secured_HDP22_non_root_https_only(self):
    config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/secured.json"
    with open(config_file, "r") as f:
      secured_json = json.load(f)

    secured_json['hostLevelParams']['stack_version']= '2.2'
    secured_json['configurations']['hdfs-site']['dfs.http.policy']= 'HTTPS_ONLY'
    secured_json['configurations']['hdfs-site']['dfs.datanode.address']= '0.0.0.0:10000'
    secured_json['configurations']['hdfs-site']['dfs.datanode.https.address']= '0.0.0.0:50000'

    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "start",
                       config_dict = secured_json,
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_secured("2.2", snappy_enabled=False)
    self.assertResourceCalled('Directory', '/var/run/hadoop',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0755
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
                              owner = 'hdfs',
                              group = 'hadoop',
                              create_parents = True,
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop/hdfs',
                              owner = 'hdfs',
                              group = 'hadoop',
                              create_parents = True,
                              )
    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid',
        action = ['delete'],
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf start datanode'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
        not_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid",
    )
    self.assertNoMoreResources()

  @patch('time.sleep')
  @patch("os.path.exists", new = MagicMock(return_value=False))
  @patch("resource_management.core.shell.checked_call")
  def test_stop_secured(self, checked_call_mock, time_mock):
    def side_effect(arg):
      if '-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo' in arg :
        raise Fail()
      return
    checked_call_mock.side_effect = side_effect
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "stop",
                       config_file = "secured.json",
                       stack_version = self.STACK_VERSION,
                       checked_call_mocks = side_effect,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/lib/hadoop/libexec'},
        only_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid")

    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid', action = ['delete'])
    self.assertNoMoreResources()

  @patch('time.sleep')
  @patch("os.path.exists", new = MagicMock(return_value=False))
  @patch("resource_management.core.shell.checked_call")
  def test_stop_secured_HDP22_root(self, checked_call_mock, time_mock):
    def side_effect(arg):
      if '-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo' in arg :
        raise Fail()
      return
    checked_call_mock.side_effect = side_effect
    config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/secured.json"
    with open(config_file, "r") as f:
      secured_json = json.load(f)

    secured_json['hostLevelParams']['stack_version']= '2.2'

    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "stop",
                       config_dict = secured_json,
                       stack_version = self.STACK_VERSION,
                       checked_call_mocks = side_effect,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode',
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
        only_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid")

    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid', action = ['delete'])
    self.assertNoMoreResources()

  @patch('time.sleep')
  @patch("os.path.exists", new = MagicMock(return_value=False))
  @patch("resource_management.core.shell.checked_call")
  def test_stop_secured_HDP22_non_root_https_only(self, checked_call_mock, time_mock):
    def side_effect(arg):
      if '-D ipc.client.connect.max.retries=5 -D ipc.client.connect.retry.interval=1000 -getDatanodeInfo' in arg :
        raise Fail()
      return
    checked_call_mock.side_effect = side_effect
    config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/secured.json"
    with open(config_file, "r") as f:
      secured_json = json.load(f)

    secured_json['hostLevelParams']['stack_version']= '2.2'
    secured_json['configurations']['hdfs-site']['dfs.http.policy']= 'HTTPS_ONLY'
    secured_json['configurations']['hdfs-site']['dfs.datanode.address']= '0.0.0.0:10000'
    secured_json['configurations']['hdfs-site']['dfs.datanode.https.address']= '0.0.0.0:50000'

    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "stop",
                       config_dict = secured_json,
                       stack_version = self.STACK_VERSION,
                       checked_call_mocks = side_effect,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode'",
        environment = {'HADOOP_LIBEXEC_DIR': '/usr/hdp/current/hadoop-client/libexec'},
        only_if = "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid && ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -F /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid")

    self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid', action = ['delete'])
    self.assertNoMoreResources()

  def assert_configure_default(self):
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
        create_parents = True,
    )
    self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-amd64-64',
        create_parents = True,
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
        to = '/usr/lib/hadoop/lib/libsnappy.so',
    )
    self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
        to = '/usr/lib/hadoop/lib64/libsnappy.so',
    )
    self.assertResourceCalled('Directory', '/etc/security/limits.d',
                              owner = 'root',
                              group = 'root',
                              create_parents = True,
                              )
    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
                              content = Template('hdfs.conf.j2'),
                              owner = 'root',
                              group = 'root',
                              mode = 0644,
                              )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['hdfs-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
                              )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['core-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
                              mode = 0644
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/slaves',
                              content = Template('slaves.j2'),
                              owner = 'hdfs',
                              )

    self.assertResourceCalled('Directory', '/var/lib/hadoop-hdfs',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0751,
                              create_parents = True,
                              )
    self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/datanode',
                              mode = 0755,
                              create_parents = True
    )
    self.assertResourceCalled('Directory', '/hadoop/hdfs/data',
                              owner = 'hdfs',
                              ignore_failures = True,
                              group = 'hadoop',
                              mode = 0750,
                              create_parents = True,
                              cd_access='a'
                              )
    content = resource_management.libraries.functions.mounted_dirs_helper.DIR_TO_MOUNT_HEADER
    self.assertResourceCalled('File', '/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0644,
                              content = content
                              )

  def assert_configure_secured(self, stackVersion=STACK_VERSION, snappy_enabled=True):
    conf_dir = '/etc/hadoop/conf'
    if stackVersion != self.STACK_VERSION:
      conf_dir = '/usr/hdp/current/hadoop-client/conf'
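    # Callers pass "2.2" for HDP 2.2+ scenarios, where configuration lives in
    # the hdp-select managed directory instead of /etc/hadoop/conf.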
    
    if snappy_enabled:
      self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
          create_parents = True,
      )
      self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-amd64-64',
          create_parents = True,
      )
      self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-i386-32/libsnappy.so',
          to = '/usr/lib/hadoop/lib/libsnappy.so',
      )
      self.assertResourceCalled('Link', '/usr/lib/hadoop/lib/native/Linux-amd64-64/libsnappy.so',
          to = '/usr/lib/hadoop/lib64/libsnappy.so',
      )
    self.assertResourceCalled('Directory', '/etc/security/limits.d',
                              owner = 'root',
                              group = 'root',
                              create_parents = True,
                              )
    self.assertResourceCalled('File', '/etc/security/limits.d/hdfs.conf',
                              content = Template('hdfs.conf.j2'),
                              owner = 'root',
                              group = 'root',
                              mode = 0644,
                              )
    self.assertResourceCalled('File', conf_dir + '/hdfs_dn_jaas.conf',
                              content = Template('hdfs_dn_jaas.conf.j2'),
                              owner = 'hdfs',
                              group = 'hadoop',
                              )
    self.assertResourceCalled('File', conf_dir + '/hdfs_nn_jaas.conf',
                              content = Template('hdfs_nn_jaas.conf.j2'),
                              owner = 'hdfs',
                              group = 'hadoop',
                              )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = conf_dir,
                              configurations = self.getConfig()['configurations']['hdfs-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
                              )

    self.assertResourceCalled('XmlConfig', 'core-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = conf_dir,
                              configurations = self.getConfig()['configurations']['core-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
                              mode = 0644
    )
    self.assertResourceCalled('File', conf_dir + '/slaves',
                              content = Template('slaves.j2'),
                              owner = 'root',
                              )

    self.assertResourceCalled('Directory', '/var/lib/hadoop-hdfs',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0751,
                              create_parents = True,
                              )
    self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/datanode',
                              mode = 0755,
                              create_parents = True
    )
    self.assertResourceCalled('Directory', '/hadoop/hdfs/data',
                              owner = 'hdfs',
                              ignore_failures = True,
                              group = 'hadoop',
                              mode = 0750,
                              create_parents = True,
                              cd_access='a'
                              )
    content = resource_management.libraries.functions.mounted_dirs_helper.DIR_TO_MOUNT_HEADER
    self.assertResourceCalled('File', '/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0644,
                              content = content
                              )


  def test_pre_upgrade_restart(self):
    config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
    with open(config_file, "r") as f:
      json_content = json.load(f)
    version = '2.2.1.0-3242'
    json_content['commandParams']['version'] = version
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "pre_upgrade_restart",
                       config_dict = json_content,
                       config_overrides = self.CONFIG_OVERRIDES,
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES)
    self.assertResourceCalled('Execute',
                              ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-hdfs-datanode', version), sudo=True,)
    self.assertNoMoreResources()


  @patch("resource_management.core.shell.call")
  def test_pre_upgrade_restart_23(self, call_mock):
    config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
    with open(config_file, "r") as f:
      json_content = json.load(f)
    version = '2.3.0.0-1234'
    json_content['commandParams']['version'] = version

    mocks_dict = {}
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "pre_upgrade_restart",
                       config_dict = json_content,
                       config_overrides = self.CONFIG_OVERRIDES,
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES,
                       mocks_dict = mocks_dict)
    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-hdfs-datanode', version), sudo=True,)

    self.assertNoMoreResources()


  @patch("socket.gethostbyname")
  @patch('time.sleep')
  def test_post_upgrade_restart(self, time_mock, socket_gethostbyname_mock):
    shell_call_output = """
      Live datanodes (2):

      Name: 192.168.64.102:50010 (c6401.ambari.apache.org)
      Hostname: c6401.ambari.apache.org
      Decommission Status : Normal
      Configured Capacity: 524208947200 (488.21 GB)
      DFS Used: 193069056 (184.13 MB)
      Non DFS Used: 29264986112 (27.26 GB)
      DFS Remaining: 494750892032 (460.77 GB)
      DFS Used%: 0.04%
      DFS Remaining%: 94.38%
      Configured Cache Capacity: 0 (0 B)
      Cache Used: 0 (0 B)
      Cache Remaining: 0 (0 B)
      Cache Used%: 100.00%
      Cache Remaining%: 0.00%
      Xceivers: 2
      Last contact: Fri Dec 12 20:47:21 UTC 2014
    """
    mocks_dict = {}
    socket_gethostbyname_mock.return_value = "test_host"
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "post_upgrade_restart",
                       config_file = "default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES,
                       call_mocks = [(0, shell_call_output)],
                       mocks_dict = mocks_dict
    )

    self.assertTrue(mocks_dict['call'].called)
    self.assertEqual(mocks_dict['call'].call_count,1)


  @patch("socket.gethostbyname")
  @patch('time.sleep')
  def test_post_upgrade_restart_datanode_not_ready(self, time_mock, socket_gethostbyname_mock):
    mocks_dict = {}
    socket_gethostbyname_mock.return_value = "test_host"
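    # Every one of the 30 mocked calls reports no DataNodes, so the script is
    # expected to exhaust its retries and raise Fail rather than succeed.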
    try:
      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                         classname = "DataNode",
                         command = "post_upgrade_restart",
                         config_file = "default.json",
                         stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES,
                         call_mocks = [(0, 'There are no DataNodes here!')] * 30,
                         mocks_dict = mocks_dict
      )
      self.fail('Missing DataNode should have caused a failure')
    except Fail, fail:
      self.assertTrue(mocks_dict['call'].called)
      self.assertEqual(mocks_dict['call'].call_count,30)
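
# For context, a minimal sketch of the retry loop the two post-upgrade tests
# above exercise. This is not the Ambari implementation: the helper name, the
# exact dfsadmin command and the success condition are assumptions inferred
# from the mocked call outputs ("Live datanodes (2):" succeeds on the first
# call; thirty "There are no DataNodes here!" replies end in a Fail).
import time

from resource_management.core import shell
from resource_management.core.exceptions import Fail


def wait_for_live_datanode(retries=30, sleep_seconds=5):
  # Poll the NameNode's DataNode report until it lists live nodes; the real
  # check presumably also verifies that this particular host appears in it.
  for _ in range(retries):
    code, out = shell.call("hdfs dfsadmin -report -live")
    if code == 0 and "Live datanodes" in out:
      return
    time.sleep(sleep_seconds)
  raise Fail("DataNode did not rejoin the cluster after the upgrade")
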
Example #9
with the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''

import json
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *

@patch("os.path.exists", new = MagicMock(return_value=True))
class TestHookAfterInstall(RMFTestCase):

  def test_hook_default(self):

    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
                       classname="AfterInstallHook",
                       command="hook",
                       config_file="default.json"
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['core-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
Example #10
    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *

from only_for_platform import not_for_platform, PLATFORM_WINDOWS

@not_for_platform(PLATFORM_WINDOWS)
@patch("resource_management.libraries.functions.get_hdp_version", new=MagicMock(return_value="2.3.2.0-1597"))
class TestSparkThriftServer(RMFTestCase):
  COMMON_SERVICES_PACKAGE_DIR = "SPARK/1.2.0.2.2/package"
  STACK_VERSION = "2.3"

  @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
  def test_configure_default(self, copy_to_hdfs_mock):
    copy_to_hdfs_mock.return_value = True
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/spark_thrift_server.py",
                   classname = "SparkThriftServer",
                   command = "configure",
                   config_file="spark_default.json",
                   hdp_stack_version = self.STACK_VERSION,
                   target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default()
Example #11
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from stacks.utils.RMFTestCase import *
import json
from mock.mock import MagicMock, patch
from resource_management.libraries.script.script import Script
from resource_management.core import shell
import itertools
from resource_management.core.exceptions import Fail
import resource_management.libraries.functions.mounted_dirs_helper

@patch.object(resource_management.libraries.functions, 'check_process_status', new = MagicMock())
@patch.object(Script, 'format_package_name', new = MagicMock())
class TestDatanode(RMFTestCase):
  COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
  STACK_VERSION = "2.0.6"

  CONFIG_OVERRIDES = {"serviceName":"HDFS", "role":"DATANODE"}

  def test_configure_default(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                       classname = "DataNode",
                       command = "configure",
                       config_file = "default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
Example #12
class TestHardware(TestCase):
    @patch.object(Hardware, "osdisks", new=MagicMock(return_value=[]))
    @patch.object(Hardware,
                  "_chk_writable_mount",
                  new=MagicMock(return_value=True))
    @patch.object(FacterLinux,
                  "get_ip_address_by_ifname",
                  new=MagicMock(return_value=None))
    @patch.object(OSCheck, "get_os_type")
    @patch.object(OSCheck, "get_os_version")
    def test_build(self, get_os_version_mock, get_os_type_mock):
        get_os_type_mock.return_value = "suse"
        get_os_version_mock.return_value = "11"
        hardware = Hardware()
        result = hardware.get()
        osdisks = hardware.osdisks()

        for dev_item in result['mounts']:
            self.assertTrue(dev_item['available'] >= 0)
            self.assertTrue(dev_item['used'] >= 0)
            self.assertTrue(dev_item['percent'] is not None)
            self.assertTrue(dev_item['device'] is not None)
            self.assertTrue(dev_item['mountpoint'] is not None)
            self.assertTrue(dev_item['type'] is not None)
            self.assertTrue(dev_item['size'] > 0)

        for os_disk_item in osdisks:
            self.assertTrue(os_disk_item['available'] >= 0)
            self.assertTrue(os_disk_item['used'] >= 0)
            self.assertTrue(os_disk_item['percent'] is not None)
            self.assertTrue(os_disk_item['device'] is not None)
            self.assertTrue(os_disk_item['mountpoint'] is not None)
            self.assertTrue(os_disk_item['type'] is not None)
            self.assertTrue(os_disk_item['size'] > 0)

        self.assertTrue(len(result['mounts']) == len(osdisks))

    @patch.object(Hardware, "_chk_writable_mount")
    @patch("ambari_agent.Hardware.path_isfile")
    @patch("resource_management.core.shell.call")
    def test_osdisks_parsing(self, shell_call_mock, isfile_mock,
                             chk_writable_mount_mock):
        df_output =\
                    """Filesystem                                                                                        Type  1024-blocks     Used Available Capacity Mounted on
                /dev/mapper/docker-253:0-4980899-d45c264d37ab18c8ed14f890f4d59ac2b81e1c52919eb36a79419787209515f3 xfs      31447040  1282384  30164656       5% /
                tmpfs                                                                                             tmpfs    32938336        4  32938332       1% /dev
                tmpfs                                                                                             tmpfs    32938336        0  32938336       0% /sys/fs/cgroup
                /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/resolv.conf
                /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/hostname
                /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/hosts
                shm                                                                                               tmpfs       65536        0     65536       0% /dev/shm
                /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /run/secrets
                """

        def isfile_side_effect(path):
            assume_files = ["/etc/resolv.conf", "/etc/hostname", "/etc/hosts"]
            return path in assume_files

        def chk_writable_mount_side_effect(path):
            assume_read_only = ["/run/secrets"]
            return path not in assume_read_only

        isfile_mock.side_effect = isfile_side_effect
        chk_writable_mount_mock.side_effect = chk_writable_mount_side_effect
        shell_call_mock.return_value = (0, df_output, '')
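        # Expected filtering: the tmpfs/shm rows, the /etc/* file bind mounts
        # and the read-only /run/secrets should all be dropped, leaving only "/".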

        result = Hardware(cache_info=False).osdisks()

        self.assertEquals(1, len(result))

        expected_mounts_left = ["/"]
        mounts_left = [item["mountpoint"] for item in result]

        self.assertEquals(expected_mounts_left, mounts_left)

    @patch.object(Hardware, "_chk_writable_mount")
    @patch("ambari_agent.Hardware.path_isfile")
    @patch("resource_management.core.shell.call")
    def test_osdisks_no_ignore_property(self, shell_call_mock, isfile_mock,
                                        chk_writable_mount_mock):
        df_output = \
          """Filesystem                                                                                        Type  1024-blocks     Used Available Capacity Mounted on
      /dev/mapper/docker-253:0-4980899-d45c264d37ab18c8ed14f890f4d59ac2b81e1c52919eb36a79419787209515f3 xfs      31447040  1282384  30164656       5% /
      """

        isfile_mock.return_value = False
        chk_writable_mount_mock.return_value = True
        shell_call_mock.return_value = (0, df_output, '')
        config = AmbariConfig()

        # check that the config does not define the ignore_mount_points property
        self.assertEquals(
            "test", config.get('agent', 'ignore_mount_points', default="test"))

        result = Hardware(config=config, cache_info=False).osdisks()

        self.assertEquals(1, len(result))

        expected_mounts_left = ["/"]
        mounts_left = [item["mountpoint"] for item in result]

        self.assertEquals(expected_mounts_left, mounts_left)

    @patch.object(OSCheck, "get_os_type")
    @patch.object(OSCheck, "get_os_version")
    @patch("resource_management.core.shell.call")
    def test_osdisks_remote(self, shell_call_mock, get_os_version_mock,
                            get_os_type_mock):
        get_os_type_mock.return_value = "suse"
        get_os_version_mock.return_value = "11"
        Hardware(cache_info=False).osdisks()
        timeout = 10
        shell_call_mock.assert_called_with(
            ['timeout', str(timeout), "df", "-kPT"],
            stdout=subprocess32.PIPE,
            stderr=subprocess32.PIPE,
            timeout=timeout,
            quiet=True)
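        # By default remote mounts are included, so df runs without "-l".
        # Setting CHECK_REMOTE_MOUNTS_KEY to "false" below appends "-l", and
        # CHECK_REMOTE_MOUNTS_TIMEOUT_KEY overrides the timeout wrapper value
        # (a value of "0" leaves the default of 10 seconds in effect).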

        config = AmbariConfig()
        Hardware(config=config, cache_info=False).osdisks()
        shell_call_mock.assert_called_with(
            ['timeout', str(timeout), "df", "-kPT"],
            stdout=subprocess32.PIPE,
            stderr=subprocess32.PIPE,
            timeout=timeout,
            quiet=True)

        config.add_section(AmbariConfig.AMBARI_PROPERTIES_CATEGORY)
        config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY,
                   Hardware.CHECK_REMOTE_MOUNTS_KEY, "true")
        Hardware(config=config, cache_info=False).osdisks()
        shell_call_mock.assert_called_with(
            ['timeout', str(timeout), "df", "-kPT"],
            stdout=subprocess32.PIPE,
            stderr=subprocess32.PIPE,
            timeout=timeout,
            quiet=True)

        config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY,
                   Hardware.CHECK_REMOTE_MOUNTS_KEY, "false")
        Hardware(config=config, cache_info=False).osdisks()
        shell_call_mock.assert_called_with(
            ['timeout', str(timeout), "df", "-kPT", "-l"],
            stdout=subprocess32.PIPE,
            stderr=subprocess32.PIPE,
            timeout=timeout,
            quiet=True)

        config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY,
                   Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "0")
        Hardware(config=config, cache_info=False).osdisks()
        shell_call_mock.assert_called_with(
            ['timeout', str(timeout), "df", "-kPT", "-l"],
            stdout=subprocess32.PIPE,
            stderr=subprocess32.PIPE,
            timeout=timeout,
            quiet=True)

        timeout = 1
        config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY,
                   Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, str(timeout))
        Hardware(config=config, cache_info=False).osdisks()
        shell_call_mock.assert_called_with(
            ['timeout', str(timeout), "df", "-kPT", "-l"],
            stdout=subprocess32.PIPE,
            stderr=subprocess32.PIPE,
            timeout=timeout,
            quiet=True)

        timeout = 2
        config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY,
                   Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, str(timeout))
        Hardware(config=config, cache_info=False).osdisks()
        shell_call_mock.assert_called_with(
            ['timeout', str(timeout), "df", "-kPT", "-l"],
            stdout=subprocess32.PIPE,
            stderr=subprocess32.PIPE,
            timeout=timeout,
            quiet=True)

    def test_parse_df_line(self):
        df_line_sample = "device type size used available percent mountpoint"
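        # A well-formed df line has exactly seven whitespace-separated fields
        # matching the header above; shorter, longer and empty lines are
        # expected to yield no parsed entry.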

        samples = [{
            "sample":
            df_line_sample,
            "expected":
            dict(zip(df_line_sample.split(), df_line_sample.split()))
        }, {
            "sample": "device type size used available percent",
            "expected": None,
        }, {
            "sample":
            "device type size used available percent mountpoint info",
            "expected": None,
        }, {
            "sample": "",
            "expected": None
        }]

        for sample in samples:
            try:
                result = Hardware(cache_info=False)._parse_df(
                    [sample["sample"]]).next()
            except StopIteration:
                result = None

            self.assertEquals(
                result, sample["expected"],
                "Failed with sample: '{0}', expected: {1}, got: {2}".format(
                    sample["sample"], sample["expected"], result))

    @patch.object(FacterLinux,
                  "get_ip_address_by_ifname",
                  new=MagicMock(return_value=None))
    @patch.object(hostname, "hostname")
    @patch.object(FacterLinux, "getFqdn")
    @patch.object(OSCheck, "get_os_type")
    @patch.object(OSCheck, "get_os_version")
    def test_fqdnDomainHostname(self, get_os_version_mock, get_os_type_mock,
                                facter_getFqdn_mock, hostname_mock):
        facter_getFqdn_mock.return_value = "ambari.apache.org"
        hostname_mock.return_value = 'ambari'
        get_os_type_mock.return_value = "suse"
        get_os_version_mock.return_value = "11"
        config = None
        result = Facter(config).facterInfo()

        self.assertEquals(result['hostname'], "ambari")
        self.assertEquals(result['domain'], "apache.org")
        self.assertEquals(result['fqdn'],
                          (result['hostname'] + '.' + result['domain']))

    @patch.object(FacterLinux,
                  "get_ip_address_by_ifname",
                  new=MagicMock(return_value=None))
    @patch.object(FacterLinux, "setDataUpTimeOutput")
    @patch.object(OSCheck, "get_os_type")
    @patch.object(OSCheck, "get_os_version")
    def test_uptimeSecondsHoursDays(self, get_os_version_mock,
                                    get_os_type_mock,
                                    facter_setDataUpTimeOutput_mock):
        # 3 days + 1 hour + 13 sec
        facter_setDataUpTimeOutput_mock.return_value = "262813.00 123.45"
        get_os_type_mock.return_value = "suse"
        get_os_version_mock.return_value = "11"
        config = None
        result = Facter(config).facterInfo()

        self.assertEquals(result['uptime_seconds'], '262813')
        self.assertEquals(result['uptime_hours'], '73')
        self.assertEquals(result['uptime_days'], '3')

    @patch.object(FacterLinux,
                  "get_ip_address_by_ifname",
                  new=MagicMock(return_value=None))
    @patch.object(FacterLinux, "setMemInfoOutput")
    @patch.object(OSCheck, "get_os_type")
    @patch.object(OSCheck, "get_os_version")
    @patch.object(FacterLinux, "getSystemResourceOverrides")
    def test_facterMemInfoOutput(self, getSystemResourceOverridesMock,
                                 get_os_version_mock, get_os_type_mock,
                                 facter_setMemInfoOutput_mock):

        getSystemResourceOverridesMock.return_value = {}
        facter_setMemInfoOutput_mock.return_value = '''
MemTotal:        1832392 kB
MemFree:          868648 kB
HighTotal:             0 kB
HighFree:              0 kB
LowTotal:        1832392 kB
LowFree:          868648 kB
SwapTotal:       2139592 kB
SwapFree:        1598676 kB
    '''

        get_os_type_mock.return_value = "suse"
        get_os_version_mock.return_value = "11"
        config = None
        result = Facter(config).facterInfo()

        self.assertEquals(result['memorysize'], 1832392)
        self.assertEquals(result['memorytotal'], 1832392)
        self.assertEquals(result['memoryfree'], 868648)
        self.assertEquals(result['swapsize'], '2.04 GB')
        self.assertEquals(result['swapfree'], '1.52 GB')

    @patch("fcntl.ioctl")
    @patch("socket.socket")
    @patch("struct.pack")
    @patch("socket.inet_ntoa")
    @patch.object(FacterLinux, "get_ip_address_by_ifname")
    @patch.object(Facter, "getIpAddress")
    @patch.object(OSCheck, "get_os_type")
    @patch.object(OSCheck, "get_os_version")
    def test_facterDataIfConfigOutput(self, get_os_version_mock,
                                      get_os_type_mock, getIpAddress_mock,
                                      get_ip_address_by_ifname_mock,
                                      inet_ntoa_mock, struct_pack_mock,
                                      socket_socket_mock, fcntl_ioctl_mock):
        getIpAddress_mock.return_value = "10.0.2.15"
        get_ip_address_by_ifname_mock.return_value = "10.0.2.15"
        inet_ntoa_mock.return_value = "255.255.255.0"

        get_os_type_mock.return_value = "suse"
        get_os_version_mock.return_value = "11"
        config = None
        result = Facter(config).facterInfo()

        self.assertTrue(inet_ntoa_mock.called)
        self.assertTrue(get_ip_address_by_ifname_mock.called)
        self.assertTrue(getIpAddress_mock.called)
        self.assertEquals(result['ipaddress'], '10.0.2.15')
        self.assertEquals(result['netmask'], '255.255.255.0')
        self.assertEquals(result['interfaces'], 'eth0,eth1,eth2,lo')

    @patch("fcntl.ioctl")
    @patch("socket.socket")
    @patch("struct.pack")
    @patch("socket.inet_ntoa")
    @patch.object(FacterLinux, "get_ip_address_by_ifname")
    @patch.object(Facter, "getIpAddress")
    @patch.object(OSCheck, "get_os_type")
    @patch.object(OSCheck, "get_os_version")
    def test_facterDataIfConfigOutputNone(self, get_os_version_mock,
                                          get_os_type_mock, getIpAddress_mock,
                                          get_ip_address_by_ifname_mock,
                                          inet_ntoa_mock, struct_pack_mock,
                                          socket_socket_mock,
                                          fcntl_ioctl_mock):
        getIpAddress_mock.return_value = "10.0.2.15"
        get_ip_address_by_ifname_mock.return_value = ""
        inet_ntoa_mock.return_value = "255.255.255.0"

        get_os_type_mock.return_value = "suse"
        get_os_version_mock.return_value = "11"
        config = None
        result = Facter(config).facterInfo()

        self.assertTrue(get_ip_address_by_ifname_mock.called)
        self.assertEquals(result['netmask'], None)

    @patch.object(FacterLinux,
                  "get_ip_address_by_ifname",
                  new=MagicMock(return_value=None))
    @patch.object(OSCheck, "get_os_type")
    @patch.object(OSCheck, "get_os_family")
    @patch.object(OSCheck, "get_os_version")
    def test_facterDataOperatingsystemVsFamily(self, get_os_version_mock,
                                               get_os_family_mock,
                                               get_os_type_mock):
        get_os_type_mock.return_value = "some_type_of_os"
        get_os_version_mock.return_value = "11"
        get_os_family_mock.return_value = "redhat"

        config = None
        result = Facter(config).facterInfo()
        self.assertEquals(result['operatingsystem'], 'some_type_of_os')
        self.assertEquals(result['osfamily'], 'redhat')

        get_os_family_mock.return_value = "ubuntu"
        result = Facter(config).facterInfo()
        self.assertEquals(result['operatingsystem'], 'some_type_of_os')
        self.assertEquals(result['osfamily'], 'ubuntu')

        get_os_family_mock.return_value = "suse"
        result = Facter(config).facterInfo()
        self.assertEquals(result['operatingsystem'], 'some_type_of_os')
        self.assertEquals(result['osfamily'], 'suse')

        get_os_family_mock.return_value = "My_new_family"
        result = Facter(config).facterInfo()
        self.assertEquals(result['operatingsystem'], 'some_type_of_os')
        self.assertEquals(result['osfamily'], 'My_new_family')

    @patch("os.path.exists")
    @patch("os.path.isdir")
    @patch("json.loads")
    @patch("glob.glob")
    @patch("__builtin__.open")
    @patch.object(OSCheck, "get_os_type")
    @patch.object(OSCheck, "get_os_version")
    @patch.object(FacterLinux, "resolve_ambari_config")
    def test_system_resource_overrides(self, resolve_ambari_config,
                                       get_os_version_mock, get_os_type_mock,
                                       open_mock, glob_mock, json_mock, isdir,
                                       exists):
        get_os_type_mock.return_value = "suse"
        get_os_version_mock.return_value = "11"
        config = MagicMock()
        config.get.return_value = '/etc/custom_resource_overrides'
        config.has_option.return_value = True
        resolve_ambari_config.return_value = config
        isdir.return_value = True
        exists.return_value = True
        open_mock.return_value.read = "1"
        file_handle = open_mock.return_value.__enter__.return_value
        file_handle.read.return_value = '1'
        glob_mock.side_effect = \
          [
            [
              "/etc/custom_resource_overrides/1.json",
              "/etc/custom_resource_overrides/2.json"
              ]
          ]
        json_data = json_mock.return_value
        json_data.items.return_value = [('key', 'value')]
        json_data.__getitem__.return_value = 'value'
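        # Both mocked override files parse to {'key': 'value'}, so the merged
        # overrides are expected to expose that key, as asserted below.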

        facter = Facter(config)
        result = facter.getSystemResourceOverrides()

        isdir.assert_called_with('/etc/custom_resource_overrides')
        exists.assert_called_with('/etc/custom_resource_overrides')
        glob_mock.assert_called_with('/etc/custom_resource_overrides/*.json')
        self.assertTrue(config.has_option.called)
        self.assertTrue(config.get.called)
        self.assertTrue(glob_mock.called)
        self.assertEquals(2, file_handle.read.call_count)
        self.assertEquals(2, open_mock.call_count)
        self.assertEquals(2, json_mock.call_count)
        self.assertEquals('value', result['key'])

    @patch.object(Hardware, "_chk_writable_mount")
    @patch("ambari_agent.Hardware.path_isfile")
    @patch("resource_management.core.shell.call")
    def test_osdisks_blacklist(self, shell_call_mock, isfile_mock,
                               chk_writable_mount_mock):
        df_output = \
          """Filesystem                                                                                        Type  1024-blocks     Used Available Capacity Mounted on
      /dev/mapper/docker-253:0-4980899-d45c264d37ab18c8ed14f890f4d59ac2b81e1c52919eb36a79419787209515f3 xfs      31447040  1282384  30164656       5% /
      tmpfs                                                                                             tmpfs    32938336        4  32938332       1% /dev
      tmpfs                                                                                             tmpfs    32938336        0  32938336       0% /sys/fs/cgroup
      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/resolv.conf
      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/hostname
      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /etc/hosts
      shm                                                                                               tmpfs       65536        0     65536       0% /dev/shm
      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /run/secrets
      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /mnt/blacklisted_mount
      /dev/mapper/fedora-root                                                                           ext4    224161316 12849696 199901804       7% /mnt/blacklisted_mount/sub-dir
      """

        def isfile_side_effect(path):
            assume_files = ["/etc/resolv.conf", "/etc/hostname", "/etc/hosts"]
            return path in assume_files

        def chk_writable_mount_side_effect(path):
            assume_read_only = ["/run/secrets"]
            return path not in assume_read_only

        isfile_mock.side_effect = isfile_side_effect
        chk_writable_mount_mock.side_effect = chk_writable_mount_side_effect

        config_dict = {
            "agent": {
                "ignore_mount_points": "/mnt/blacklisted_mount"
            }
        }

        shell_call_mock.return_value = (0, df_output, '')

        def conf_get(section, key, default=""):
            if section in config_dict and key in config_dict[section]:
                return config_dict[section][key]

            return default

        def has_option(section, key):
            return section in config_dict and key in config_dict[section]

        conf = Mock()
        attr = {
            'get.side_effect': conf_get,
            'has_option.side_effect': has_option
        }
        conf.configure_mock(**attr)
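        # With /mnt/blacklisted_mount in ignore_mount_points, both that mount
        # and its sub-directory mount are expected to be filtered out along
        # with the virtual, file-backed and read-only entries, leaving "/".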

        result = Hardware(config=conf, cache_info=False).osdisks()

        self.assertEquals(1, len(result))

        expected_mounts_left = ["/"]
        mounts_left = [item["mountpoint"] for item in result]

        self.assertEquals(expected_mounts_left, mounts_left)
Example #13
import unittest
import platform
import socket
from ambari_commons import subprocess32
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
from ambari_agent import hostname
from ambari_agent.Hardware import Hardware
from ambari_agent.AmbariConfig import AmbariConfig
from ambari_agent.Facter import Facter, FacterLinux
from ambari_commons import OSCheck


@not_for_platform(PLATFORM_WINDOWS)
@patch.object(platform,
              "linux_distribution",
              new=MagicMock(return_value=('Suse', '11', 'Final')))
@patch.object(socket,
              "getfqdn",
              new=MagicMock(return_value="ambari.apache.org"))
@patch.object(socket,
              "gethostbyname",
              new=MagicMock(return_value="192.168.1.1"))
@patch.object(
    FacterLinux,
    "setDataIfConfigShortOutput",
    new=MagicMock(
        return_value=
        '''Iface   MTU Met    RX-OK RX-ERR RX-DRP RX-OVR    TX-OK TX-ERR TX-DRP TX-OVR Flg
eth0   1500   0     9986      0      0      0     5490      0      0      0 BMRU
eth1   1500   0        0      0      0      0        6      0      0      0 BMRU
eth2   1500   0        0      0      0      0        6      0      0      0 BMRU
Example #14
    def test_alert(self, conn_mock):
        connection = MagicMock()
        response = MagicMock()
        response.status = 200
        connection.getresponse.return_value = response
        conn_mock.return_value = connection
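        # The mocked HTTP connection lets each scenario below control the AMS
        # response body (and, later, the status code) without any network I/O.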
        response.read.return_value = '{"metrics":[{"metricname":"metric1","metrics":{"1459966360838":1,"1459966370838":3}}]}'

        # OK, but no datapoints above the minimum threshold
        [status, messages] = alert.execute(configurations=configs,
                                           parameters=parameters)
        self.assertEqual(status, RESULT_STATE_OK)
        self.assertTrue(messages is not None and len(messages) == 1)
        self.assertEquals(
            'No datapoints found above the minimum threshold of 30 seconds',
            messages[0])

        # Unable to calculate the standard deviation for 1 data point
        response.read.return_value = '{"metrics":[{"metricname":"metric1","metrics":{"1459966360838":40000}}]}'
        [status, messages] = alert.execute(configurations=configs,
                                           parameters=parameters)
        self.assertEqual(status, RESULT_STATE_SKIPPED)
        self.assertTrue(messages is not None and len(messages) == 1)
        self.assertEquals(
            'Unable to calculate the standard deviation for 1 datapoints',
            messages[0])

        # OK
        response.read.return_value = '{"metrics":[{"metricname":"metric1","metrics":{"1459966360838":40000,"1459966370838":50000}}]}'
        [status, messages] = alert.execute(configurations=configs,
                                           parameters=parameters)
        self.assertEqual(status, RESULT_STATE_OK)
        self.assertTrue(messages is not None and len(messages) == 1)
        self.assertTrue(
            'OK. Percentage standard deviation value is' in messages[0])

        # Warning
        response.read.return_value = '{"metrics":[{"metricname":"metric1","metrics":{"1459966360838":40000,"1459966370838":1000000}}]}'
        [status, messages] = alert.execute(configurations=configs,
                                           parameters=parameters)
        self.assertEqual(status, RESULT_STATE_WARNING)
        self.assertTrue(messages is not None and len(messages) == 1)
        self.assertTrue(
            'WARNING. Percentage standard deviation' in messages[0])

        # HTTP request to AMS failed
        response.read.return_value = ''
        response.status = 501
        [status, messages] = alert.execute(configurations=configs,
                                           parameters=parameters)
        self.assertEqual(status, RESULT_STATE_UNKNOWN)
        self.assertTrue(messages is not None and len(messages) == 1)
        self.assertEquals('Unable to retrieve metrics from AMS.', messages[0])

        # Unable to connect to AMS
        conn_mock.side_effect = Exception('Unable to connect to AMS')
        [status, messages] = alert.execute(configurations=configs,
                                           parameters=parameters)
        self.assertEqual(status, RESULT_STATE_UNKNOWN)
        self.assertTrue(messages is not None and len(messages) == 1)
        self.assertEquals('Unable to retrieve metrics from AMS.', messages[0])
Example #15
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *

from only_for_platform import not_for_platform, PLATFORM_WINDOWS


@not_for_platform(PLATFORM_WINDOWS)
@patch("resource_management.libraries.functions.get_hdp_version",
       new=MagicMock(return_value="2.3.0.0-1597"))
class TestSparkClient(RMFTestCase):
    COMMON_SERVICES_PACKAGE_DIR = "SPARK/1.2.0.2.2/package"
    STACK_VERSION = "2.2"

    def test_configure_default(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR +
                           "/scripts/spark_client.py",
                           classname="SparkClient",
                           command="configure",
                           config_file="default.json",
                           hdp_stack_version=self.STACK_VERSION,
                           target=RMFTestCase.TARGET_COMMON_SERVICES)
        self.assert_configure_default()
        self.assertNoMoreResources()
Example #16
class TestHookAfterInstall(RMFTestCase):

  def test_hook_default(self):

    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
                       classname="AfterInstallHook",
                       command="hook",
                       config_file="default.json"
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['core-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
                              only_if="ls /etc/hadoop/conf")

    self.assertNoMoreResources()


  @patch("shared_initialization.load_version", new = MagicMock(return_value="2.3.0.0-1243"))
  @patch("resource_management.libraries.functions.conf_select.create")
  @patch("resource_management.libraries.functions.conf_select.select")
  @patch("os.symlink")
  @patch("shutil.rmtree")
  def test_hook_default_conf_select(self, rmtree_mock, symlink_mock, conf_select_select_mock, conf_select_create_mock):

    def mocked_conf_select(arg1, arg2, arg3, dry_run = False):
      return "/etc/{0}/{1}/0".format(arg2, arg3)

    conf_select_create_mock.side_effect = mocked_conf_select
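    # The fake conf_select.create reports a freshly created versioned config
    # directory (/etc/<package>/<version>/0), so the hook goes on to back up
    # each legacy /etc/<package>/conf and re-link it, as asserted below.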

    config_file = self.get_src_folder() + "/test/python/stacks/2.0.6/configs/default.json"
    with open(config_file, "r") as f:
      json_content = json.load(f)

    version = '2.3.0.0-1234'
    json_content['commandParams']['version'] = version
    json_content['hostLevelParams']['stack_version'] = "2.3"

    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
                       classname="AfterInstallHook",
                       command="hook",
                       config_dict = json_content)


    self.assertResourceCalled("Execute",
      "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E touch /var/lib/ambari-agent/data/hdp-select-set-all.performed ; " \
      "ambari-sudo.sh /usr/bin/hdp-select set all `ambari-python-wrap /usr/bin/hdp-select versions | grep ^2.3 | tail -1`",
      only_if = "ls -d /usr/hdp/2.3*",
      not_if = "test -f /var/lib/ambari-agent/data/hdp-select-set-all.performed")


    self.assertResourceCalled('XmlConfig', 'core-site.xml',
      owner = 'hdfs',
      group = 'hadoop',
      conf_dir = "/usr/hdp/current/hadoop-client/conf",
      configurations = self.getConfig()['configurations']['core-site'],
      configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
      only_if="ls /usr/hdp/current/hadoop-client/conf")

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/ranger/kms/conf', '/etc/ranger/kms/conf.install'),
        not_if = 'test -e /etc/ranger/kms/conf.install',
        sudo = True,)
    self.assertResourceCalled('Directory', '/etc/ranger/kms/conf',
        action = ['delete'],)
    self.assertResourceCalled('Link', '/etc/ranger/kms/conf',
        to = '/usr/hdp/current/ranger-kms/conf',)

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/zookeeper/conf', '/etc/zookeeper/conf.install'),
        not_if = 'test -e /etc/zookeeper/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/zookeeper/conf',
        to = '/usr/hdp/current/zookeeper-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/pig/conf', '/etc/pig/conf.install'),
        not_if = 'test -e /etc/pig/conf.install',
        sudo = True,)
    self.assertResourceCalled('Directory', '/etc/pig/conf',
        action = ['delete'],)
    self.assertResourceCalled('Link', '/etc/pig/conf',
        to = '/usr/hdp/current/pig-client/conf',)

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/tez/conf', '/etc/tez/conf.install'),
        not_if = 'test -e /etc/tez/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/tez/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/tez/conf',
        to = '/usr/hdp/current/tez-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/hive-webhcat/conf', '/etc/hive-webhcat/conf.install'),
        not_if = 'test -e /etc/hive-webhcat/conf.install',
        sudo = True,)
    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/hive-hcatalog/conf', '/etc/hive-hcatalog/conf.install'),
        not_if = 'test -e /etc/hive-hcatalog/conf.install',
        sudo = True,)

    self.assertResourceCalled('Directory', '/etc/hive-webhcat/conf',
        action = ['delete'],)
    self.assertResourceCalled('Link', '/etc/hive-webhcat/conf',
        to = '/usr/hdp/current/hive-webhcat/etc/webhcat',)

    self.assertResourceCalled('Directory', '/etc/hive-hcatalog/conf',
        action = ['delete'],)
    self.assertResourceCalled('Link', '/etc/hive-hcatalog/conf',
        to = '/usr/hdp/current/hive-webhcat/etc/hcatalog',)

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/hbase/conf', '/etc/hbase/conf.install'),
        not_if = 'test -e /etc/hbase/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/hbase/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/hbase/conf',
        to = '/usr/hdp/current/hbase-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/knox/conf', '/etc/knox/conf.install'),
        not_if = 'test -e /etc/knox/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/knox/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/knox/conf',
        to = '/usr/hdp/current/knox-server/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/ranger/usersync/conf', '/etc/ranger/usersync/conf.install'),
        not_if = 'test -e /etc/ranger/usersync/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/ranger/usersync/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/ranger/usersync/conf',
        to = '/usr/hdp/current/ranger-usersync/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/hadoop/conf', '/etc/hadoop/conf.install'),
        not_if = 'test -e /etc/hadoop/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/hadoop/conf',
        to = '/usr/hdp/current/hadoop-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/mahout/conf', '/etc/mahout/conf.install'),
        not_if = 'test -e /etc/mahout/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/mahout/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/mahout/conf',
        to = '/usr/hdp/current/mahout-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/storm/conf', '/etc/storm/conf.install'),
        not_if = 'test -e /etc/storm/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/storm/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/storm/conf',
        to = '/usr/hdp/current/storm-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/ranger/admin/conf', '/etc/ranger/admin/conf.install'),
        not_if = 'test -e /etc/ranger/admin/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/ranger/admin/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/ranger/admin/conf',
        to = '/usr/hdp/current/ranger-admin/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/flume/conf', '/etc/flume/conf.install'),
        not_if = 'test -e /etc/flume/conf.install',
        sudo = True,)
    self.assertResourceCalled('Directory', '/etc/flume/conf',
        action = ['delete'],)
    self.assertResourceCalled('Link', '/etc/flume/conf',
        to = '/usr/hdp/current/flume-server/conf',)

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/sqoop/conf', '/etc/sqoop/conf.install'),
        not_if = 'test -e /etc/sqoop/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/sqoop/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/sqoop/conf',
        to = '/usr/hdp/current/sqoop-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/accumulo/conf', '/etc/accumulo/conf.install'),
        not_if = 'test -e /etc/accumulo/conf.install',
        sudo = True,)
    self.assertResourceCalled('Directory', '/etc/accumulo/conf',
        action = ['delete'],)
    self.assertResourceCalled('Link', '/etc/accumulo/conf',
        to = '/usr/hdp/current/accumulo-client/conf',)

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/phoenix/conf', '/etc/phoenix/conf.install'),
        not_if = 'test -e /etc/phoenix/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/phoenix/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/phoenix/conf',
        to = '/usr/hdp/current/phoenix-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/storm-slider-client/conf', '/etc/storm-slider-client/conf.install'),
        not_if = 'test -e /etc/storm-slider-client/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/storm-slider-client/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/storm-slider-client/conf',
        to = '/usr/hdp/current/storm-slider-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/slider/conf', '/etc/slider/conf.install'),
        not_if = 'test -e /etc/slider/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/slider/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/slider/conf',
        to = '/usr/hdp/current/slider-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/oozie/conf', '/etc/oozie/conf.install'),
        not_if = 'test -e /etc/oozie/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/oozie/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/oozie/conf',
        to = '/usr/hdp/current/oozie-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/falcon/conf', '/etc/falcon/conf.install'),
        not_if = 'test -e /etc/falcon/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/falcon/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/falcon/conf',
        to = '/usr/hdp/current/falcon-client/conf')


    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/spark/conf', '/etc/spark/conf.install'),
        not_if = 'test -e /etc/spark/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/spark/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/spark/conf',
        to = '/usr/hdp/current/spark-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/kafka/conf', '/etc/kafka/conf.install'),
        not_if = 'test -e /etc/kafka/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/kafka/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/kafka/conf',
        to = '/usr/hdp/current/kafka-broker/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/hive/conf', '/etc/hive/conf.install'),
        not_if = 'test -e /etc/hive/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/hive/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/hive/conf',
        to = '/usr/hdp/current/hive-client/conf')

    self.assertNoMoreResources()

  @patch("shared_initialization.load_version", new = MagicMock(return_value="2.3.0.0-1243"))
  @patch("resource_management.libraries.functions.conf_select.create")
  @patch("resource_management.libraries.functions.conf_select.select")
  @patch("os.symlink")
  @patch("shutil.rmtree")
  def test_hook_default_conf_select_with_error(self, rmtree_mock, symlink_mock, conf_select_select_mock, conf_select_create_mock):
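    # Same flow as above, except conf_select.create raises for pig (outside of
    # dry_run); the hook should skip pig's delete/re-link and keep processing
    # the remaining components.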

    def mocked_conf_select(arg1, arg2, arg3, dry_run = False):
      if arg2 == "pig" and not dry_run:
        raise Exception("whoops")
      return "/etc/{0}/{1}/0".format(arg2, arg3)

    conf_select_create_mock.side_effect = mocked_conf_select

    conf_select_select_mock.side_effect = mocked_conf_select

    config_file = self.get_src_folder() + "/test/python/stacks/2.0.6/configs/default.json"
    with open(config_file, "r") as f:
      json_content = json.load(f)

    version = '2.3.0.0-1234'
    json_content['commandParams']['version'] = version
    json_content['hostLevelParams']['stack_version'] = "2.3"

    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
                       classname="AfterInstallHook",
                       command="hook",
                       config_dict = json_content)


    self.assertResourceCalled("Execute",
      "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E touch /var/lib/ambari-agent/data/hdp-select-set-all.performed ; " \
      "ambari-sudo.sh /usr/bin/hdp-select set all `ambari-python-wrap /usr/bin/hdp-select versions | grep ^2.3 | tail -1`",
      only_if = "ls -d /usr/hdp/2.3*",
      not_if = "test -f /var/lib/ambari-agent/data/hdp-select-set-all.performed")


    self.assertResourceCalled('XmlConfig', 'core-site.xml',
      owner = 'hdfs',
      group = 'hadoop',
      conf_dir = "/usr/hdp/current/hadoop-client/conf",
      configurations = self.getConfig()['configurations']['core-site'],
      configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
      only_if="ls /usr/hdp/current/hadoop-client/conf")

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/ranger/kms/conf', '/etc/ranger/kms/conf.install'),
        not_if = 'test -e /etc/ranger/kms/conf.install',
        sudo = True,)
    self.assertResourceCalled('Directory', '/etc/ranger/kms/conf',
        action = ['delete'],)
    self.assertResourceCalled('Link', '/etc/ranger/kms/conf',
        to = '/usr/hdp/current/ranger-kms/conf',)

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/zookeeper/conf', '/etc/zookeeper/conf.install'),
        not_if = 'test -e /etc/zookeeper/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/zookeeper/conf',
        to = '/usr/hdp/current/zookeeper-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/pig/conf', '/etc/pig/conf.install'),
        not_if = 'test -e /etc/pig/conf.install',
        sudo = True,)
    # pig fails, so no Directory/Link combo

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/tez/conf', '/etc/tez/conf.install'),
        not_if = 'test -e /etc/tez/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/tez/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/tez/conf',
        to = '/usr/hdp/current/tez-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/hive-webhcat/conf', '/etc/hive-webhcat/conf.install'),
        not_if = 'test -e /etc/hive-webhcat/conf.install',
        sudo = True,)
    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/hive-hcatalog/conf', '/etc/hive-hcatalog/conf.install'),
        not_if = 'test -e /etc/hive-hcatalog/conf.install',
        sudo = True,)

    self.assertResourceCalled('Directory', '/etc/hive-webhcat/conf',
        action = ['delete'],)
    self.assertResourceCalled('Link', '/etc/hive-webhcat/conf',
        to = '/usr/hdp/current/hive-webhcat/etc/webhcat',)

    self.assertResourceCalled('Directory', '/etc/hive-hcatalog/conf',
        action = ['delete'],)
    self.assertResourceCalled('Link', '/etc/hive-hcatalog/conf',
        to = '/usr/hdp/current/hive-webhcat/etc/hcatalog',)

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/hbase/conf', '/etc/hbase/conf.install'),
        not_if = 'test -e /etc/hbase/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/hbase/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/hbase/conf',
        to = '/usr/hdp/current/hbase-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/knox/conf', '/etc/knox/conf.install'),
        not_if = 'test -e /etc/knox/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/knox/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/knox/conf',
        to = '/usr/hdp/current/knox-server/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/ranger/usersync/conf', '/etc/ranger/usersync/conf.install'),
        not_if = 'test -e /etc/ranger/usersync/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/ranger/usersync/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/ranger/usersync/conf',
        to = '/usr/hdp/current/ranger-usersync/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/hadoop/conf', '/etc/hadoop/conf.install'),
        not_if = 'test -e /etc/hadoop/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/hadoop/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/hadoop/conf',
        to = '/usr/hdp/current/hadoop-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/mahout/conf', '/etc/mahout/conf.install'),
        not_if = 'test -e /etc/mahout/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/mahout/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/mahout/conf',
        to = '/usr/hdp/current/mahout-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/storm/conf', '/etc/storm/conf.install'),
        not_if = 'test -e /etc/storm/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/storm/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/storm/conf',
        to = '/usr/hdp/current/storm-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/ranger/admin/conf', '/etc/ranger/admin/conf.install'),
        not_if = 'test -e /etc/ranger/admin/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/ranger/admin/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/ranger/admin/conf',
        to = '/usr/hdp/current/ranger-admin/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/flume/conf', '/etc/flume/conf.install'),
        not_if = 'test -e /etc/flume/conf.install',
        sudo = True,)
    self.assertResourceCalled('Directory', '/etc/flume/conf',
        action = ['delete'],)
    self.assertResourceCalled('Link', '/etc/flume/conf',
        to = '/usr/hdp/current/flume-server/conf',)

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/sqoop/conf', '/etc/sqoop/conf.install'),
        not_if = 'test -e /etc/sqoop/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/sqoop/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/sqoop/conf',
        to = '/usr/hdp/current/sqoop-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/accumulo/conf', '/etc/accumulo/conf.install'),
        not_if = 'test -e /etc/accumulo/conf.install',
        sudo = True,)
    self.assertResourceCalled('Directory', '/etc/accumulo/conf',
        action = ['delete'],)
    self.assertResourceCalled('Link', '/etc/accumulo/conf',
        to = '/usr/hdp/current/accumulo-client/conf',)

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/phoenix/conf', '/etc/phoenix/conf.install'),
        not_if = 'test -e /etc/phoenix/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/phoenix/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/phoenix/conf',
        to = '/usr/hdp/current/phoenix-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/storm-slider-client/conf', '/etc/storm-slider-client/conf.install'),
        not_if = 'test -e /etc/storm-slider-client/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/storm-slider-client/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/storm-slider-client/conf',
        to = '/usr/hdp/current/storm-slider-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/slider/conf', '/etc/slider/conf.install'),
        not_if = 'test -e /etc/slider/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/slider/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/slider/conf',
        to = '/usr/hdp/current/slider-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/oozie/conf', '/etc/oozie/conf.install'),
        not_if = 'test -e /etc/oozie/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/oozie/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/oozie/conf',
        to = '/usr/hdp/current/oozie-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/falcon/conf', '/etc/falcon/conf.install'),
        not_if = 'test -e /etc/falcon/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/falcon/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/falcon/conf',
        to = '/usr/hdp/current/falcon-client/conf')


    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/spark/conf', '/etc/spark/conf.install'),
        not_if = 'test -e /etc/spark/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/spark/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/spark/conf',
        to = '/usr/hdp/current/spark-client/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/kafka/conf', '/etc/kafka/conf.install'),
        not_if = 'test -e /etc/kafka/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/kafka/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/kafka/conf',
        to = '/usr/hdp/current/kafka-broker/conf')

    self.assertResourceCalled('Execute', ('cp', '-R', '-p', '/etc/hive/conf', '/etc/hive/conf.install'),
        not_if = 'test -e /etc/hive/conf.install',
        sudo = True)
    self.assertResourceCalled('Directory', '/etc/hive/conf',
        action = ['delete'])
    self.assertResourceCalled('Link', '/etc/hive/conf',
        to = '/usr/hdp/current/hive-client/conf')

    self.assertNoMoreResources()
Example #17
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *

@patch("os.path.exists", new = MagicMock(return_value=True))
@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
class TestZookeeperClient(RMFTestCase):
  COMMON_SERVICES_PACKAGE_DIR = "ZOOKEEPER/3.4.5.2.0/package"
  STACK_VERSION = "2.0.6"

  def test_configure_default(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_client.py",
                       classname = "ZookeeperClient",
                       command = "configure",
                       config_file = "default.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )

    self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
Example #18
    def test_create_wallet_testnet_created(self):
        plebnet_settings.Init.wallets_testnet_created = MagicMock(
            return_value=True)
        assert walletcontroller.create_wallet('TBTC')
Example #19
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
import tempfile
import tarfile
import contextlib
from resource_management import *
from stacks.utils.RMFTestCase import *


@patch("platform.linux_distribution", new=MagicMock(return_value="Linux"))
@patch.object(tarfile, "open", new=MagicMock())
@patch.object(tempfile, "mkdtemp", new=MagicMock(return_value='/tmp/123'))
@patch.object(contextlib, "closing", new=MagicMock())
@patch("os.path.exists", new=MagicMock(return_value=True))
class Test(RMFTestCase):
    COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
    STACK_VERSION = "2.0.6"

    def test_generate_configs_default(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR +
                           "/scripts/hdfs_client.py",
                           classname="HdfsClient",
                           command="generate_configs",
                           config_file="default.json",
                           hdp_stack_version=self.STACK_VERSION,
Example #20
    def test_create_wallet_no_market(self):
        marketcontroller.is_market_running = MagicMock(return_value=False)
        self.assertFalse(walletcontroller.create_wallet('TBTC'))
Example #21
    def test_auto_execute_command(self, status_update_callback_mock,
                                  open_mock):
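        # AUTO_EXECUTION_COMMAND results should never be queued as reports,
        # whether the command completes successfully or fails.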
        # Make file read calls visible
        def open_side_effect(file, mode):
            if mode == 'r':
                file_mock = MagicMock()
                file_mock.read.return_value = "Read from " + str(file)
                return file_mock
            else:
                return self.original_open(file, mode)

        open_mock.side_effect = open_side_effect

        config = AmbariConfig()
        tempdir = tempfile.gettempdir()
        config.set('agent', 'prefix', tempdir)
        config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
        config.set('agent', 'tolerate_download_failures', "true")
        dummy_controller = MagicMock()
        dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp())
        dummy_controller.recovery_manager.update_config(
            5, 5, 1, 11, True, False, "")

        actionQueue = ActionQueue(config, dummy_controller)
        unfreeze_flag = threading.Event()
        python_execution_result_dict = {
            'stdout': 'out',
            'stderr': 'stderr',
            'structuredOut': ''
        }

        def side_effect(command,
                        tmpoutfile,
                        tmperrfile,
                        override_output_files=True,
                        retry=False):
            unfreeze_flag.wait()
            return python_execution_result_dict

        def patched_aq_execute_command(command):
            # We have to perform patching for separate thread in the same thread
            with patch.object(CustomServiceOrchestrator,
                              "runCommand") as runCommand_mock:
                runCommand_mock.side_effect = side_effect
                actionQueue.process_command(command)

        python_execution_result_dict['status'] = 'COMPLETE'
        python_execution_result_dict['exitcode'] = 0
        self.assertFalse(actionQueue.tasks_in_progress_or_pending())
        # We call method in a separate thread
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_auto_start_command, ))
        execution_thread.start()
        #  check in progress report
        # wait until ready
        while True:
            time.sleep(0.1)
            if actionQueue.tasks_in_progress_or_pending():
                break
        # Continue command execution
        unfreeze_flag.set()
        # wait until ready
        check_queue = True
        while check_queue:
            report = actionQueue.result()
            if not actionQueue.tasks_in_progress_or_pending():
                break
            time.sleep(0.1)

        self.assertEqual(len(report['reports']), 0)

        ## Test failed execution
        python_execution_result_dict['status'] = 'FAILED'
        python_execution_result_dict['exitcode'] = 13
        # We call method in a separate thread
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_auto_start_command, ))
        execution_thread.start()
        unfreeze_flag.set()
        #  check in progress report
        # wait until ready
        while check_queue:
            report = actionQueue.result()
            if not actionQueue.tasks_in_progress_or_pending():
                break
            time.sleep(0.1)

        self.assertEqual(len(report['reports']), 0)
Example #22
class TestRepositoryResource(TestCase):
    @patch.object(System, "os_family", new='redhat')
    @patch("resource_management.libraries.providers.repository.File")
    def test_create_repo_redhat(self, file_mock):
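        # On the redhat family the provider should render the repo template
        # into /etc/yum.repos.d/<repo_file_name>.repo.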
        with Environment('/') as env:
            with patch.object(
                    repository,
                    "Template",
                    new=DummyTemplate.create(RHEL_SUSE_DEFAULT_TEMPLATE)):
                Repository(
                    'hadoop',
                    base_url='http://download.base_url.org/rpm/',
                    mirror_list=
                    'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                    repo_file_name='Repository',
                    repo_template='dummy.j2')

                self.assertTrue('hadoop' in env.resources['Repository'])
                defined_arguments = env.resources['Repository'][
                    'hadoop'].arguments
                expected_arguments = {
                    'repo_template': 'dummy.j2',
                    'base_url': 'http://download.base_url.org/rpm/',
                    'mirror_list':
                    'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                    'repo_file_name': 'Repository'
                }
                expected_template_arguments = {
                    'base_url': 'http://download.base_url.org/rpm/',
                    'mirror_list':
                    'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                    'repo_file_name': 'Repository'
                }

                self.assertEqual(defined_arguments, expected_arguments)
                self.assertEqual(file_mock.call_args[0][0],
                                 '/etc/yum.repos.d/Repository.repo')

                template_item = file_mock.call_args[1]['content']
                template = str(template_item.name)
                expected_template_arguments.update({'repo_id': 'hadoop'})

                self.assertEqual(expected_template_arguments,
                                 template_item.context._dict)
                self.assertEqual('dummy.j2', template)

    @patch.object(System, "os_family", new='suse')
    @patch("resource_management.libraries.providers.repository.File")
    def test_create_repo_suse(self, file_mock):
        with Environment('/') as env:
            with patch.object(
                    repository,
                    "Template",
                    new=DummyTemplate.create(RHEL_SUSE_DEFAULT_TEMPLATE)):
                Repository(
                    'hadoop',
                    base_url='http://download.base_url.org/rpm/',
                    mirror_list=
                    'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                    repo_template="dummy.j2",
                    repo_file_name='Repository')

                self.assertTrue('hadoop' in env.resources['Repository'])
                defined_arguments = env.resources['Repository'][
                    'hadoop'].arguments
                expected_arguments = {
                    'repo_template': 'dummy.j2',
                    'mirror_list':
                    'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                    'base_url': 'http://download.base_url.org/rpm/',
                    'repo_file_name': 'Repository'
                }
                expected_template_arguments = {
                    'mirror_list':
                    'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                    'base_url': 'http://download.base_url.org/rpm/',
                    'repo_file_name': 'Repository'
                }

                self.assertEqual(defined_arguments, expected_arguments)
                self.assertEqual(file_mock.call_args[0][0],
                                 '/etc/zypp/repos.d/Repository.repo')

                template_item = file_mock.call_args[1]['content']
                template = str(template_item.name)
                expected_template_arguments.update({'repo_id': 'hadoop'})

                self.assertEqual(expected_template_arguments,
                                 template_item.context._dict)
                self.assertEqual('dummy.j2', template)

    @patch("resource_management.libraries.providers.repository.checked_call")
    @patch.object(tempfile, "NamedTemporaryFile")
    @patch("resource_management.libraries.providers.repository.Execute")
    @patch("resource_management.libraries.providers.repository.File")
    @patch("os.path.isfile", new=MagicMock(return_value=True))
    @patch("filecmp.cmp", new=MagicMock(return_value=False))
    @patch.object(System, "os_release_name", new='precise')
    @patch.object(System, "os_family", new='ubuntu')
    def test_create_repo_ubuntu_repo_exists(self, file_mock, execute_mock,
                                            tempfile_mock, checked_call_mock):
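        # The rendered sources.list entry differs from the one on disk, so the
        # provider should copy it into /etc/apt/sources.list.d, run apt-get
        # update and import the missing GPG key that apt reports.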
        tempfile_mock.return_value = MagicMock(spec=file)
        tempfile_mock.return_value.__enter__.return_value.name = "/tmp/1.txt"
        checked_call_mock.return_value = 0, "The following signatures couldn't be verified because the public key is not available: NO_PUBKEY 123ABCD"

        with Environment('/') as env:
            with patch.object(
                    repository,
                    "Template",
                    new=DummyTemplate.create(DEBIAN_DEFAUTL_TEMPLATE)):
                Repository('HDP',
                           base_url='http://download.base_url.org/rpm/',
                           repo_file_name='HDP',
                           repo_template="dummy.j2",
                           components=['a', 'b', 'c'])

        call_content = file_mock.call_args_list[0]
        template_name = call_content[0][0]
        template_content = call_content[1]['content']

        self.assertEquals(template_name, '/tmp/1.txt')
        self.assertEquals(template_content,
                          'deb http://download.base_url.org/rpm/ a b c\n')

        copy_item = str(file_mock.call_args_list[1])
        self.assertEqual(
            copy_item,
            "call('/etc/apt/sources.list.d/HDP.list', content=StaticFile('/tmp/1.txt'))"
        )
        #'apt-get update -qq -o Dir::Etc::sourcelist="sources.list.d/HDP.list" -o APT::Get::List-Cleanup="0"')
        execute_command_item = execute_mock.call_args_list[0][0][0]

        self.assertEqual(checked_call_mock.call_args_list[0][0][0], [
            'apt-get', 'update', '-qq', '-o',
            'Dir::Etc::sourcelist=sources.list.d/HDP.list', '-o',
            'APT::Get::List-Cleanup=0'
        ])
        self.assertEqual(
            execute_command_item,
            'apt-key adv --recv-keys --keyserver keyserver.ubuntu.com 123ABCD')

    @patch("resource_management.libraries.providers.repository.checked_call")
    @patch.object(tempfile, "NamedTemporaryFile")
    @patch("resource_management.libraries.providers.repository.Execute")
    @patch("resource_management.libraries.providers.repository.File")
    @patch("os.path.isfile", new=MagicMock(return_value=True))
    @patch("filecmp.cmp", new=MagicMock(return_value=False))
    @patch.object(System, "os_release_name", new='precise')
    @patch.object(System, "os_family", new='ubuntu')
    def test_create_repo_ubuntu_gpg_key_wrong_output(self, file_mock,
                                                     execute_mock,
                                                     tempfile_mock,
                                                     checked_call_mock):
        """
      Checks that GPG key is extracted from output without \r sign
      """
        tempfile_mock.return_value = MagicMock(spec=file)
        tempfile_mock.return_value.__enter__.return_value.name = "/tmp/1.txt"
        checked_call_mock.return_value = 0, "The following signatures couldn't be verified because the public key is not available: NO_PUBKEY 123ABCD\r\n"

        with Environment('/') as env:
            with patch.object(
                    repository,
                    "Template",
                    new=DummyTemplate.create(DEBIAN_DEFAUTL_TEMPLATE)):
                Repository('HDP',
                           base_url='http://download.base_url.org/rpm/',
                           repo_file_name='HDP',
                           repo_template="dummy.j2",
                           components=['a', 'b', 'c'])

        call_content = file_mock.call_args_list[0]
        template_name = call_content[0][0]
        template_content = call_content[1]['content']

        self.assertEquals(template_name, '/tmp/1.txt')
        self.assertEquals(template_content,
                          'deb http://download.base_url.org/rpm/ a b c\n')

        copy_item = str(file_mock.call_args_list[1])
        self.assertEqual(
            copy_item,
            "call('/etc/apt/sources.list.d/HDP.list', content=StaticFile('/tmp/1.txt'))"
        )
        execute_command_item = execute_mock.call_args_list[0][0][0]

        self.assertEqual(checked_call_mock.call_args_list[0][0][0], [
            'apt-get', 'update', '-qq', '-o',
            'Dir::Etc::sourcelist=sources.list.d/HDP.list', '-o',
            'APT::Get::List-Cleanup=0'
        ])
        self.assertEqual(
            execute_command_item,
            'apt-key adv --recv-keys --keyserver keyserver.ubuntu.com 123ABCD')

    @patch.object(tempfile, "NamedTemporaryFile")
    @patch("resource_management.libraries.providers.repository.Execute")
    @patch("resource_management.libraries.providers.repository.File")
    @patch("os.path.isfile", new=MagicMock(return_value=True))
    @patch("filecmp.cmp", new=MagicMock(return_value=True))
    @patch.object(System, "os_release_name", new='precise')
    @patch.object(System, "os_family", new='ubuntu')
    def test_create_repo_ubuntu_doesnt_repo_exist(self, file_mock,
                                                  execute_mock, tempfile_mock):
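        # The rendered content matches the existing repo file (filecmp.cmp is
        # True), so neither the copy into sources.list.d nor apt-get update
        # should happen.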
        tempfile_mock.return_value = MagicMock(spec=file)
        tempfile_mock.return_value.__enter__.return_value.name = "/tmp/1.txt"

        with Environment('/') as env:
            with patch.object(
                    repository,
                    "Template",
                    new=DummyTemplate.create(DEBIAN_DEFAUTL_TEMPLATE)):
                Repository('HDP',
                           base_url='http://download.base_url.org/rpm/',
                           repo_file_name='HDP',
                           repo_template="dummy.j2",
                           components=['a', 'b', 'c'])

        call_content = file_mock.call_args_list[0]
        template_name = call_content[0][0]
        template_content = call_content[1]['content']

        self.assertEquals(template_name, '/tmp/1.txt')
        self.assertEquals(template_content,
                          'deb http://download.base_url.org/rpm/ a b c\n')

        self.assertEqual(file_mock.call_count, 1)
        self.assertEqual(execute_mock.call_count, 0)

    @patch("os.path.isfile", new=MagicMock(return_value=True))
    @patch.object(System, "os_family", new='ubuntu')
    @patch("resource_management.libraries.providers.repository.Execute")
    @patch("resource_management.libraries.providers.repository.File")
    def test_remove_repo_ubuntu_repo_exist(self, file_mock, execute_mock):
        with Environment('/') as env:
            Repository('HDP', action="remove", repo_file_name='HDP')

        self.assertEqual(
            str(file_mock.call_args),
            "call('/etc/apt/sources.list.d/HDP.list', action='delete')")
        self.assertEqual(execute_mock.call_args[0][0], [
            'apt-get', 'update', '-qq', '-o',
            'Dir::Etc::sourcelist=sources.list.d/HDP.list', '-o',
            'APT::Get::List-Cleanup=0'
        ])

    @patch("os.path.isfile", new=MagicMock(return_value=False))
    @patch.object(System, "os_family", new='ubuntu')
    @patch("resource_management.libraries.providers.repository.Execute")
    @patch("resource_management.libraries.providers.repository.File")
    def test_remove_repo_ubuntu_repo_doenst_exist(self, file_mock,
                                                  execute_mock):
        with Environment('/') as env:
            Repository('HDP', action="remove", repo_file_name='HDP')

        self.assertEqual(file_mock.call_count, 0)
        self.assertEqual(execute_mock.call_count, 0)

    @patch.object(System, "os_family", new='redhat')
    @patch("resource_management.libraries.providers.repository.File")
    def test_remove_repo_redhat(self, file_mock):
        with Environment('/') as env:
            Repository(
                'hadoop',
                action='remove',
                base_url='http://download.base_url.org/rpm/',
                mirror_list=
                'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                repo_file_name='Repository')

            self.assertTrue('hadoop' in env.resources['Repository'])
            defined_arguments = env.resources['Repository']['hadoop'].arguments
            expected_arguments = {
                'action': ['remove'],
                'base_url': 'http://download.base_url.org/rpm/',
                'mirror_list':
                'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                'repo_file_name': 'Repository'
            }
            self.assertEqual(defined_arguments, expected_arguments)
            self.assertEqual(file_mock.call_args[1]['action'], 'delete')
            self.assertEqual(file_mock.call_args[0][0],
                             '/etc/yum.repos.d/Repository.repo')

    @patch.object(System, "os_family", new='suse')
    @patch("resource_management.libraries.providers.repository.File")
    def test_remove_repo_suse(self, file_mock):
        with Environment('/') as env:
            Repository(
                'hadoop',
                action='remove',
                base_url='http://download.base_url.org/rpm/',
                mirror_list=
                'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                repo_file_name='Repository')

            self.assertTrue('hadoop' in env.resources['Repository'])
            defined_arguments = env.resources['Repository']['hadoop'].arguments
            expected_arguments = {
                'action': ['remove'],
                'base_url': 'http://download.base_url.org/rpm/',
                'mirror_list':
                'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                'repo_file_name': 'Repository'
            }
            self.assertEqual(defined_arguments, expected_arguments)
            self.assertEqual(file_mock.call_args[1]['action'], 'delete')
            self.assertEqual(file_mock.call_args[0][0],
                             '/etc/zypp/repos.d/Repository.repo')
Example #23
    def test_execute_command(self, status_update_callback_mock, open_mock):
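        # Drives an EXECUTION_COMMAND through IN_PROGRESS and COMPLETED, then
        # through a FAILED run, then through an upgrade command, checking the
        # report contents produced at each stage.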
        # Make file read calls visible
        def open_side_effect(file, mode):
            if mode == 'r':
                file_mock = MagicMock()
                file_mock.read.return_value = "Read from " + str(file)
                return file_mock
            else:
                return self.original_open(file, mode)

        open_mock.side_effect = open_side_effect

        config = AmbariConfig()
        tempdir = tempfile.gettempdir()
        config.set('agent', 'prefix', tempdir)
        config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
        config.set('agent', 'tolerate_download_failures', "true")
        dummy_controller = MagicMock()
        actionQueue = ActionQueue(config, dummy_controller)
        unfreeze_flag = threading.Event()
        python_execution_result_dict = {
            'stdout': 'out',
            'stderr': 'stderr',
            'structuredOut': ''
        }

        def side_effect(command,
                        tmpoutfile,
                        tmperrfile,
                        override_output_files=True,
                        retry=False):
            unfreeze_flag.wait()
            return python_execution_result_dict

        def patched_aq_execute_command(command):
            # We have to perform patching for separate thread in the same thread
            with patch.object(CustomServiceOrchestrator,
                              "runCommand") as runCommand_mock:
                runCommand_mock.side_effect = side_effect
                actionQueue.execute_command(command)

        ### Test install/start/stop command ###
        ## Test successful execution with configuration tags
        python_execution_result_dict['status'] = 'COMPLETE'
        python_execution_result_dict['exitcode'] = 0
        # We call method in a separate thread
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_install_command, ))
        execution_thread.start()
        #  check in progress report
        # wait until ready
        while True:
            time.sleep(0.1)
            report = actionQueue.result()
            if len(report['reports']) != 0:
                break
        expected = {
            'status':
            'IN_PROGRESS',
            'stderr':
            'Read from {0}'.format(os.path.join(tempdir, "errors-3.txt")),
            'stdout':
            'Read from {0}'.format(os.path.join(tempdir, "output-3.txt")),
            'structuredOut':
            'Read from {0}'.format(
                os.path.join(tempdir, "structured-out-3.json")),
            'clusterName':
            u'cc',
            'roleCommand':
            u'INSTALL',
            'serviceName':
            u'HDFS',
            'role':
            u'DATANODE',
            'actionId':
            '1-1',
            'taskId':
            3,
            'exitCode':
            777
        }
        self.assertEqual(report['reports'][0], expected)
        self.assertTrue(actionQueue.tasks_in_progress_or_pending())

        # Continue command execution
        unfreeze_flag.set()
        # wait until ready
        while report['reports'][0]['status'] == 'IN_PROGRESS':
            time.sleep(0.1)
            report = actionQueue.result()
        # check report
        configname = os.path.join(tempdir, 'config.json')
        expected = {
            'status': 'COMPLETED',
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': u'cc',
            'structuredOut': '""',
            'roleCommand': u'INSTALL',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'taskId': 3,
            'configurationTags': {
                'global': {
                    'tag': 'v1'
                }
            },
            'exitCode': 0
        }
        self.assertEqual(len(report['reports']), 1)
        self.assertEqual(report['reports'][0], expected)
        self.assertTrue(os.path.isfile(configname))
        # Check that we had 2 status update calls ( IN_PROGRESS and COMPLETE)
        self.assertEqual(status_update_callback_mock.call_count, 2)
        os.remove(configname)

        # now should not have reports (read complete/failed reports are deleted)
        report = actionQueue.result()
        self.assertEqual(len(report['reports']), 0)

        ## Test failed execution
        python_execution_result_dict['status'] = 'FAILED'
        python_execution_result_dict['exitcode'] = 13
        # We call method in a separate thread
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_install_command, ))
        execution_thread.start()
        unfreeze_flag.set()
        #  check in progress report
        # wait until ready
        report = actionQueue.result()
        while len(report['reports']) == 0 or \
                        report['reports'][0]['status'] == 'IN_PROGRESS':
            time.sleep(0.1)
            report = actionQueue.result()
            # check report
        expected = {
            'status': 'FAILED',
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': u'cc',
            'structuredOut': '""',
            'roleCommand': u'INSTALL',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'taskId': 3,
            'exitCode': 13
        }
        self.assertEqual(len(report['reports']), 1)
        self.assertEqual(report['reports'][0], expected)

        # now should not have reports (read complete/failed reports are deleted)
        report = actionQueue.result()
        self.assertEqual(len(report['reports']), 0)

        ### Test upgrade command ###
        python_execution_result_dict['status'] = 'COMPLETE'
        python_execution_result_dict['exitcode'] = 0
        execution_thread = Thread(target=patched_aq_execute_command,
                                  args=(self.datanode_upgrade_command, ))
        execution_thread.start()
        unfreeze_flag.set()
        # wait until ready
        report = actionQueue.result()
        while len(report['reports']) == 0 or \
                        report['reports'][0]['status'] == 'IN_PROGRESS':
            time.sleep(0.1)
            report = actionQueue.result()
        # check report
        expected = {
            'status': 'COMPLETED',
            'stderr': 'stderr',
            'stdout': 'out',
            'clusterName': 'clusterName',
            'structuredOut': '""',
            'roleCommand': 'UPGRADE',
            'serviceName': 'serviceName',
            'role': 'role',
            'actionId': 17,
            'taskId': 'taskId',
            'exitCode': 0
        }
        self.assertEqual(len(report['reports']), 1)
        self.assertEqual(report['reports'][0], expected)

        # now should not have reports (read complete/failed reports are deleted)
        report = actionQueue.result()
        self.assertEqual(len(report['reports']), 0)
Example #24
  def test_build_long_result(self, result_mock):
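    # Feeds a mixed batch of IN_PROGRESS/COMPLETED/FAILED reports through
    # Heartbeat.build and compares the resulting payload field by field.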
    config = AmbariConfig.AmbariConfig()
    config.set('agent', 'prefix', 'tmp')
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")
    dummy_controller = MagicMock()
    dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp())
    actionQueue = ActionQueue(config, dummy_controller)
    result_mock.return_value = {
      'reports': [{'status': 'IN_PROGRESS',
            'stderr': 'Read from /tmp/errors-3.txt',
            'stdout': 'Read from /tmp/output-3.txt',
            'clusterName': u'cc',
            'roleCommand': u'INSTALL',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'taskId': 3,
            'exitCode': 777},

            {'status': 'COMPLETED',
             'stderr': 'stderr',
             'stdout': 'out',
             'clusterName': 'clusterName',
             'roleCommand': 'UPGRADE',
             'serviceName': 'serviceName',
             'role': 'role',
             'actionId': 17,
             'taskId': 'taskId',
             'exitCode': 0},

            {'status': 'FAILED',
             'stderr': 'stderr',
             'stdout': 'out',
             'clusterName': u'cc',
             'roleCommand': u'INSTALL',
             'serviceName': u'HDFS',
             'role': u'DATANODE',
             'actionId': '1-1',
             'taskId': 3,
             'exitCode': 13},

            {'status': 'COMPLETED',
             'stderr': 'stderr',
             'stdout': 'out',
             'clusterName': u'cc',
             'configurationTags': {'global': {'tag': 'v1'}},
             'roleCommand': u'INSTALL',
             'serviceName': u'HDFS',
             'role': u'DATANODE',
             'actionId': '1-1',
             'taskId': 3,
             'exitCode': 0}

            ],
      'componentStatus': [
        {'status': 'HEALTHY', 'componentName': 'DATANODE'},
        {'status': 'UNHEALTHY', 'componentName': 'NAMENODE'},
      ],
    }
    heartbeat = Heartbeat(actionQueue)
    hb = heartbeat.build(10)
    hb['hostname'] = 'hostname'
    hb['timestamp'] = 'timestamp'
    expected = {'nodeStatus':
                  {'status': 'HEALTHY',
                   'cause': 'NONE'},
                'recoveryReport': {'summary': 'DISABLED'},
                'recoveryTimestamp': -1,
                'timestamp': 'timestamp', 'hostname': 'hostname',
                'responseId': 10, 'reports': [
      {'status': 'IN_PROGRESS', 'roleCommand': u'INSTALL',
       'serviceName': u'HDFS', 'role': u'DATANODE', 'actionId': '1-1',
       'stderr': 'Read from /tmp/errors-3.txt',
       'stdout': 'Read from /tmp/output-3.txt', 'clusterName': u'cc',
       'taskId': 3, 'exitCode': 777},
      {'status': 'COMPLETED', 'roleCommand': 'UPGRADE',
       'serviceName': 'serviceName', 'role': 'role', 'actionId': 17,
       'stderr': 'stderr', 'stdout': 'out', 'clusterName': 'clusterName',
       'taskId': 'taskId', 'exitCode': 0},
      {'status': 'FAILED', 'roleCommand': u'INSTALL', 'serviceName': u'HDFS',
       'role': u'DATANODE', 'actionId': '1-1', 'stderr': 'stderr',
       'stdout': 'out', 'clusterName': u'cc', 'taskId': 3, 'exitCode': 13},
      {'status': 'COMPLETED', 'stdout': 'out',
       'configurationTags': {'global': {'tag': 'v1'}}, 'taskId': 3,
       'exitCode': 0, 'roleCommand': u'INSTALL', 'clusterName': u'cc',
       'serviceName': u'HDFS', 'role': u'DATANODE', 'actionId': '1-1',
       'stderr': 'stderr'}], 'componentStatus': [
      {'status': 'HEALTHY', 'componentName': 'DATANODE'},
      {'status': 'UNHEALTHY', 'componentName': 'NAMENODE'}]}
    self.assertEqual.__self__.maxDiff = None
    self.assertEquals(hb, expected)
Example #25
from __future__ import (absolute_import, division, print_function)

__metaclass__ = type
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

import pytest
from mock.mock import MagicMock
from ansible_collections.dellemc.powerscale.plugins.module_utils.storage.dell \
    import dellemc_ansible_powerscale_utils as utils

utils.get_logger = MagicMock()
utils.isi_sdk = MagicMock()
from ansible.module_utils import basic
basic.AnsibleModule = MagicMock()

from ansible_collections.dellemc.powerscale.plugins.modules.dellemc_powerscale_accesszone import PowerScaleAccessZone
from ansible_collections.dellemc.powerscale.tests.unit.plugins.\
    module_utils import mock_accesszone_api as MockAccessZoneApi
from ansible_collections.dellemc.powerscale.tests.unit.plugins.module_utils.mock_sdk_response \
    import MockSDKResponse
from ansible_collections.dellemc.powerscale.tests.unit.plugins.module_utils.mock_api_exception \
    import MockApiException


class TestPowerScaleAccessZone():
    get_access_zone_args = {
Example #26
class TestHeartbeat(TestCase):

  def setUp(self):
    # disable stdout
    out = StringIO.StringIO()
    sys.stdout = out


  def tearDown(self):
    # enable stdout
    sys.stdout = sys.__stdout__


  def test_build(self):
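    # A freshly built heartbeat should carry a non-empty hostname, the given
    # responseId, a HEALTHY nodeStatus and no in-progress reports.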
    config = AmbariConfig.AmbariConfig()
    config.set('agent', 'prefix', 'tmp')
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")
    dummy_controller = MagicMock()
    dummy_controller.recovery_manager.recovery_timestamp = -1
    actionQueue = ActionQueue(config, dummy_controller)
    heartbeat = Heartbeat(actionQueue)
    result = heartbeat.build(100)
    print "Heartbeat: " + str(result)
    self.assertEquals(result['hostname'] != '', True, "hostname should not be empty")
    self.assertEquals(result['responseId'], 100)
    self.assertEquals(result['componentStatus'] is not None, True, "Heartbeat should contain componentStatus")
    self.assertEquals(result['reports'] is not None, True, "Heartbeat should contain reports")
    self.assertEquals(result['timestamp'] >= 1353679373880L, True)
    self.assertEquals(result['recoveryTimestamp'], -1)
    self.assertEquals(len(result['nodeStatus']), 2)
    self.assertEquals(result['nodeStatus']['cause'], "NONE")
    self.assertEquals(result['nodeStatus']['status'], "HEALTHY")
    # result may or may NOT have an agentEnv structure in it
    self.assertEquals((len(result) == 7) or (len(result) == 8), True)
    self.assertEquals(not heartbeat.reports, True, "Heartbeat should not contain task in progress")

  @patch("subprocess.Popen")
  @patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
  @patch.object(ActionQueue, "result")
  @patch.object(HostInfoLinux, "register")
  def test_no_mapping(self, register_mock, result_mock, Popen_mock):
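    # HostInfoLinux.register should receive True as its second positional
    # argument for a non-zero response id and False when the id is 0.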
    result_mock.return_value = {
      'reports': [{'status': 'IN_PROGRESS',
                   'stderr': 'Read from /tmp/errors-3.txt',
                   'stdout': 'Read from /tmp/output-3.txt',
                   'clusterName': u'cc',
                   'roleCommand': u'INSTALL',
                   'serviceName': u'HDFS',
                   'role': u'DATANODE',
                   'actionId': '1-1',
                   'taskId': 3,
                   'exitCode': 777}],
      'componentStatus': [{'status': 'HEALTHY', 'componentName': 'NAMENODE'}]
    }
    config = AmbariConfig.AmbariConfig()
    config.set('agent', 'prefix', 'tmp')
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")
    dummy_controller = MagicMock()
    actionQueue = ActionQueue(config, dummy_controller)
    heartbeat = Heartbeat(actionQueue)
    hb = heartbeat.build(id = 10, add_state=True, componentsMapped=True)
    self.assertEqual(register_mock.call_args_list[0][0][1], True)
    register_mock.reset_mock()

    hb = heartbeat.build(id = 0, add_state=True, componentsMapped=True)
    self.assertEqual(register_mock.call_args_list[0][0][1], False)

  @patch.object(ActionQueue, "result")
  def test_build_long_result(self, result_mock):
    config = AmbariConfig.AmbariConfig()
    config.set('agent', 'prefix', 'tmp')
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")
    dummy_controller = MagicMock()
    dummy_controller.recovery_manager = RecoveryManager(tempfile.mktemp())
    actionQueue = ActionQueue(config, dummy_controller)
    result_mock.return_value = {
      'reports': [{'status': 'IN_PROGRESS',
            'stderr': 'Read from /tmp/errors-3.txt',
            'stdout': 'Read from /tmp/output-3.txt',
            'clusterName': u'cc',
            'roleCommand': u'INSTALL',
            'serviceName': u'HDFS',
            'role': u'DATANODE',
            'actionId': '1-1',
            'taskId': 3,
            'exitCode': 777},

            {'status': 'COMPLETED',
             'stderr': 'stderr',
             'stdout': 'out',
             'clusterName': 'clusterName',
             'roleCommand': 'UPGRADE',
             'serviceName': 'serviceName',
             'role': 'role',
             'actionId': 17,
             'taskId': 'taskId',
             'exitCode': 0},

            {'status': 'FAILED',
             'stderr': 'stderr',
             'stdout': 'out',
             'clusterName': u'cc',
             'roleCommand': u'INSTALL',
             'serviceName': u'HDFS',
             'role': u'DATANODE',
             'actionId': '1-1',
             'taskId': 3,
             'exitCode': 13},

            {'status': 'COMPLETED',
             'stderr': 'stderr',
             'stdout': 'out',
             'clusterName': u'cc',
             'configurationTags': {'global': {'tag': 'v1'}},
             'roleCommand': u'INSTALL',
             'serviceName': u'HDFS',
             'role': u'DATANODE',
             'actionId': '1-1',
             'taskId': 3,
             'exitCode': 0}

            ],
      'componentStatus': [
        {'status': 'HEALTHY', 'componentName': 'DATANODE'},
        {'status': 'UNHEALTHY', 'componentName': 'NAMENODE'},
      ],
    }
    heartbeat = Heartbeat(actionQueue)
    hb = heartbeat.build(10)
    hb['hostname'] = 'hostname'
    hb['timestamp'] = 'timestamp'
    expected = {'nodeStatus':
                  {'status': 'HEALTHY',
                   'cause': 'NONE'},
                'recoveryReport': {'summary': 'DISABLED'},
                'recoveryTimestamp': -1,
                'timestamp': 'timestamp', 'hostname': 'hostname',
                'responseId': 10, 'reports': [
      {'status': 'IN_PROGRESS', 'roleCommand': u'INSTALL',
       'serviceName': u'HDFS', 'role': u'DATANODE', 'actionId': '1-1',
       'stderr': 'Read from /tmp/errors-3.txt',
       'stdout': 'Read from /tmp/output-3.txt', 'clusterName': u'cc',
       'taskId': 3, 'exitCode': 777},
      {'status': 'COMPLETED', 'roleCommand': 'UPGRADE',
       'serviceName': 'serviceName', 'role': 'role', 'actionId': 17,
       'stderr': 'stderr', 'stdout': 'out', 'clusterName': 'clusterName',
       'taskId': 'taskId', 'exitCode': 0},
      {'status': 'FAILED', 'roleCommand': u'INSTALL', 'serviceName': u'HDFS',
       'role': u'DATANODE', 'actionId': '1-1', 'stderr': 'stderr',
       'stdout': 'out', 'clusterName': u'cc', 'taskId': 3, 'exitCode': 13},
      {'status': 'COMPLETED', 'stdout': 'out',
       'configurationTags': {'global': {'tag': 'v1'}}, 'taskId': 3,
       'exitCode': 0, 'roleCommand': u'INSTALL', 'clusterName': u'cc',
       'serviceName': u'HDFS', 'role': u'DATANODE', 'actionId': '1-1',
       'stderr': 'stderr'}], 'componentStatus': [
      {'status': 'HEALTHY', 'componentName': 'DATANODE'},
      {'status': 'UNHEALTHY', 'componentName': 'NAMENODE'}]}
    self.maxDiff = None
    self.assertEquals(hb, expected)
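    # Overwriting the nondeterministic 'hostname' and 'timestamp' fields above
    # lets the whole heartbeat dict be compared against a literal expected
    # value in one assertEquals; maxDiff = None prints the full diff on failure.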

  @patch("subprocess.Popen")
  @patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
  @patch.object(HostInfoLinux, 'register')
  def test_heartbeat_no_host_check_cmd_in_queue(self, register_mock, Popen_mock):
    config = AmbariConfig.AmbariConfig()
    config.set('agent', 'prefix', 'tmp')
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")

    dummy_controller = MagicMock()
    actionQueue = ActionQueue(config, dummy_controller)
    actionQueue.statusCommandQueue = multiprocessing.Queue()
    statusCommand = {
      "serviceName" : 'HDFS',
      "commandType" : "STATUS_COMMAND",
      "clusterName" : "c1",
      "componentName" : "DATANODE",
      "role" : "DATANODE",
      'configurations':{'global' : {}}
    }
    actionQueue.put_status([statusCommand])

    heartbeat = Heartbeat(actionQueue)
    heartbeat.build(12, 6)
    self.assertTrue(register_mock.called)
    args, kwargs = register_mock.call_args_list[0]
    self.assertFalse(args[2])
    self.assertFalse(args[1])


  @patch("subprocess.Popen")
  @patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
  @patch.object(HostInfoLinux, 'register')
  def test_heartbeat_host_check_no_cmd(self, register_mock, Popen_mock):
    config = AmbariConfig.AmbariConfig()
    config.set('agent', 'prefix', 'tmp')
    config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
    config.set('agent', 'tolerate_download_failures', "true")
    dummy_controller = MagicMock()
    actionQueue = ActionQueue(config, dummy_controller)
    heartbeat = Heartbeat(actionQueue)
    heartbeat.build(12, 6)
    self.assertTrue(register_mock.called)
    args, kwargs = register_mock.call_args_list[0]
    self.assertFalse(args[1])
    self.assertFalse(args[2])
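
The tests above rely on mock's decorator stacking: @patch decorators are applied bottom-up, so the mock for the decorator closest to the function arrives as the first injected argument, and patches that pass new= inject no argument at all. A minimal standalone sketch of that ordering rule, using the stdlib unittest.mock rather than the external mock package used in these snippets:

from unittest import TestCase
from unittest.mock import patch


class PatchOrderExample(TestCase):

    @patch("os.path.isfile")    # outermost decorator -> injected last
    @patch("os.path.exists")    # innermost decorator -> injected first
    def test_order(self, exists_mock, isfile_mock):
        import os
        exists_mock.return_value = True
        isfile_mock.return_value = False
        self.assertTrue(os.path.exists("/no/such/path"))
        self.assertFalse(os.path.isfile("/no/such/path"))
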
Example #27
import json
import os

from stacks.utils.RMFTestCase import *

from mock.mock import MagicMock, patch
from resource_management.libraries import functions
from resource_management.core.logger import Logger
from resource_management.libraries.script.config_dictionary import UnknownConfiguration
from hive_server_interactive import HiveServerInteractiveDefault
from resource_management.libraries.script.script import Script
from resource_management.core import shell


@patch("resource_management.libraries.Script.get_tmp_dir",
       new=MagicMock(return_value=('/var/lib/ambari-agent/tmp')))
@patch.object(functions,
              "get_stack_version",
              new=MagicMock(return_value="2.0.0.0-1234"))
@patch("resource_management.libraries.functions.check_thrift_port_sasl",
       new=MagicMock())
@patch(
    "resource_management.libraries.functions.get_user_call_output.get_user_call_output",
    new=MagicMock(return_value=(0, '123', '')))
class TestHiveServerInteractive(RMFTestCase):
    COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
    STACK_VERSION = "2.0.6"
    UPGRADE_STACK_VERSION = "2.2"
    DEFAULT_IMMUTABLE_PATHS = [
        '/apps/hive/warehouse', '/apps/falcon', '/mr-history/done',
        '/app-logs', '/tmp'
Example #28
 def setUp(self):
     HostCleanup.logger = MagicMock()
     self.hostcleanup = HostCleanup.HostCleanup()
     # disable stdout
     out = StringIO.StringIO()
     sys.stdout = out
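
Reassigning sys.stdout, as in the setUp above, has to be undone by hand in tearDown. A small Python 3 sketch of the same silencing idea using the standard library's context manager, which restores stdout automatically even when the wrapped call raises:

import contextlib
import io


def run_silenced(fn, *args, **kwargs):
    """Run fn with stdout captured; return (result, captured output)."""
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        result = fn(*args, **kwargs)
    return result, buf.getvalue()
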
Example #29
class TestCheckHost(TestCase):
    current_dir = os.path.dirname(os.path.realpath(__file__))

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch("os.path.isfile")
    @patch.object(Script, 'get_config')
    @patch.object(Script, 'get_tmp_dir')
    @patch("resource_management.libraries.script.Script.put_structured_out")
    def testJavaHomeAvailableCheck(self, structured_out_mock, get_tmp_dir_mock,
                                   mock_config, os_isfile_mock):
        # test, java home exists
        os_isfile_mock.return_value = True
        get_tmp_dir_mock.return_value = "/tmp"
        mock_config.return_value = {
            "commandParams": {
                "check_execute_list": "java_home_check",
                "java_home": "test_java_home"
            }
        }

        checkHost = CheckHost()
        checkHost.actionexecute(None)

        self.assertEquals(structured_out_mock.call_args[0][0], {
            'java_home_check': {
                'message': 'Java home exists!',
                'exit_code': 0
            }
        })
        # test, java home doesn't exist
        os_isfile_mock.reset_mock()
        os_isfile_mock.return_value = False

        checkHost.actionexecute(None)

        self.assertEquals(
            structured_out_mock.call_args[0][0], {
                'java_home_check': {
                    "message": "Java home doesn't exist!",
                    "exit_code": 1
                }
            })
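        # Note: call_args on a mock always describes the most recent call;
        # call_args[0] is the positional-argument tuple and call_args[1] the
        # keyword dict, so call_args[0][0] above is the dict passed to the
        # latest put_structured_out invocation.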

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch.object(Script, 'get_config')
    @patch.object(Script, 'get_tmp_dir')
    @patch("check_host.download_file")
    @patch("resource_management.libraries.script.Script.put_structured_out")
    @patch("check_host.format")
    @patch("os.path.isfile")
    @patch("resource_management.core.shell.call")
    def testDBConnectionCheck(self, shell_call_mock, isfile_mock, format_mock,
                              structured_out_mock, download_file_mock,
                              get_tmp_dir_mock, mock_config):
        # test, download DBConnectionVerification.jar failed
        mock_config.return_value = {
            "commandParams": {
                "check_execute_list": "db_connection_check",
                "java_home": "test_java_home",
                "ambari_server_host": "test_host",
                "jdk_location": "test_jdk_location",
                "db_name": "mysql",
                "db_connection_url": "test_db_connection_url",
                "user_name": "test_user_name",
                "user_passwd": "test_user_passwd",
                "jdk_name": "test_jdk_name"
            },
            "hostLevelParams": {
                "agentCacheDir": "/nonexistent_tmp"
            }
        }
        get_tmp_dir_mock.return_value = "/tmp"
        download_file_mock.side_effect = Exception("test exception")
        isfile_mock.return_value = True
        checkHost = CheckHost()
        checkHost.actionexecute(None)

        self.assertEquals(structured_out_mock.call_args[0][0], {'db_connection_check': {'message': 'Error downloading ' \
                         'DBConnectionVerification.jar from Ambari Server resources. Check network access to Ambari ' \
                         'Server.\ntest exception', 'exit_code': 1}})

        # test, download jdbc driver failed
        mock_config.return_value = {
            "commandParams": {
                "check_execute_list": "db_connection_check",
                "java_home": "test_java_home",
                "ambari_server_host": "test_host",
                "jdk_location": "test_jdk_location",
                "db_name": "oracle",
                "db_connection_url": "test_db_connection_url",
                "user_name": "test_user_name",
                "user_passwd": "test_user_passwd",
                "jdk_name": "test_jdk_name"
            },
            "hostLevelParams": {
                "agentCacheDir": "/nonexistent_tmp"
            }
        }
        format_mock.reset_mock()
        download_file_mock.reset_mock()
        p = MagicMock()
        download_file_mock.side_effect = [p, Exception("test exception")]

        checkHost.actionexecute(None)

        self.assertEquals(
            format_mock.call_args[0][0],
            'Error: Ambari Server cannot download the database JDBC driver '
            'and is unable to test the database connection. You must run ambari-server setup '
            '--jdbc-db={db_name} --jdbc-driver=/path/to/your/{db_name}/driver.jar on the Ambari '
            'Server host to make the JDBC driver available for download and to enable testing '
            'the database connection.\n')
        self.assertEquals(
            structured_out_mock.call_args[0][0]['db_connection_check']
            ['exit_code'], 1)

        # test, no connection to remote db
        mock_config.return_value = {
            "commandParams": {
                "check_execute_list": "db_connection_check",
                "java_home": "test_java_home",
                "ambari_server_host": "test_host",
                "jdk_location": "test_jdk_location",
                "db_name": "postgres",
                "db_connection_url": "test_db_connection_url",
                "user_name": "test_user_name",
                "user_passwd": "test_user_passwd",
                "jdk_name": "test_jdk_name"
            },
            "hostLevelParams": {
                "agentCacheDir": "/nonexistent_tmp"
            }
        }
        format_mock.reset_mock()
        download_file_mock.reset_mock()
        download_file_mock.side_effect = [p, p]
        shell_call_mock.return_value = (1, "test message")

        checkHost.actionexecute(None)

        self.assertEquals(structured_out_mock.call_args[0][0], {
            'db_connection_check': {
                'message': 'test message',
                'exit_code': 1
            }
        })
        self.assertEquals(
            format_mock.call_args[0][0],
            '{java_exec} -cp {check_db_connection_path}{class_path_delimiter}'
            '{jdbc_jar_path} -Djava.library.path={java_library_path} org.apache.ambari.server.DBConnectionVerification'
            ' "{db_connection_url}" {user_name} {user_passwd!p} {jdbc_driver_class}'
        )

        # test, db connection success
        download_file_mock.reset_mock()
        download_file_mock.side_effect = [p, p]
        shell_call_mock.return_value = (0, "test message")

        checkHost.actionexecute(None)

        self.assertEquals(
            structured_out_mock.call_args[0][0], {
                'db_connection_check': {
                    'message': 'DB connection check completed successfully!',
                    'exit_code': 0
                }
            })

        #test jdk_name and java home are not available
        mock_config.return_value = {
            "commandParams": {
                "check_execute_list": "db_connection_check",
                "java_home": "test_java_home",
                "ambari_server_host": "test_host",
                "jdk_location": "test_jdk_location",
                "db_connection_url": "test_db_connection_url",
                "user_name": "test_user_name",
                "user_passwd": "test_user_passwd",
                "db_name": "postgres"
            },
            "hostLevelParams": {
                "agentCacheDir": "/nonexistent_tmp"
            }
        }

        isfile_mock.return_value = False
        checkHost.actionexecute(None)
        self.assertEquals(structured_out_mock.call_args[0][0], {'db_connection_check': {'message': 'Custom java is not ' \
                'available on host. Please install it. Java home should be the same as on server. \n', 'exit_code': 1}})
        pass
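
        # Note: assigning a list to side_effect (as done with download_file_mock
        # above) makes the mock consume one element per call: plain values are
        # returned, exception instances are raised. That is how the first
        # download is made to succeed and the second to fail.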

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch("socket.gethostbyname")
    @patch.object(Script, 'get_config')
    @patch.object(Script, 'get_tmp_dir')
    @patch("resource_management.libraries.script.Script.put_structured_out")
    def testHostResolution(self, structured_out_mock, get_tmp_dir_mock,
                           mock_config, mock_socket):
        mock_socket.return_value = "192.168.1.1"
        jsonFilePath = os.path.join(
            TestCheckHost.current_dir + "/../../resources/custom_actions",
            "check_host_ip_addresses.json")

        with open(jsonFilePath, "r") as jsonFile:
            jsonPayload = json.load(jsonFile)

        mock_config.return_value = ConfigDictionary(jsonPayload)
        get_tmp_dir_mock.return_value = "/tmp"

        checkHost = CheckHost()
        checkHost.actionexecute(None)

        # ensure the correct function was called
        self.assertTrue(structured_out_mock.called)
        structured_out_mock.assert_called_with({
            'host_resolution_check': {
                'failures': [],
                'message': 'All hosts resolved to an IP address.',
                'failed_count': 0,
                'success_count': 5,
                'exit_code': 0
            }
        })

        # try it now with errors
        mock_socket.side_effect = socket.error
        checkHost.actionexecute(None)

        structured_out_mock.assert_called_with({
            'host_resolution_check': {
                'failures': [{
                    'cause': (),
                    'host': u'c6401.ambari.apache.org',
                    'type': 'FORWARD_LOOKUP'
                }, {
                    'cause': (),
                    'host': u'c6402.ambari.apache.org',
                    'type': 'FORWARD_LOOKUP'
                }, {
                    'cause': (),
                    'host': u'c6403.ambari.apache.org',
                    'type': 'FORWARD_LOOKUP'
                }, {
                    'cause': (),
                    'host': u'foobar',
                    'type': 'FORWARD_LOOKUP'
                }, {
                    'cause': (),
                    'host': u'!!!',
                    'type': 'FORWARD_LOOKUP'
                }],
                'message': 'There were 5 host(s) that could not resolve to an IP address.',
                'failed_count': 5,
                'success_count': 0,
                'exit_code': 0
            }
        })
        pass
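
        # Note: setting side_effect to an exception class or instance
        # (socket.error above) makes every subsequent gethostbyname call raise,
        # which is why all five hosts land in the failures list on the second
        # actionexecute run.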

    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch.object(Script, 'get_config')
    @patch.object(Script, 'get_tmp_dir')
    @patch("resource_management.libraries.script.Script.put_structured_out")
    def testInvalidCheck(self, structured_out_mock, get_tmp_dir_mock,
                         mock_config):
        jsonFilePath = os.path.join(
            TestCheckHost.current_dir + "/../../resources/custom_actions",
            "invalid_check.json")

        with open(jsonFilePath, "r") as jsonFile:
            jsonPayload = json.load(jsonFile)

        mock_config.return_value = ConfigDictionary(jsonPayload)
        get_tmp_dir_mock.return_value = "tmp"

        checkHost = CheckHost()
        checkHost.actionexecute(None)

        # ensure the correct function was called
        self.assertTrue(structured_out_mock.called)
        structured_out_mock.assert_called_with({})
        pass

    @not_for_platform(PLATFORM_WINDOWS)
    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch("platform.system")
    @patch.object(Script, 'get_config')
    @patch.object(Script, 'get_tmp_dir')
    @patch('resource_management.libraries.script.Script.put_structured_out')
    @patch('ambari_agent.HostInfo.HostInfoLinux.javaProcs')
    @patch('ambari_agent.HostInfo.HostInfoLinux.checkLiveServices')
    @patch('ambari_agent.HostInfo.HostInfoLinux.getUMask')
    @patch('ambari_agent.HostInfo.HostInfoLinux.getTransparentHugePage')
    @patch('ambari_agent.HostInfo.HostInfoLinux.checkFirewall')
    @patch('ambari_agent.HostInfo.HostInfoLinux.checkReverseLookup')
    @patch('time.time')
    def testLastAgentEnv(self, time_mock, checkReverseLookup_mock,
                         checkFirewall_mock, getTransparentHugePage_mock,
                         getUMask_mock, checkLiveServices_mock, javaProcs_mock,
                         put_structured_out_mock, get_tmp_dir_mock,
                         get_config_mock, systemmock):
        jsonFilePath = os.path.join(
            TestCheckHost.current_dir + "/../../resources/custom_actions",
            "check_last_agent_env.json")
        with open(jsonFilePath, "r") as jsonFile:
            jsonPayload = json.load(jsonFile)

        get_config_mock.return_value = ConfigDictionary(jsonPayload)
        get_tmp_dir_mock.return_value = "/tmp"

        checkHost = CheckHost()
        checkHost.actionexecute(None)

        # ensure the correct function was called
        self.assertTrue(time_mock.called)
        self.assertTrue(checkReverseLookup_mock.called)
        self.assertTrue(checkFirewall_mock.called)
        self.assertTrue(getTransparentHugePage_mock.called)
        self.assertTrue(getUMask_mock.called)
        self.assertTrue(checkLiveServices_mock.called)
        self.assertTrue(javaProcs_mock.called)
        self.assertTrue(put_structured_out_mock.called)
        # ensure the correct keys are in the result map
        last_agent_env_check_result = put_structured_out_mock.call_args[0][0]
        self.assertTrue('last_agent_env_check' in last_agent_env_check_result)
        self.assertTrue('hostHealth' in
                        last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue('firewallRunning' in
                        last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue('firewallName' in
                        last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue('reverseLookup' in
                        last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue('alternatives' in
                        last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue(
            'umask' in last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue('stackFoldersAndFiles' in
                        last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue('existingUsers' in
                        last_agent_env_check_result['last_agent_env_check'])

        # try it now with errors
        javaProcs_mock.side_effect = Exception("test exception")
        checkHost.actionexecute(None)

        #ensure the correct response is returned
        put_structured_out_mock.assert_called_with({
            'last_agent_env_check': {
                'message': 'test exception',
                'exit_code': 1
            }
        })
        pass

    @only_for_platform(PLATFORM_WINDOWS)
    @patch.object(OSCheck,
                  "os_distribution",
                  new=MagicMock(return_value=os_distro_value))
    @patch("platform.system")
    @patch.object(Script, 'get_config')
    @patch.object(Script, 'get_tmp_dir')
    @patch('resource_management.libraries.script.Script.put_structured_out')
    @patch('ambari_agent.HostInfo.HostInfoWindows.javaProcs')
    @patch('ambari_agent.HostInfo.HostInfoWindows.checkLiveServices')
    @patch('ambari_agent.HostInfo.HostInfoWindows.getUMask')
    @patch('ambari_agent.HostInfo.HostInfoWindows.checkFirewall')
    @patch('ambari_agent.HostInfo.HostInfoWindows.checkReverseLookup')
    @patch('time.time')
    def testLastAgentEnv(self, time_mock, checkReverseLookup_mock,
                         checkFirewall_mock, getUMask_mock,
                         checkLiveServices_mock, javaProcs_mock,
                         put_structured_out_mock, get_tmp_dir_mock,
                         get_config_mock, systemmock):
        jsonFilePath = os.path.join(TestCheckHost.current_dir, "..", "..",
                                    "resources", "custom_actions",
                                    "check_last_agent_env.json")
        with open(jsonFilePath, "r") as jsonFile:
            jsonPayload = json.load(jsonFile)

        get_config_mock.return_value = ConfigDictionary(jsonPayload)
        get_tmp_dir_mock.return_value = "/tmp"

        checkHost = CheckHost()
        checkHost.actionexecute(None)

        # ensure the correct function was called
        self.assertTrue(time_mock.called)
        self.assertTrue(checkReverseLookup_mock.called)
        self.assertTrue(checkFirewall_mock.called)
        self.assertTrue(getUMask_mock.called)
        self.assertTrue(checkLiveServices_mock.called)
        self.assertTrue(javaProcs_mock.called)
        self.assertTrue(put_structured_out_mock.called)
        # ensure the correct keys are in the result map
        last_agent_env_check_result = put_structured_out_mock.call_args[0][0]
        self.assertTrue('last_agent_env_check' in last_agent_env_check_result)
        self.assertTrue('hostHealth' in
                        last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue('firewallRunning' in
                        last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue('firewallName' in
                        last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue('alternatives' in
                        last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue(
            'umask' in last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue('stackFoldersAndFiles' in
                        last_agent_env_check_result['last_agent_env_check'])
        self.assertTrue('existingUsers' in
                        last_agent_env_check_result['last_agent_env_check'])

        # try it now with errors
        javaProcs_mock.side_effect = Exception("test exception")
        checkHost.actionexecute(None)

        #ensure the correct response is returned
        put_structured_out_mock.assert_called_with({
            'last_agent_env_check': {
                'message': 'test exception',
                'exit_code': 1
            }
        })
        pass

    @patch.object(HostCheckReportFileHandler, "resolve_ambari_config")
    @patch("resource_management.libraries.script.Script.put_structured_out")
    @patch.object(Script, 'get_tmp_dir')
    @patch.object(Script, 'get_config')
    @patch("os.path.isfile")
    @patch('__builtin__.open')
    def testTransparentHugePage(self, open_mock, os_path_isfile_mock,
                                mock_config, get_tmp_dir_mock,
                                structured_out_mock, resolve_config_mock):
        context_manager_mock = MagicMock()
        open_mock.return_value = context_manager_mock
        file_mock = MagicMock()
        file_mock.read.return_value = "[never] always"
        enter_mock = MagicMock()
        enter_mock.return_value = file_mock
        exit_mock = MagicMock()
        setattr(context_manager_mock, '__enter__', enter_mock)
        setattr(context_manager_mock, '__exit__', exit_mock)
        os_path_isfile_mock.return_value = True
        get_tmp_dir_mock.return_value = "/tmp"
        mock_config.return_value = {
            "commandParams": {
                "check_execute_list": "transparentHugePage"
            }
        }

        checkHost = CheckHost()
        checkHost.actionexecute(None)

        self.assertEquals(
            structured_out_mock.call_args[0][0],
            {'transparentHugePage': {
                'message': 'never',
                'exit_code': 0
            }})

        # case 2, file not exists
        os_path_isfile_mock.return_value = False
        checkHost.actionexecute(None)

        self.assertEquals(
            structured_out_mock.call_args[0][0],
            {'transparentHugePage': {
                'message': '',
                'exit_code': 0
            }})
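
        # The __enter__/__exit__ wiring above builds a context-manager mock for
        # open() by hand. A more compact sketch of the same setup, using
        # mock_open from the same mock library, would be:
        #
        #     from mock.mock import mock_open
        #     with patch('__builtin__.open',
        #                mock_open(read_data="[never] always")):
        #         checkHost.actionexecute(None)
        #
        # mock_open pre-wires read() and the context-manager protocol, so the
        # manual enter/exit mocks become unnecessary.
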
    def test_runCommand_with_config(self, run_file_mock,
                                    resolve_script_path_mock, unlink_mock,
                                    isfile_mock, hostname_mock):
        hostname_mock.return_value = "test.hst"
        isfile_mock.return_value = True
        command = {
            'role': 'REGION_SERVER',
            'hostLevelParams': {
                'stack_name': 'HDP',
                'stack_version': '2.0.7',
                'jdk_location': 'some_location'
            },
            'commandParams': {
                'script_type': 'PYTHON',
                'script': 'scripts/hbase_regionserver.py',
                'command_timeout': '600',
                'service_package_folder': 'HBASE'
            },
            'configurations': {
                "hbase-site": {
                    "hbase.log": "${AGENT_LOG_ROOT}",
                    "hbase.number": "10485760"
                },
                "hbase-log4j": {
                    "a": "b"
                }
            },
            'taskId': '3',
            'roleCommand': 'INSTALL',
            'commandType': 'EXECUTION_COMMAND',
            'commandId': '1-1'
        }

        command_get = {
            'roleCommand': 'GET_CONFIG',
            'commandType': 'STATUS_COMMAND'
        }

        tempdir = tempfile.gettempdir()
        config = MagicMock()
        config.get.return_value = "something"
        config.getResolvedPath.return_value = tempdir
        config.getWorkRootPath.return_value = tempdir
        config.getLogPath.return_value = tempdir

        resolve_script_path_mock.return_value = "/basedir/scriptpath"
        dummy_controller = MagicMock()
        orchestrator = CustomServiceOrchestrator(config, dummy_controller)
        # normal run case
        run_file_mock.return_value = {
            'stdout': 'sss',
            'stderr': 'eee',
            'exitcode': 0,
        }

        expected = {
            'hbase-site': {
                'hbase.log': tempdir,
                'hbase.number': '10485760'
            },
            'hbase-log4j': {
                'a': 'b'
            }
        }

        ret = orchestrator.runCommand(command, "out.txt", "err.txt", True,
                                      True)
        self.assertEqual(ret['exitcode'], 0)
        self.assertTrue(run_file_mock.called)
        self.assertEqual(orchestrator.applied_configs, expected)

        ret = orchestrator.requestComponentStatus(command_get)
        self.assertEqual(ret['configurations'], expected)
        pass
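
The final assertion above checks that the "${AGENT_LOG_ROOT}" placeholder in hbase-site was replaced with the agent's resolved path (tempdir in this test) before the configurations were applied. A minimal standalone sketch of that substitution idea (a hypothetical helper for illustration, not Ambari's actual implementation):

def resolve_placeholders(configs, variables):
    """Return a copy of configs with ${NAME} tokens replaced from variables."""
    resolved = {}
    for section, props in configs.items():
        resolved[section] = {}
        for key, value in props.items():
            for name, replacement in variables.items():
                value = value.replace("${" + name + "}", replacement)
            resolved[section][key] = value
    return resolved


# Usage, mirroring the expected dict in the test above:
# resolve_placeholders({'hbase-site': {'hbase.log': '${AGENT_LOG_ROOT}'}},
#                      {'AGENT_LOG_ROOT': '/tmp'})
# -> {'hbase-site': {'hbase.log': '/tmp'}}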