Example #1
  def test_execution_results(self):
    subproc_mock = self.Subprocess_mockup()
    executor = PythonExecutor("/tmp", AmbariConfig().getConfig())
    _, tmpoutfile = tempfile.mkstemp()
    _, tmperrfile = tempfile.mkstemp()
    _, tmpstroutfile = tempfile.mkstemp()
    PYTHON_TIMEOUT_SECONDS =  5

    def launch_python_subprocess_method(command, tmpout, tmperr):
      subproc_mock.tmpout = tmpout
      subproc_mock.tmperr = tmperr
      return subproc_mock
    executor.launch_python_subprocess = launch_python_subprocess_method
    runShellKillPgrp_method = MagicMock()
    runShellKillPgrp_method.side_effect = lambda python : python.terminate()
    executor.runShellKillPgrp = runShellKillPgrp_method
    subproc_mock.returncode = 0
    subproc_mock.should_finish_event.set()
    callback_method = MagicMock()
    result = executor.run_file("file", ["arg1", "arg2"], "/fake_tmp_dir",
                               tmpoutfile, tmperrfile, PYTHON_TIMEOUT_SECONDS,
                               tmpstroutfile, "INFO", callback_method, "1-1")
    self.assertEquals(result, {'exitcode': 0, 'stderr': 'Dummy err', 'stdout': 'Dummy output',
                               'structuredOut': {}})
    self.assertTrue(callback_method.called)
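
Note: these tests construct self.Subprocess_mockup(), which is defined inside the test class and not shown in the excerpts. A minimal sketch of what such a mock could look like, inferred only from the assertions (threading.Event fields, "Dummy output"/"Dummy err" written through the handles passed to launch_python_subprocess, a was_terminated flag); the real test helper may differ:

import threading

class Subprocess_mockup(object):
    """Hypothetical stand-in for subprocess.Popen used by these tests."""
    returncode = 0

    def __init__(self):
        self.should_finish_event = threading.Event()  # set by the test to let the fake process exit
        self.finished_event = threading.Event()       # set once communicate() has returned
        self.was_terminated = False
        self.tmpout = None   # file handle injected via launch_python_subprocess_method
        self.tmperr = None
        self.pid = -1

    def communicate(self):
        self.should_finish_event.wait()
        self.tmpout.write("Dummy output")
        self.tmpout.flush()
        self.tmperr.write("Dummy err")
        self.tmperr.flush()
        self.finished_event.set()

    def terminate(self):
        self.was_terminated = True
        self.should_finish_event.set()
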
Example #2
  def test_watchdog_1(self, kill_process_with_children_mock):
    """
    Tests whether watchdog works
    """
    subproc_mock = self.Subprocess_mockup()
    executor = PythonExecutor("/tmp", AmbariConfig().getConfig())
    _, tmpoutfile = tempfile.mkstemp()
    _, tmperrfile = tempfile.mkstemp()
    PYTHON_TIMEOUT_SECONDS = 0.1
    kill_process_with_children_mock.side_effect = lambda pid : subproc_mock.terminate()

    def launch_python_subprocess_method(command, tmpout, tmperr):
      subproc_mock.tmpout = tmpout
      subproc_mock.tmperr = tmperr
      return subproc_mock
    executor.launch_python_subprocess = launch_python_subprocess_method
    runShellKillPgrp_method = MagicMock()
    runShellKillPgrp_method.side_effect = lambda python : python.terminate()
    executor.runShellKillPgrp = runShellKillPgrp_method
    subproc_mock.returncode = None
    thread = Thread(target =  executor.run_file, args = ("fake_puppetFile", ["arg1", "arg2"],
                                                    tmpoutfile, tmperrfile, PYTHON_TIMEOUT_SECONDS))
    thread.start()
    time.sleep(0.1)
    subproc_mock.finished_event.wait()
    self.assertEquals(subproc_mock.was_terminated, True, "Subprocess should be terminated due to timeout")
Example #3
    def __init__(self, config, controller, agentToggleLogger):
        self.config = config
        self.controller = controller
        self.tmp_dir = config.getResolvedPath(AgentConfig.APP_TASK_DIR)
        self.python_executor = PythonExecutor(self.tmp_dir, config,
                                              agentToggleLogger)
        self.status_commands_stdout = os.path.realpath(
            posixpath.join(self.tmp_dir, 'status_command_stdout.txt'))
        self.status_commands_stderr = os.path.realpath(
            posixpath.join(self.tmp_dir, 'status_command_stderr.txt'))
        self.public_fqdn = hostname.public_hostname()
        self.stored_command = {}
        self.allocated_ports = {}
        self.log_folders = {}

        self.allocated_ports_set = set()
        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore fail
        self.base_dir = os.path.realpath(
            posixpath.join(config.getResolvedPath(AgentConfig.APP_PACKAGE_DIR),
                           "package"))
Example #4
  def test_watchdog_2(self):
    """
    Tries to catch false positive watchdog invocations
    """
    subproc_mock = self.Subprocess_mockup()
    executor = PythonExecutor("/tmp", AmbariConfig().getConfig())
    _, tmpoutfile = tempfile.mkstemp()
    _, tmperrfile = tempfile.mkstemp()
    PYTHON_TIMEOUT_SECONDS =  5

    def launch_python_subprocess_method(command, tmpout, tmperr):
      subproc_mock.tmpout = tmpout
      subproc_mock.tmperr = tmperr
      return subproc_mock
    executor.launch_python_subprocess = launch_python_subprocess_method
    runShellKillPgrp_method = MagicMock()
    runShellKillPgrp_method.side_effect = lambda python : python.terminate()
    executor.runShellKillPgrp = runShellKillPgrp_method
    subproc_mock.returncode = 0
    thread = Thread(target =  executor.run_file, args = ("fake_puppetFile", ["arg1", "arg2"],
                                                      tmpoutfile, tmperrfile, PYTHON_TIMEOUT_SECONDS))
    thread.start()
    time.sleep(0.1)
    subproc_mock.should_finish_event.set()
    subproc_mock.finished_event.wait()
    self.assertEquals(subproc_mock.was_terminated, False, "Subprocess should not be terminated before timeout")
    self.assertEquals(subproc_mock.returncode, 0, "Subprocess should not be terminated before timeout")
Example #5
  def test_watchdog_2(self):
    """
    Tries to catch false positive watchdog invocations
    """
    subproc_mock = self.Subprocess_mockup()
    executor = PythonExecutor("/tmp", AmbariConfig().getConfig())
    _, tmpoutfile = tempfile.mkstemp()
    _, tmperrfile = tempfile.mkstemp()
    _, tmpstrucout = tempfile.mkstemp()
    PYTHON_TIMEOUT_SECONDS =  5

    def launch_python_subprocess_method(command, tmpout, tmperr):
      subproc_mock.tmpout = tmpout
      subproc_mock.tmperr = tmperr
      return subproc_mock
    executor.launch_python_subprocess = launch_python_subprocess_method
    runShellKillPgrp_method = MagicMock()
    runShellKillPgrp_method.side_effect = lambda python : python.terminate()
    executor.runShellKillPgrp = runShellKillPgrp_method
    subproc_mock.returncode = 0
    callback_method = MagicMock()
    thread = Thread(target =  executor.run_file, args = ("fake_puppetFile", ["arg1", "arg2"],
                                                      tmpoutfile, tmperrfile,
                                                      PYTHON_TIMEOUT_SECONDS, tmpstrucout,
                                                      callback_method, "1-1"))
    thread.start()
    time.sleep(0.1)
    subproc_mock.should_finish_event.set()
    subproc_mock.finished_event.wait()
    self.assertEquals(subproc_mock.was_terminated, False, "Subprocess should not be terminated before timeout")
    self.assertEquals(subproc_mock.returncode, 0, "Subprocess should not be terminated before timeout")
    self.assertTrue(callback_method.called)
Example #6
 def test_python_command(self):
     executor = PythonExecutor("/tmp", AmbariConfig().getConfig())
     command = executor.python_command("script", ["script_param1"])
     self.assertEqual(3, len(command))
     self.assertTrue("python" in command[0])
     self.assertEquals("script", command[1])
     self.assertEquals("script_param1", command[2])
Example #7
  def test_watchdog_1(self, kill_process_with_children_mock):
    """
    Tests whether watchdog works
    """
    subproc_mock = self.Subprocess_mockup()
    executor = PythonExecutor("/tmp", AmbariConfig().getConfig())
    _, tmpoutfile = tempfile.mkstemp()
    _, tmperrfile = tempfile.mkstemp()
    _, tmpstrucout = tempfile.mkstemp()
    PYTHON_TIMEOUT_SECONDS = 0.1
    kill_process_with_children_mock.side_effect = lambda pid : subproc_mock.terminate()

    def launch_python_subprocess_method(command, tmpout, tmperr):
      subproc_mock.tmpout = tmpout
      subproc_mock.tmperr = tmperr
      return subproc_mock
    executor.launch_python_subprocess = launch_python_subprocess_method
    runShellKillPgrp_method = MagicMock()
    runShellKillPgrp_method.side_effect = lambda python : python.terminate()
    executor.runShellKillPgrp = runShellKillPgrp_method
    subproc_mock.returncode = None
    callback_method = MagicMock()
    thread = Thread(target =  executor.run_file, args = ("fake_puppetFile",
      ["arg1", "arg2"], tmpoutfile, tmperrfile,
      PYTHON_TIMEOUT_SECONDS, tmpstrucout, callback_method, '1'))
    thread.start()
    time.sleep(0.1)
    subproc_mock.finished_event.wait()
    self.assertEquals(subproc_mock.was_terminated, True, "Subprocess should be terminated due to timeout")
    self.assertTrue(callback_method.called)
Example #8
  def test_runCommand_background_action(self, get_custom_actions_base_dir_mock,
                                    FileCache_mock,
                                    dump_command_to_json_mock,
                                    get_py_executor_mock):
    FileCache_mock.return_value = None
    get_custom_actions_base_dir_mock.return_value = "some path"
    _, script = tempfile.mkstemp()
    command = {
      'role' : 'any',
      'commandParams': {
        'script_type': 'PYTHON',
        'script': 'some_custom_action.py',
        'command_timeout': '600',
        'jdk_location' : 'some_location'
      },
      'taskId' : '13',
      'roleCommand': 'ACTIONEXECUTE',
      'commandType': 'BACKGROUND_EXECUTION_COMMAND',
      '__handle': BackgroundCommandExecutionHandle({'taskId': '13'}, 13,
                                                   MagicMock(), MagicMock())
    }
    dummy_controller = MagicMock()
    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)

    import TestActionQueue
    pyex = PythonExecutor(orchestrator.tmp_dir, orchestrator.config)
    TestActionQueue.patch_output_file(pyex)
    pyex.condenseOutput = MagicMock()
    get_py_executor_mock.return_value = pyex
    orchestrator.dump_command_to_json = MagicMock()

    ret = orchestrator.runCommand(command, "out.txt", "err.txt")
    self.assertEqual(ret['exitcode'], 777)
Example #9
 def test_python_command(self):
   executor = PythonExecutor("/tmp", AmbariConfig().getConfig())
   command = executor.python_command("script", ["script_param1"])
   self.assertEqual(3, len(command))
   self.assertTrue("python" in command[0].lower())
   self.assertEquals("script", command[1])
   self.assertEquals("script_param1", command[2])
Example #10
    def test_execution_results(self):
        subproc_mock = self.Subprocess_mockup()
        executor = PythonExecutor("/tmp", AmbariConfig().getConfig())
        _, tmpoutfile = tempfile.mkstemp()
        _, tmperrfile = tempfile.mkstemp()
        _, tmpstroutfile = tempfile.mkstemp()
        PYTHON_TIMEOUT_SECONDS = 5

        def launch_python_subprocess_method(command, tmpout, tmperr):
            subproc_mock.tmpout = tmpout
            subproc_mock.tmperr = tmperr
            return subproc_mock

        executor.launch_python_subprocess = launch_python_subprocess_method
        runShellKillPgrp_method = MagicMock()
        runShellKillPgrp_method.side_effect = lambda python: python.terminate()
        executor.runShellKillPgrp = runShellKillPgrp_method
        subproc_mock.returncode = 0
        subproc_mock.should_finish_event.set()
        result = executor.run_file("file", ["arg1", "arg2"], tmpoutfile,
                                   tmperrfile, PYTHON_TIMEOUT_SECONDS,
                                   tmpstroutfile, "INFO")
        self.assertEquals(
            result, {
                'exitcode': 0,
                'stderr': 'Dummy err',
                'stdout': 'Dummy output',
                'structuredOut': {}
            })
Example #11
  def test_execution_results(self):
    self.assertEqual.__self__.maxDiff = None
    subproc_mock = self.Subprocess_mockup()
    executor = PythonExecutor("/tmp", AgentConfig("", ""), self.agentToggleLogger)
    _, tmpoutfile = tempfile.mkstemp()
    _, tmperrfile = tempfile.mkstemp()
    _, tmpstroutfile = tempfile.mkstemp()
    if IS_WINDOWS:
      if os.path.exists(tmpstroutfile):
        tmpstroutfile = tmpstroutfile + "_t"
    PYTHON_TIMEOUT_SECONDS =  5

    def launch_python_subprocess_method(command, tmpout, tmperr, environment_vars):
      subproc_mock.tmpout = tmpout
      subproc_mock.tmperr = tmperr
      return subproc_mock
    executor.launch_python_subprocess = launch_python_subprocess_method
    runShellKillPgrp_method = MagicMock()
    runShellKillPgrp_method.side_effect = lambda python : python.terminate()
    executor.runShellKillPgrp = runShellKillPgrp_method
    subproc_mock.returncode = 0
    subproc_mock.should_finish_event.set()
    result = executor.run_file("file", ["arg1", "arg2"], tmpoutfile, tmperrfile, PYTHON_TIMEOUT_SECONDS, tmpstroutfile, "INFO", True, None)
    self.assertEquals(result, {'exitcode': 0, 'stderr': 'Dummy err', 'stdout': 'Dummy output',
                               'structuredOut': {}})
Example #12
  def test_execution_results(self):
    subproc_mock = self.Subprocess_mockup()
    executor = PythonExecutor("/tmp", AmbariConfig().getConfig())
    _, tmpoutfile = tempfile.mkstemp()
    _, tmperrfile = tempfile.mkstemp()
    
    tmp_file = tempfile.NamedTemporaryFile()    # the structured out file should be preserved across calls to the hooks and script.
    tmpstructuredoutfile = tmp_file.name
    tmp_file.close()

    PYTHON_TIMEOUT_SECONDS =  5

    def launch_python_subprocess_method(command, tmpout, tmperr):
      subproc_mock.tmpout = tmpout
      subproc_mock.tmperr = tmperr
      return subproc_mock
    executor.launch_python_subprocess = launch_python_subprocess_method
    runShellKillPgrp_method = MagicMock()
    runShellKillPgrp_method.side_effect = lambda python : python.terminate()
    executor.runShellKillPgrp = runShellKillPgrp_method
    subproc_mock.returncode = 0
    subproc_mock.should_finish_event.set()
    callback_method = MagicMock()
    result = executor.run_file("file", ["arg1", "arg2"],
                               tmpoutfile, tmperrfile, PYTHON_TIMEOUT_SECONDS,
                               tmpstructuredoutfile, callback_method, "1-1")
    self.assertEquals(result, {'exitcode': 0, 'stderr': '', 'stdout': '',
                               'structuredOut': {}})
    self.assertTrue(callback_method.called)
Example #13
 def test_set_env_values(self, os_env_copy_mock, subprocess_mock, open_mock):
   actual_vars = {"someOther" : "value1"}
   executor = PythonExecutor("/tmp", AgentConfig("", ""))
   environment_vars = [("PYTHONPATH", "a:b")]
   os_env_copy_mock.return_value = actual_vars
   executor.run_file("script.pynot", ["a","b"], "", "", 10, "", "INFO", True, environment_vars)
   self.assertEquals(2, len(os_env_copy_mock.return_value))
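
The final assertion above checks that the copied environment ends up with two keys, which implies run_file copies os.environ and overlays the (name, value) pairs it was given. A rough sketch of that merge, using names of my own choosing rather than the actual PythonExecutor internals:

import os

def build_subprocess_environment(environment_vars):
    # Start from a copy of the parent environment and apply the supplied pairs,
    # e.g. [("PYTHONPATH", "a:b")] is added next to any keys already present.
    env = os.environ.copy()
    if environment_vars:
        for name, value in environment_vars:
            env[name] = value
    return env
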
Example #14
  def test_watchdog_2(self):
    # Test hangs on Windows TODO
    if IS_WINDOWS:
      return
    """
    Tries to catch false positive watchdog invocations
    """
    subproc_mock = self.Subprocess_mockup()
    executor = PythonExecutor("/tmp", AgentConfig("", ""), self.agentToggleLogger)
    _, tmpoutfile = tempfile.mkstemp()
    _, tmperrfile = tempfile.mkstemp()
    _, tmpstrucout = tempfile.mkstemp()
    PYTHON_TIMEOUT_SECONDS =  5

    environment_vars = [("PYTHONPATH", "a:b")]
    def launch_python_subprocess_method(command, tmpout, tmperr, environment_vars):
      subproc_mock.tmpout = tmpout
      subproc_mock.tmperr = tmperr
      return subproc_mock
    executor.launch_python_subprocess = launch_python_subprocess_method
    runShellKillPgrp_method = MagicMock()
    runShellKillPgrp_method.side_effect = lambda python : python.terminate()
    executor.runShellKillPgrp = runShellKillPgrp_method
    subproc_mock.returncode = 0
    thread = Thread(target =  executor.run_file, args = ("fake_puppetFile", ["arg1", "arg2"],
                                                      tmpoutfile, tmperrfile,
                                                      PYTHON_TIMEOUT_SECONDS, tmpstrucout, "INFO"))
    thread.start()
    time.sleep(0.1)
    subproc_mock.should_finish_event.set()
    subproc_mock.finished_event.wait()
    self.assertEquals(subproc_mock.was_terminated, False, "Subprocess should not be terminated before timeout")
    self.assertEquals(subproc_mock.returncode, 0, "Subprocess should not be terminated before timeout")
Example #15
    def test_execution_results(self):
        self.assertEqual.__self__.maxDiff = None
        subproc_mock = self.Subprocess_mockup()
        executor = PythonExecutor("/tmp", AgentConfig("", ""),
                                  self.agentToggleLogger)
        _, tmpoutfile = tempfile.mkstemp()
        _, tmperrfile = tempfile.mkstemp()
        _, tmpstroutfile = tempfile.mkstemp()
        if IS_WINDOWS:
            if os.path.exists(tmpstroutfile):
                tmpstroutfile = tmpstroutfile + "_t"
        PYTHON_TIMEOUT_SECONDS = 5

        def launch_python_subprocess_method(command, tmpout, tmperr,
                                            environment_vars):
            subproc_mock.tmpout = tmpout
            subproc_mock.tmperr = tmperr
            return subproc_mock

        executor.launch_python_subprocess = launch_python_subprocess_method
        runShellKillPgrp_method = MagicMock()
        runShellKillPgrp_method.side_effect = lambda python: python.terminate()
        executor.runShellKillPgrp = runShellKillPgrp_method
        subproc_mock.returncode = 0
        subproc_mock.should_finish_event.set()
        result = executor.run_file("file", ["arg1", "arg2"], tmpoutfile,
                                   tmperrfile, PYTHON_TIMEOUT_SECONDS,
                                   tmpstroutfile, "INFO", True, None)
        self.assertEquals(
            result, {
                'exitcode': 0,
                'stderr': 'Dummy err',
                'stdout': 'Dummy output',
                'structuredOut': {}
            })
Example #16
  def test_watchdog_1(self, kill_process_with_children_mock):
    # Test hangs on Windows TODO
    if IS_WINDOWS:
      return
    
    """
    Tests whether watchdog works
    """
    subproc_mock = self.Subprocess_mockup()
    executor = PythonExecutor("/tmp", AgentConfig("", ""), self.agentToggleLogger)
    _, tmpoutfile = tempfile.mkstemp()
    _, tmperrfile = tempfile.mkstemp()
    _, tmpstrucout = tempfile.mkstemp()
    PYTHON_TIMEOUT_SECONDS = 0.1
    kill_process_with_children_mock.side_effect = lambda pid : subproc_mock.terminate()

    def launch_python_subprocess_method(command, tmpout, tmperr, environment_vars):
      subproc_mock.tmpout = tmpout
      subproc_mock.tmperr = tmperr
      return subproc_mock
    executor.launch_python_subprocess = launch_python_subprocess_method
    runShellKillPgrp_method = MagicMock()
    runShellKillPgrp_method.side_effect = lambda python : python.terminate()
    executor.runShellKillPgrp = runShellKillPgrp_method
    subproc_mock.returncode = None
    thread = Thread(target =  executor.run_file, args = ("fake_puppetFile",
      ["arg1", "arg2"], tmpoutfile, tmperrfile, PYTHON_TIMEOUT_SECONDS, tmpstrucout,"INFO"))
    thread.start()
    time.sleep(0.1)
    subproc_mock.finished_event.wait()
    self.assertEquals(subproc_mock.was_terminated, True, "Subprocess should be terminated due to timeout")
Example #17
  def test_runCommand_background_action(self, get_custom_actions_base_dir_mock,
                                    FileCache_mock,
                                    dump_command_to_json_mock,
                                    get_py_executor_mock):
    FileCache_mock.return_value = None
    get_custom_actions_base_dir_mock.return_value = "some path"
    _, script = tempfile.mkstemp()
    command = {
      'role' : 'any',
      'commandParams': {
        'script_type': 'PYTHON',
        'script': 'some_custom_action.py',
        'command_timeout': '600',
        'jdk_location' : 'some_location'
      },
      'taskId' : '13',
      'roleCommand': 'ACTIONEXECUTE',
      'commandType': 'BACKGROUND_EXECUTION_COMMAND',
      '__handle': BackgroundCommandExecutionHandle({'taskId': '13'}, 13,
                                                   MagicMock(), MagicMock())
    }
    dummy_controller = MagicMock()
    orchestrator = CustomServiceOrchestrator(self.config, dummy_controller)

    import TestActionQueue
    pyex = PythonExecutor(orchestrator.tmp_dir, orchestrator.config)
    TestActionQueue.patch_output_file(pyex)
    pyex.condenseOutput = MagicMock()
    get_py_executor_mock.return_value = pyex
    orchestrator.dump_command_to_json = MagicMock()

    ret = orchestrator.runCommand(command, "out.txt", "err.txt")
    self.assertEqual(ret['exitcode'], 777)
Example #18
  def test_execution_results(self):
    subproc_mock = self.Subprocess_mockup()
    executor = PythonExecutor("/tmp", AmbariConfig().getConfig())
    _, tmpoutfile = tempfile.mkstemp()
    _, tmperrfile = tempfile.mkstemp()
    
    tmp_file = tempfile.NamedTemporaryFile()    # the structured out file should be preserved across calls to the hooks and script.
    tmpstructuredoutfile = tmp_file.name
    tmp_file.close()

    PYTHON_TIMEOUT_SECONDS =  5

    def launch_python_subprocess_method(command, tmpout, tmperr):
      subproc_mock.tmpout = tmpout
      subproc_mock.tmperr = tmperr
      return subproc_mock
    executor.launch_python_subprocess = launch_python_subprocess_method
    runShellKillPgrp_method = MagicMock()
    runShellKillPgrp_method.side_effect = lambda python : python.terminate()
    executor.runShellKillPgrp = runShellKillPgrp_method
    subproc_mock.returncode = 0
    subproc_mock.should_finish_event.set()
    callback_method = MagicMock()
    result = executor.run_file("file", ["arg1", "arg2"], "/fake_tmp_dir",
                               tmpoutfile, tmperrfile, PYTHON_TIMEOUT_SECONDS,
                               tmpstructuredoutfile, "INFO", callback_method, "1-1")
    self.assertEquals(result, {'exitcode': 0, 'stderr': 'Dummy err', 'stdout': 'Dummy output',
                               'structuredOut': {}})
    self.assertTrue(callback_method.called)
Example #19
 def test_python_command(self):
     executor = PythonExecutor("/tmp", AgentConfig("", ""))
     command = executor.python_command("script", ["script_param1"])
     self.assertEqual(4, len(command))
     self.assertTrue("python" in command[0])
     self.assertEquals("-S", command[1])
     self.assertEquals("script", command[2])
     self.assertEquals("script_param1", command[3])
Example #20
 def test_python_command(self):
   executor = PythonExecutor("/tmp", AgentConfig("", ""))
   command = executor.python_command("script", ["script_param1"])
   self.assertEqual(4, len(command))
   self.assertTrue("python" in command[0])
   self.assertEquals("-S", command[1])
   self.assertEquals("script", command[2])
   self.assertEquals("script_param1", command[3])
Example #21
 def test_python_command(self):
   executor = PythonExecutor("/tmp", AgentConfig("", ""), self.agentToggleLogger)
   command = executor.python_command("script", ["script_param1"])
   self.assertEqual(4, len(command))
   self.assertTrue("python" in command[0].lower(), "Looking for python in %s" % (command[0].lower()))
   self.assertEquals("-S", command[1])
   self.assertEquals("script", command[2])
   self.assertEquals("script_param1", command[3])
Example #22
 def test_set_env_values(self, os_env_copy_mock, subprocess_mock,
                         open_mock):
     actual_vars = {"someOther": "value1"}
     executor = PythonExecutor("/tmp", AgentConfig("", ""))
     environment_vars = [("PYTHONPATH", "a:b")]
     os_env_copy_mock.return_value = actual_vars
     executor.run_file("script.pynot", ["a", "b"], "", "", 10, "", "INFO",
                       True, environment_vars)
     self.assertEquals(2, len(os_env_copy_mock.return_value))
Example #23
 def test_python_command(self):
     executor = PythonExecutor("/tmp", AgentConfig("", ""),
                               self.agentToggleLogger)
     command = executor.python_command("script", ["script_param1"])
     self.assertEqual(4, len(command))
     self.assertTrue("python" in command[0].lower(),
                     "Looking for python in %s" % (command[0].lower()))
     self.assertEquals("-S", command[1])
     self.assertEquals("script", command[2])
     self.assertEquals("script_param1", command[3])
Example #24
 def __init__(self, config, controller):
     self.config = config
     self.tmp_dir = config.getResolvedPath(AgentConfig.APP_TASK_DIR)
     self.python_executor = PythonExecutor(self.tmp_dir, config)
     self.status_commands_stdout = os.path.join(
         self.tmp_dir, 'status_command_stdout.txt')
     self.status_commands_stderr = os.path.join(
         self.tmp_dir, 'status_command_stderr.txt')
     self.base_dir = os.path.join(
         config.getResolvedPath(AgentConfig.APP_PACKAGE_DIR), "package")
     # Clean up old status command files if any
     try:
         os.unlink(self.status_commands_stdout)
         os.unlink(self.status_commands_stderr)
     except OSError:
         pass  # Ignore fail
Example #25
 def __init__(self, config, controller):
   self.config = config
   self.tmp_dir = config.get('agent', 'prefix')
   self.file_cache = FileCache(config)
   self.python_executor = PythonExecutor(self.tmp_dir, config)
   self.status_commands_stdout = os.path.join(self.tmp_dir,
                                              'status_command_stdout.txt')
   self.status_commands_stderr = os.path.join(self.tmp_dir,
                                              'status_command_stderr.txt')
   # cache reset will be called on every agent registration
   controller.registration_listeners.append(self.file_cache.reset)
   # Clean up old status command files if any
   try:
     os.unlink(self.status_commands_stdout)
     os.unlink(self.status_commands_stderr)
   except OSError:
     pass # Ignore fail
Example #26
 def get_py_executor(self, forced_command_name):
     """
 Wrapper for unit testing
 :return:
 """
     if forced_command_name in self.REFLECTIVELY_RUN_COMMANDS:
         return PythonReflectiveExecutor(self.tmp_dir, self.config)
     else:
         return PythonExecutor(self.tmp_dir, self.config)
Example #27
    def test_watchdog_2(self):
        # Test hangs on Windows TODO
        if IS_WINDOWS:
            return
        """
    Tries to catch false positive watchdog invocations
    """
        subproc_mock = self.Subprocess_mockup()
        executor = PythonExecutor("/tmp", AgentConfig("", ""),
                                  self.agentToggleLogger)
        _, tmpoutfile = tempfile.mkstemp()
        _, tmperrfile = tempfile.mkstemp()
        _, tmpstrucout = tempfile.mkstemp()
        PYTHON_TIMEOUT_SECONDS = 5

        environment_vars = [("PYTHONPATH", "a:b")]

        def launch_python_subprocess_method(command, tmpout, tmperr,
                                            environment_vars):
            subproc_mock.tmpout = tmpout
            subproc_mock.tmperr = tmperr
            return subproc_mock

        executor.launch_python_subprocess = launch_python_subprocess_method
        runShellKillPgrp_method = MagicMock()
        runShellKillPgrp_method.side_effect = lambda python: python.terminate()
        executor.runShellKillPgrp = runShellKillPgrp_method
        subproc_mock.returncode = 0
        thread = Thread(target=executor.run_file,
                        args=("fake_puppetFile", ["arg1", "arg2"
                                                  ], tmpoutfile, tmperrfile,
                              PYTHON_TIMEOUT_SECONDS, tmpstrucout, "INFO"))
        thread.start()
        time.sleep(0.1)
        subproc_mock.should_finish_event.set()
        subproc_mock.finished_event.wait()
        self.assertEquals(
            subproc_mock.was_terminated, False,
            "Subprocess should not be terminated before timeout")
        self.assertEquals(
            subproc_mock.returncode, 0,
            "Subprocess should not be terminated before timeout")
Example #28
  def test_is_successfull(self):
    executor = PythonExecutor("/tmp", AmbariConfig().getConfig())

    executor.python_process_has_been_killed = False
    self.assertTrue(executor.isSuccessfull(0))
    self.assertFalse(executor.isSuccessfull(1))

    executor.python_process_has_been_killed = True
    self.assertFalse(executor.isSuccessfull(0))
    self.assertFalse(executor.isSuccessfull(1))
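
The four assertions above pin down the contract of isSuccessfull: a run is successful only when the exit code is 0 and the watchdog did not kill the process. A one-line sketch of that logic (not the actual PythonExecutor source):

def isSuccessfull(self, returncode):
    # A kill by the watchdog always counts as failure, whatever the exit code was.
    return not self.python_process_has_been_killed and returncode == 0
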
Example #29
  def test_execution_results(self):
    subproc_mock = self.Subprocess_mockup()
    executor = PythonExecutor("/tmp", AgentConfig("", ""))
    _, tmpoutfile = tempfile.mkstemp()
    _, tmperrfile = tempfile.mkstemp()
    _, tmpstroutfile = tempfile.mkstemp()
    PYTHON_TIMEOUT_SECONDS =  5

    def launch_python_subprocess_method(command, tmpout, tmperr, environment_vars):
      subproc_mock.tmpout = tmpout
      subproc_mock.tmperr = tmperr
      return subproc_mock
    executor.launch_python_subprocess = launch_python_subprocess_method
    runShellKillPgrp_method = MagicMock()
    runShellKillPgrp_method.side_effect = lambda python : python.terminate()
    executor.runShellKillPgrp = runShellKillPgrp_method
    subproc_mock.returncode = 0
    subproc_mock.should_finish_event.set()
    result = executor.run_file("file", ["arg1", "arg2"], tmpoutfile, tmperrfile, PYTHON_TIMEOUT_SECONDS, tmpstroutfile)
    self.assertEquals(result, {'exitcode': 0, 'stderr': 'Dummy err', 'stdout': 'Dummy output',
                               'structuredOut': {'msg': 'Unable to read structured output from ' + tmpstroutfile}})
Example #30
    def test_is_successfull(self):
        executor = PythonExecutor("/tmp", AgentConfig("", ""),
                                  self.agentToggleLogger)

        executor.python_process_has_been_killed = False
        self.assertTrue(executor.isSuccessfull(0))
        self.assertFalse(executor.isSuccessfull(1))

        executor.python_process_has_been_killed = True
        self.assertFalse(executor.isSuccessfull(0))
        self.assertFalse(executor.isSuccessfull(1))
Example #31
  def test_is_successfull(self):
    executor = PythonExecutor("/tmp", AmbariConfig().getConfig())

    executor.python_process_has_been_killed = False
    self.assertTrue(executor.isSuccessfull(0))
    self.assertFalse(executor.isSuccessfull(1))

    executor.python_process_has_been_killed = True
    self.assertFalse(executor.isSuccessfull(0))
    self.assertFalse(executor.isSuccessfull(1))
Example #32
  def test_is_successfull(self):
    executor = PythonExecutor("/tmp", AgentConfig("", ""), self.agentToggleLogger)

    executor.python_process_has_been_killed = False
    self.assertTrue(executor.isSuccessfull(0))
    self.assertFalse(executor.isSuccessfull(1))

    executor.python_process_has_been_killed = True
    self.assertFalse(executor.isSuccessfull(0))
    self.assertFalse(executor.isSuccessfull(1))
Example #33
 def __init__(self, config, controller):
   self.config = config
   self.tmp_dir = config.getResolvedPath(AgentConfig.APP_TASK_DIR)
   self.python_executor = PythonExecutor(self.tmp_dir, config)
   self.status_commands_stdout = os.path.join(self.tmp_dir,
                                              'status_command_stdout.txt')
   self.status_commands_stderr = os.path.join(self.tmp_dir,
                                              'status_command_stderr.txt')
   self.base_dir = os.path.join(
     config.getResolvedPath(AgentConfig.APP_PACKAGE_DIR), "package")
   # Clean up old status command files if any
   try:
     os.unlink(self.status_commands_stdout)
     os.unlink(self.status_commands_stderr)
   except OSError:
     pass # Ignore fail
Example #34
 def __init__(self, config, controller):
   self.config = config
   self.tmp_dir = config.get('agent', 'prefix')
   self.file_cache = FileCache(config)
   self.python_executor = PythonExecutor(self.tmp_dir, config)
   self.status_commands_stdout = os.path.join(self.tmp_dir,
                                              'status_command_stdout.txt')
   self.status_commands_stderr = os.path.join(self.tmp_dir,
                                              'status_command_stderr.txt')
   self.public_fqdn = hostname.public_hostname()
   # cache reset will be called on every agent registration
   controller.registration_listeners.append(self.file_cache.reset)
   # Clean up old status command files if any
   try:
     os.unlink(self.status_commands_stdout)
     os.unlink(self.status_commands_stderr)
   except OSError:
     pass # Ignore fail
Example #35
 def __init__(self, config, controller):
   self.config = config
   self.tmp_dir = config.getResolvedPath(AgentConfig.APP_TASK_DIR)
   self.python_executor = PythonExecutor(self.tmp_dir, config)
   self.status_commands_stdout = os.path.realpath(posixpath.join(self.tmp_dir,
                                                                 'status_command_stdout.txt'))
   self.status_commands_stderr = os.path.realpath(posixpath.join(self.tmp_dir,
                                                                 'status_command_stderr.txt'))
   self.public_fqdn = hostname.public_hostname()
   self.stored_command = {}
   self.allocated_ports = {}
   # Clean up old status command files if any
   try:
     os.unlink(self.status_commands_stdout)
     os.unlink(self.status_commands_stderr)
   except OSError:
     pass # Ignore fail
   self.base_dir = os.path.realpath(posixpath.join(
     config.getResolvedPath(AgentConfig.APP_PACKAGE_DIR), "package"))
Example #36
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"

    def __init__(self, config):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.file_cache = FileCache(config)
        self.python_executor = PythonExecutor(self.tmp_dir, config)

    def runCommand(self, command, tmpoutfile, tmperrfile):
        try:
            component_name = command['role']
            stack_name = command['hostLevelParams']['stack_name']
            stack_version = command['hostLevelParams']['stack_version']
            script_type = command['commandParams']['script_type']
            script = command['commandParams']['script']
            command_name = command['roleCommand']
            timeout = int(command['commandParams']['command_timeout'])
            metadata_folder = command['commandParams'][
                'service_metadata_folder']
            base_dir = self.file_cache.get_service_base_dir(
                stack_name, stack_version, metadata_folder, component_name)
            script_path = self.resolve_script_path(base_dir, script,
                                                   script_type)
            if script_type.upper() == self.SCRIPT_TYPE_PYTHON:
                json_path = self.dump_command_to_json(command)
                script_params = [command_name, json_path, base_dir]
                ret = self.python_executor.run_file(script_path, script_params,
                                                    tmpoutfile, tmperrfile,
                                                    timeout)
            else:
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)
        except Exception:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Catched an exception while executing "\
              "custom service command: {0}: {1}".format(exc_type, exc_obj)
            logger.error(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'exitcode': 1,
            }
        return ret

    def resolve_script_path(self, base_dir, script, script_type):
        """
    Encapsulates logic of script location determination.
    """
        path = os.path.join(base_dir, "package", script)
        if not os.path.exists(path):
            message = "Script {0} does not exist".format(path)
            raise AgentException(message)
        return path

    def dump_command_to_json(self, command):
        """
    Converts command to json file and returns file path
    """
        task_id = command['taskId']
        file_path = os.path.join(self.tmp_dir,
                                 "command-{0}.json".format(task_id))
        # Command json contains passwords, that's why we need proper permissions
        with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT, 0600),
                       'w') as f:
            content = json.dumps(command, sort_keys=False, indent=4)
            f.write(content)
        return file_path
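
Since dump_command_to_json above writes the command with json.dumps, a caller can load the file back with the standard json module. A small, hypothetical round-trip check (file_path and command as produced above):

import json

with open(file_path) as f:
    loaded = json.load(f)
assert loaded['taskId'] == command['taskId']  # the dumped JSON mirrors the command dict
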
Example #37
 def __init__(self, config):
     self.config = config
     self.tmp_dir = config.get('agent', 'prefix')
     self.file_cache = FileCache(config)
     self.python_executor = PythonExecutor(self.tmp_dir, config)
Example #38
 def get_py_executor(self):
     """
 Wrapper for unit testing
 :return:
 """
     return PythonExecutor(self.tmp_dir, self.config)
Example #39
class CustomServiceOrchestrator():
  """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

  SCRIPT_TYPE_PYTHON = "PYTHON"
  COMMAND_NAME_STATUS = "STATUS"
  COMMAND_NAME_SECURITY_STATUS = "SECURITY_STATUS"
  CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
  CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

  PRE_HOOK_PREFIX="before"
  POST_HOOK_PREFIX="after"

  HOSTS_LIST_KEY = "all_hosts"
  PING_PORTS_KEY = "all_ping_ports"
  AMBARI_SERVER_HOST = "ambari_server_host"

  def __init__(self, config, controller):
    self.config = config
    self.tmp_dir = config.get('agent', 'prefix')
    self.exec_tmp_dir = config.get('agent', 'tmp_dir')
    self.file_cache = FileCache(config)
    self.python_executor = PythonExecutor(self.tmp_dir, config)
    self.status_commands_stdout = os.path.join(self.tmp_dir,
                                               'status_command_stdout.txt')
    self.status_commands_stderr = os.path.join(self.tmp_dir,
                                               'status_command_stderr.txt')
    self.public_fqdn = hostname.public_hostname(config)
    # cache reset will be called on every agent registration
    controller.registration_listeners.append(self.file_cache.reset)

    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore fail
    self.commands_in_progress_lock = threading.RLock()
    self.commands_in_progress = {}

  def map_task_to_process(self, task_id, processId):
    with self.commands_in_progress_lock:
      logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
      self.commands_in_progress[task_id] = processId

  def cancel_command(self, task_id, reason):
    with self.commands_in_progress_lock:
      if task_id in self.commands_in_progress.keys():
        pid = self.commands_in_progress.get(task_id)
        self.commands_in_progress[task_id] = reason
        logger.info("Canceling command with task_id - {tid}, " \
                    "reason - {reason} . Killing process {pid}"
                    .format(tid=str(task_id), reason=reason, pid=pid))
        shell.kill_process_with_children(pid)
      else: 
        logger.warn("Unable to find pid by taskId = %s" % task_id)

  def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name=None,
                 override_output_files = True):
    """
    forced_command_name may be specified manually. In this case, value, defined at
    command json, is ignored.
    """
    try:
      script_type = command['commandParams']['script_type']
      script = command['commandParams']['script']
      timeout = int(command['commandParams']['command_timeout'])

      if 'hostLevelParams' in command and 'jdk_location' in command['hostLevelParams']:
        server_url_prefix = command['hostLevelParams']['jdk_location']
      else:
        server_url_prefix = command['commandParams']['jdk_location']
        
      task_id = "status"
      
      try:
        task_id = command['taskId']
        command_name = command['roleCommand']
      except KeyError:
        pass  # Status commands have no taskId

      if forced_command_name is not None:  # If not supplied as an argument
        command_name = forced_command_name

      if command_name == self.CUSTOM_ACTION_COMMAND:
        base_dir = self.file_cache.get_custom_actions_base_dir(server_url_prefix)
        script_tuple = (os.path.join(base_dir, 'scripts', script), base_dir)
        hook_dir = None
      else:
        if command_name == self.CUSTOM_COMMAND_COMMAND:
          command_name = command['hostLevelParams']['custom_command']

        # forces a hash challenge on the directories to keep them updated, even
        # if the return type is not used
        self.file_cache.get_host_scripts_base_dir(server_url_prefix)          
        hook_dir = self.file_cache.get_hook_base_dir(command, server_url_prefix)
        base_dir = self.file_cache.get_service_base_dir(command, server_url_prefix)
        
        script_path = self.resolve_script_path(base_dir, script)
        script_tuple = (script_path, base_dir)

      tmpstrucoutfile = os.path.join(self.tmp_dir,
                                    "structured-out-{0}.json".format(task_id))

      # We don't support anything else yet
      if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
        message = "Unknown script type {0}".format(script_type)
        raise AgentException(message)

      # Execute command using proper interpreter
      handle = None
      if command.has_key('__handle'):
        handle = command['__handle']
        handle.on_background_command_started = self.map_task_to_process
        del command['__handle']

      json_path = self.dump_command_to_json(command)
      pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.PRE_HOOK_PREFIX, command_name, script_type)
      post_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.POST_HOOK_PREFIX, command_name, script_type)
      py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
      # filter None values
      filtered_py_file_list = [i for i in py_file_list if i]

      logger_level = logging.getLevelName(logger.level)

      # Executing hooks and script
      ret = None
      from ActionQueue import ActionQueue
      if command.has_key('commandType') and command['commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and len(filtered_py_file_list) > 1:
        raise AgentException("Background commands are supported without hooks only")

      for py_file, current_base_dir in filtered_py_file_list:
        script_params = [command_name, json_path, current_base_dir]
        ret = self.python_executor.run_file(py_file, script_params,
                               self.exec_tmp_dir, tmpoutfile, tmperrfile, timeout,
                               tmpstrucoutfile, logger_level, self.map_task_to_process,
                               task_id, override_output_files, handle = handle)
        # Next run_file() invocations should always append to current output
        override_output_files = False
        if ret['exitcode'] != 0:
          break

      if not ret: # Something went wrong
        raise AgentException("No script has been executed")

      # if canceled and not background command
      if handle is None:
        cancel_reason = self.command_canceled_reason(task_id)
        if cancel_reason:
          ret['stdout'] += cancel_reason
          ret['stderr'] += cancel_reason

          with open(tmpoutfile, "a") as f:
            f.write(cancel_reason)
          with open(tmperrfile, "a") as f:
            f.write(cancel_reason)

    except Exception, e: # We do not want to let agent fail completely
      exc_type, exc_obj, exc_tb = sys.exc_info()
      message = "Caught an exception while executing "\
        "custom service command: {0}: {1}; {2}".format(exc_type, exc_obj, str(e))
      logger.exception(message)
      ret = {
        'stdout' : message,
        'stderr' : message,
        'structuredOut' : '{}',
        'exitcode': 1,
      }
    return ret
Example #40
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"
    COMMAND_NAME_STATUS = "STATUS"
    LIVE_STATUS = "STARTED"
    DEAD_STATUS = "INSTALLED"

    def __init__(self, config, controller):
        self.config = config
        self.tmp_dir = config.getResolvedPath(AgentConfig.APP_TASK_DIR)
        self.python_executor = PythonExecutor(self.tmp_dir, config)
        self.status_commands_stdout = os.path.join(
            self.tmp_dir, 'status_command_stdout.txt')
        self.status_commands_stderr = os.path.join(
            self.tmp_dir, 'status_command_stderr.txt')
        self.public_fqdn = hostname.public_hostname()
        self.applied_configs = {}
        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore fail
        self.base_dir = os.path.join(
            config.getResolvedPath(AgentConfig.APP_PACKAGE_DIR), "package")

    def runCommand(self,
                   command,
                   tmpoutfile,
                   tmperrfile,
                   override_output_files=True,
                   store_config=False):
        try:
            script_type = command['commandParams']['script_type']
            script = command['commandParams']['script']
            timeout = int(command['commandParams']['command_timeout'])
            task_id = command['taskId']
            command_name = command['roleCommand']

            script_path = self.resolve_script_path(self.base_dir, script,
                                                   script_type)
            script_tuple = (script_path, self.base_dir)

            tmpstrucoutfile = os.path.join(
                self.tmp_dir, "structured-out-{0}.json".format(task_id))
            if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
                # We don't support anything else yet
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)
                # Execute command using proper interpreter
            json_path = self.dump_command_to_json(command, store_config)
            py_file_list = [script_tuple]
            # filter None values
            filtered_py_file_list = [i for i in py_file_list if i]

            # Executing hooks and script
            ret = None
            for py_file, current_base_dir in filtered_py_file_list:
                script_params = [command_name, json_path, current_base_dir]
                python_paths = [
                    os.path.join(self.config.getWorkRootPath(),
                                 "infra/agent/slider-agent/jinja2"),
                    os.path.join(self.config.getWorkRootPath(),
                                 "infra/agent/slider-agent")
                ]
                environment_vars = [("PYTHONPATH", ":".join(python_paths))]
                ret = self.python_executor.run_file(py_file, script_params,
                                                    tmpoutfile, tmperrfile,
                                                    timeout, tmpstrucoutfile,
                                                    override_output_files,
                                                    environment_vars)
                # Next run_file() invocations should always append to current output
                override_output_files = False
                if ret['exitcode'] != 0:
                    break

            if not ret:  # Something went wrong
                raise AgentException("No script has been executed")

        except Exception:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Caught an exception while executing " \
                      "command: {0}: {1}".format(exc_type, exc_obj)
            logger.exception(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'structuredOut': '{}',
                'exitcode': 1,
            }

        return ret

    def resolve_script_path(self, base_dir, script, script_type):
        """
    Encapsulates logic of script location determination.
    """
        path = os.path.join(base_dir, script)
        if not os.path.exists(path):
            message = "Script {0} does not exist".format(path)
            raise AgentException(message)
        return path

    def requestComponentStatus(self, command):
        """
     Component status is determined by exit code, returned by runCommand().
     Exit code 0 means that component is running and any other exit code means that
     component is not running
    """
        override_output_files = True  # by default, we override status command output
        if logger.level == logging.DEBUG:
            override_output_files = False

        if command['roleCommand'] == "GET_CONFIG":
            logger.info("Requesting applied config ...")
            return {'configurations': self.applied_configs}

        else:
            res = self.runCommand(command,
                                  self.status_commands_stdout,
                                  self.status_commands_stderr,
                                  override_output_files=override_output_files)
            if res['exitcode'] == 0:
                res['exitcode'] = CustomServiceOrchestrator.LIVE_STATUS
            else:
                res['exitcode'] = CustomServiceOrchestrator.DEAD_STATUS

            return res
        pass

    def dump_command_to_json(self, command, store_config=False):
        """
    Converts command to json file and returns file path
    """
        # Perform few modifications to stay compatible with the way in which
        # site.pp files are generated by manifestGenerator.py
        public_fqdn = self.public_fqdn
        command['public_hostname'] = public_fqdn
        # Now, dump the json file
        command_type = command['commandType']
        from ActionQueue import ActionQueue  # To avoid cyclic dependency

        if command_type == ActionQueue.STATUS_COMMAND:
            # These files are frequently created, thats why we don't
            # store them all, but only the latest one
            file_path = os.path.join(self.tmp_dir, "status_command.json")
        else:
            task_id = command['taskId']
            file_path = os.path.join(self.tmp_dir,
                                     "command-{0}.json".format(task_id))
            # Json may contain passwords, that's why we need proper permissions
        if os.path.isfile(file_path):
            os.unlink(file_path)

        self.finalize_command(command, store_config)

        with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT, 0600),
                       'w') as f:
            content = json.dumps(command, sort_keys=False, indent=4)
            f.write(content)
        return file_path
Example #41
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"
    COMMAND_NAME_STATUS = "STATUS"
    CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
    CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

    PRE_HOOK_PREFIX = "before"
    POST_HOOK_PREFIX = "after"

    HOSTS_LIST_KEY = "all_hosts"
    PING_PORTS_KEY = "all_ping_ports"
    AMBARI_SERVER_HOST = "ambari_server_host"

    def __init__(self, config, controller):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.exec_tmp_dir = config.get('agent', 'tmp_dir')
        self.file_cache = FileCache(config)
        self.python_executor = PythonExecutor(self.tmp_dir, config)
        self.status_commands_stdout = os.path.join(
            self.tmp_dir, 'status_command_stdout.txt')
        self.status_commands_stderr = os.path.join(
            self.tmp_dir, 'status_command_stderr.txt')
        self.public_fqdn = hostname.public_hostname(config)
        # cache reset will be called on every agent registration
        controller.registration_listeners.append(self.file_cache.reset)

        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore fail
        self.commands_in_progress_lock = threading.RLock()
        self.commands_in_progress = {}

    def map_task_to_process(self, task_id, processId):
        with self.commands_in_progress_lock:
            logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
            self.commands_in_progress[task_id] = processId

    def cancel_command(self, task_id, reason):
        with self.commands_in_progress_lock:
            if task_id in self.commands_in_progress.keys():
                pid = self.commands_in_progress.get(task_id)
                self.commands_in_progress[task_id] = reason
                logger.info("Canceling command with task_id - {tid}, " \
                            "reason - {reason} . Killing process {pid}"
                .format(tid = str(task_id), reason = reason, pid = pid))
                shell.kill_process_with_children(pid)
            else:
                logger.warn("Unable to find pid by taskId = %s" % task_id)

    def runCommand(self,
                   command,
                   tmpoutfile,
                   tmperrfile,
                   forced_command_name=None,
                   override_output_files=True):
        """
    forced_command_name may be specified manually. In this case, value, defined at
    command json, is ignored.
    """
        try:
            script_type = command['commandParams']['script_type']
            script = command['commandParams']['script']
            timeout = int(command['commandParams']['command_timeout'])

            if 'hostLevelParams' in command and 'jdk_location' in command[
                    'hostLevelParams']:
                server_url_prefix = command['hostLevelParams']['jdk_location']
            else:
                server_url_prefix = command['commandParams']['jdk_location']
            task_id = "status"
            try:
                task_id = command['taskId']
                command_name = command['roleCommand']
            except KeyError:
                pass  # Status commands have no taskId

            if forced_command_name is not None:  # If not supplied as an argument
                command_name = forced_command_name

            if command_name == self.CUSTOM_ACTION_COMMAND:
                base_dir = self.file_cache.get_custom_actions_base_dir(
                    server_url_prefix)
                script_tuple = (os.path.join(base_dir, script), base_dir)
                hook_dir = None
            else:
                if command_name == self.CUSTOM_COMMAND_COMMAND:
                    command_name = command['hostLevelParams']['custom_command']
                hook_dir = self.file_cache.get_hook_base_dir(
                    command, server_url_prefix)
                base_dir = self.file_cache.get_service_base_dir(
                    command, server_url_prefix)
                script_path = self.resolve_script_path(base_dir, script,
                                                       script_type)
                script_tuple = (script_path, base_dir)

            tmpstrucoutfile = os.path.join(
                self.tmp_dir, "structured-out-{0}.json".format(task_id))

            if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
                # We don't support anything else yet
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)
            # Execute command using proper interpreter
            handle = None
            if (command.has_key('__handle')):
                handle = command['__handle']
                handle.on_background_command_started = self.map_task_to_process
                del command['__handle']

            json_path = self.dump_command_to_json(command)
            pre_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.PRE_HOOK_PREFIX, command_name, script_type)
            post_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.POST_HOOK_PREFIX, command_name, script_type)
            py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
            # filter None values
            filtered_py_file_list = [i for i in py_file_list if i]

            logger_level = logging.getLevelName(logger.level)

            # Executing hooks and script
            ret = None
            from ActionQueue import ActionQueue
            if (command.has_key('commandType') and command['commandType']
                    == ActionQueue.BACKGROUND_EXECUTION_COMMAND
                    and len(filtered_py_file_list) > 1):
                raise AgentException(
                    "Background commands are supported without hooks only")

            for py_file, current_base_dir in filtered_py_file_list:
                script_params = [command_name, json_path, current_base_dir]
                ret = self.python_executor.run_file(py_file,
                                                    script_params,
                                                    self.exec_tmp_dir,
                                                    tmpoutfile,
                                                    tmperrfile,
                                                    timeout,
                                                    tmpstrucoutfile,
                                                    logger_level,
                                                    self.map_task_to_process,
                                                    task_id,
                                                    override_output_files,
                                                    handle=handle)
                # Next run_file() invocations should always append to current output
                override_output_files = False
                if ret['exitcode'] != 0:
                    break

            if not ret:  # Something went wrong
                raise AgentException("No script has been executed")

            # if canceled and not background command
            if handle is None:
                cancel_reason = self.command_canceled_reason(task_id)
                if cancel_reason:
                    ret['stdout'] += cancel_reason
                    ret['stderr'] += cancel_reason

                    with open(tmpoutfile, "a") as f:
                        f.write(cancel_reason)
                    with open(tmperrfile, "a") as f:
                        f.write(cancel_reason)

        except Exception:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Catched an exception while executing "\
              "custom service command: {0}: {1}".format(exc_type, exc_obj)
            logger.exception(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'structuredOut': '{}',
                'exitcode': 1,
            }
        return ret

    def command_canceled_reason(self, task_id):
        with self.commands_in_progress_lock:
            if self.commands_in_progress.has_key(
                    task_id
            ):  # Background commands are not pushed into this collection (TODO)
                logger.debug('Pop with taskId %s' % task_id)
                pid = self.commands_in_progress.pop(task_id)
                if not isinstance(pid, int):
                    return '\nCommand aborted. ' + pid
        return None

    def requestComponentStatus(self, command):
        """
     Component status is determined by the exit code returned by runCommand().
     Exit code 0 means that the component is running; any other exit code means
     that the component is not running.
    """
        override_output_files = True  # by default, we override status command output
        if logger.level == logging.DEBUG:
            override_output_files = False
        res = self.runCommand(command,
                              self.status_commands_stdout,
                              self.status_commands_stderr,
                              self.COMMAND_NAME_STATUS,
                              override_output_files=override_output_files)
        return res

    def resolve_script_path(self, base_dir, script, script_type):
        """
    Encapsulates the logic of script location determination.
    """
        path = os.path.join(base_dir, script)
        if not os.path.exists(path):
            message = "Script {0} does not exist".format(path)
            raise AgentException(message)
        return path

    def resolve_hook_script_path(self, stack_hooks_dir, prefix, command_name,
                                 script_type):
        """
    Returns a tuple(path to hook script, hook base dir) according to string prefix
    and command name. If script does not exist, returns None
    """
        if not stack_hooks_dir:
            return None
        hook_dir = "{0}-{1}".format(prefix, command_name)
        hook_base_dir = os.path.join(stack_hooks_dir, hook_dir)
        hook_script_path = os.path.join(hook_base_dir, "scripts", "hook.py")
        if not os.path.isfile(hook_script_path):
            logger.debug(
                "Hook script {0} not found, skipping".format(hook_script_path))
            return None
        return hook_script_path, hook_base_dir

    def dump_command_to_json(self, command):
        """
    Converts command to json file and returns file path
    """
        # Perform a few modifications to stay compatible with the way in which
        # site.pp files are generated by manifestGenerator.py
        public_fqdn = self.public_fqdn
        command['public_hostname'] = public_fqdn
        # Now, dump the json file
        command_type = command['commandType']
        from ActionQueue import ActionQueue  # To avoid cyclic dependency
        if command_type == ActionQueue.STATUS_COMMAND:
            # These files are frequently created, that's why we don't
            # store them all, but only the latest one
            file_path = os.path.join(self.tmp_dir, "status_command.json")
        else:
            task_id = command['taskId']
            if 'clusterHostInfo' in command and command['clusterHostInfo']:
                command['clusterHostInfo'] = self.decompressClusterHostInfo(
                    command['clusterHostInfo'])
            file_path = os.path.join(self.tmp_dir,
                                     "command-{0}.json".format(task_id))
        # Json may contain passwords, that's why we need proper permissions
        if os.path.isfile(file_path):
            os.unlink(file_path)
        with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT, 0600),
                       'w') as f:
            content = json.dumps(command, sort_keys=False, indent=4)
            f.write(content)
        return file_path
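The loop above runs the pre-hook, the service script and the post-hook in that order: output files are overridden only for the first run_file() call, later calls append to the same files, and the chain stops at the first non-zero exit code. Below is a minimal sketch of just that behaviour, assuming a stand-in run_one() callable in place of PythonExecutor.run_file(); the file names and canned results are illustrative.

# Minimal sketch (not project code) of the hook/script chain behaviour in
# runCommand() above. run_one() stands in for python_executor.run_file().
def run_chain(py_files, run_one):
    override_output_files = True
    ret = None
    for py_file in py_files:
        ret = run_one(py_file, override_output_files)
        # Next invocations should always append to the current output
        override_output_files = False
        if ret['exitcode'] != 0:
            break
    return ret

results = iter([{'exitcode': 0}, {'exitcode': 1}, {'exitcode': 0}])
outcome = run_chain(['before-hook.py', 'script.py', 'after-hook.py'],
                    lambda py_file, override: next(results))
print(outcome)  # {'exitcode': 1} -- the post-hook never runs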
Example #42
0
class CustomServiceOrchestrator():
  """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

  SCRIPT_TYPE_PYTHON = "PYTHON"
  COMMAND_NAME_STATUS = "STATUS"
  CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
  CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

  PRE_HOOK_PREFIX="before"
  POST_HOOK_PREFIX="after"

  def __init__(self, config, controller):
    self.config = config
    self.tmp_dir = config.get('agent', 'prefix')
    self.file_cache = FileCache(config)
    self.python_executor = PythonExecutor(self.tmp_dir, config)
    self.status_commands_stdout = os.path.join(self.tmp_dir,
                                               'status_command_stdout.txt')
    self.status_commands_stderr = os.path.join(self.tmp_dir,
                                               'status_command_stderr.txt')
    # cache reset will be called on every agent registration
    controller.registration_listeners.append(self.file_cache.reset)
    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore fail


  def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name = None,
                 override_output_files = True):
    """
    forced_command_name may be specified manually. In this case, the value defined in
    the command JSON is ignored.
    """
    try:
      script_type = command['commandParams']['script_type']
      script = command['commandParams']['script']
      timeout = int(command['commandParams']['command_timeout'])
      server_url_prefix = command['hostLevelParams']['jdk_location']
      task_id = "status"
      try:
        task_id = command['taskId']
        command_name = command['roleCommand']
      except KeyError:
        pass # Status commands have no taskId

      if forced_command_name is not None: # If supplied as an argument
        command_name = forced_command_name

      if command_name == self.CUSTOM_ACTION_COMMAND:
        base_dir = self.file_cache.get_custom_actions_base_dir(server_url_prefix)
        script_tuple = (os.path.join(base_dir, script), base_dir)
        hook_dir = None
      else:
        if command_name == self.CUSTOM_COMMAND_COMMAND:
          command_name = command['hostLevelParams']['custom_command']
        hook_dir = self.file_cache.get_hook_base_dir(command, server_url_prefix)
        base_dir = self.file_cache.get_service_base_dir(command, server_url_prefix)
        script_path = self.resolve_script_path(base_dir, script, script_type)
        script_tuple = (script_path, base_dir)


      tmpstrucoutfile = os.path.join(self.tmp_dir,
                                    "structured-out-{0}.json".format(task_id))
      # We don't support anything else yet
      if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
        message = "Unknown script type {0}".format(script_type)
        raise AgentException(message)
      # Execute command using proper interpreter
      json_path = self.dump_command_to_json(command)
      pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.PRE_HOOK_PREFIX, command_name, script_type)
      post_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.POST_HOOK_PREFIX, command_name, script_type)
      py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
      # filter None values
      filtered_py_file_list = [i for i in py_file_list if i]

      # Executing hooks and script
      ret = None
      for py_file, current_base_dir in filtered_py_file_list:
        script_params = [command_name, json_path, current_base_dir]
        ret = self.python_executor.run_file(py_file, script_params,
                               tmpoutfile, tmperrfile, timeout,
                               tmpstrucoutfile, override_output_files)
        # Next run_file() invocations should always append to current output
        override_output_files = False
        if ret['exitcode'] != 0:
          break

      if not ret: # Something went wrong
        raise AgentException("No script has been executed")

    except Exception: # We do not want to let agent fail completely
      exc_type, exc_obj, exc_tb = sys.exc_info()
      message = "Catched an exception while executing "\
        "custom service command: {0}: {1}".format(exc_type, exc_obj)
      logger.exception(message)
      ret = {
        'stdout' : message,
        'stderr' : message,
        'structuredOut' : '{}',
        'exitcode': 1,
      }
    return ret


  def requestComponentStatus(self, command):
    """
     Component status is determined by the exit code returned by runCommand().
     Exit code 0 means that the component is running; any other exit code means
     that the component is not running.
    """
    override_output_files=True # by default, we override status command output
    if logger.level == logging.DEBUG:
      override_output_files = False
    res = self.runCommand(command, self.status_commands_stdout,
                          self.status_commands_stderr, self.COMMAND_NAME_STATUS,
                          override_output_files=override_output_files)
    if res['exitcode'] == 0:
      return LiveStatus.LIVE_STATUS
    else:
      return LiveStatus.DEAD_STATUS


  def resolve_script_path(self, base_dir, script, script_type):
    """
    Encapsulates the logic of script location determination.
    """
    path = os.path.join(base_dir, script)
    if not os.path.exists(path):
      message = "Script {0} does not exist".format(path)
      raise AgentException(message)
    return path


  def resolve_hook_script_path(self, stack_hooks_dir, prefix, command_name, script_type):
    """
    Returns a tuple(path to hook script, hook base dir) according to string prefix
    and command name. If script does not exist, returns None
    """
    if not stack_hooks_dir:
      return None
    hook_dir = "{0}-{1}".format(prefix, command_name)
    hook_base_dir = os.path.join(stack_hooks_dir, hook_dir)
    hook_script_path = os.path.join(hook_base_dir, "scripts", "hook.py")
    if not os.path.isfile(hook_script_path):
      logger.debug("Hook script {0} not found, skipping".format(hook_script_path))
      return None
    return hook_script_path, hook_base_dir


  def dump_command_to_json(self, command):
    """
    Converts command to json file and returns file path
    """
    # Perform a few modifications to stay compatible with the way in which
    # site.pp files are generated by manifestGenerator.py
    public_fqdn = hostname.public_hostname()
    command['public_hostname'] = public_fqdn
    # Now, dump the json file
    command_type = command['commandType']
    from ActionQueue import ActionQueue  # To avoid cyclic dependency
    if command_type == ActionQueue.STATUS_COMMAND:
      # These files are frequently created, that's why we don't
      # store them all, but only the latest one
      file_path = os.path.join(self.tmp_dir, "status_command.json")
    else:
      task_id = command['taskId']
      command['clusterHostInfo'] = manifestGenerator.decompressClusterHostInfo(command['clusterHostInfo'])
      file_path = os.path.join(self.tmp_dir, "command-{0}.json".format(task_id))
    # Json may contain passwords, that's why we need proper permissions
    if os.path.isfile(file_path):
      os.unlink(file_path)
    with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT,
                           0600), 'w') as f:
      content = json.dumps(command, sort_keys = False, indent = 4)
      f.write(content)
    return file_path
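dump_command_to_json() above writes status commands to a single reusable status_command.json, gives every other command its own command-<taskId>.json, and creates the file with owner-only permissions because the JSON may contain passwords. A minimal standalone sketch of that convention, using an illustrative command dict and a throwaway temporary directory:

# Standalone sketch (not project code) of the command-dump convention above.
import json
import os
import tempfile

def dump_command(command, tmp_dir):
    if command['commandType'] == 'STATUS_COMMAND':
        file_path = os.path.join(tmp_dir, 'status_command.json')
    else:
        file_path = os.path.join(tmp_dir, 'command-{0}.json'.format(command['taskId']))
    if os.path.isfile(file_path):
        os.unlink(file_path)
    # 0o600: readable and writable by the agent user only
    with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
        f.write(json.dumps(command, indent=4))
    return file_path

print(dump_command({'commandType': 'EXECUTION_COMMAND', 'taskId': 7},
                   tempfile.mkdtemp()))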
Example #43
0
class CustomServiceOrchestrator():
  """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

  SCRIPT_TYPE_PYTHON = "PYTHON"
  COMMAND_NAME_STATUS = "STATUS"
  CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
  CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

  PRE_HOOK_PREFIX="before"
  POST_HOOK_PREFIX="after"

  HOSTS_LIST_KEY = "all_hosts"
  PING_PORTS_KEY = "all_ping_ports"
  AMBARI_SERVER_HOST = "ambari_server_host"

  def __init__(self, config, controller):
    self.config = config
    self.tmp_dir = config.get('agent', 'prefix')
    self.exec_tmp_dir = config.get('agent', 'tmp_dir')
    self.file_cache = FileCache(config)
    self.python_executor = PythonExecutor(self.tmp_dir, config)
    self.status_commands_stdout = os.path.join(self.tmp_dir,
                                               'status_command_stdout.txt')
    self.status_commands_stderr = os.path.join(self.tmp_dir,
                                               'status_command_stderr.txt')
    self.public_fqdn = hostname.public_hostname(config)
    # cache reset will be called on every agent registration
    controller.registration_listeners.append(self.file_cache.reset)
    
    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore fail
    self.commands_in_progress_lock = threading.RLock()
    self.commands_in_progress = {}

  def map_task_to_process(self, task_id, processId):
    with self.commands_in_progress_lock:
      logger.debug('Maps taskId=%s to pid=%s'%(task_id, processId))
      self.commands_in_progress[task_id] = processId

  def cancel_command(self, task_id, reason):
    with self.commands_in_progress_lock:
      if task_id in self.commands_in_progress.keys():
        pid = self.commands_in_progress.get(task_id)
        self.commands_in_progress[task_id] = reason
        logger.info("Canceling command with task_id - {tid}, " \
                    "reason - {reason} . Killing process {pid}"
        .format(tid = str(task_id), reason = reason, pid = pid))
        shell.kill_process_with_children(pid)
      else: 
        logger.warn("Unable to find pid by taskId = %s"%task_id)

  def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name = None,
                 override_output_files = True):
    """
    forced_command_name may be specified manually. In this case, the value defined in
    the command JSON is ignored.
    """
    try:
      script_type = command['commandParams']['script_type']
      script = command['commandParams']['script']
      timeout = int(command['commandParams']['command_timeout'])
      
      if 'hostLevelParams' in command and 'jdk_location' in command['hostLevelParams']:
        server_url_prefix = command['hostLevelParams']['jdk_location']
      else:
        server_url_prefix = command['commandParams']['jdk_location']
      task_id = "status"
      try:
        task_id = command['taskId']
        command_name = command['roleCommand']
      except KeyError:
        pass # Status commands have no taskId

      if forced_command_name is not None: # If supplied as an argument
        command_name = forced_command_name

      if command_name == self.CUSTOM_ACTION_COMMAND:
        base_dir = self.file_cache.get_custom_actions_base_dir(server_url_prefix)
        script_tuple = (os.path.join(base_dir, script), base_dir)
        hook_dir = None
      else:
        if command_name == self.CUSTOM_COMMAND_COMMAND:
          command_name = command['hostLevelParams']['custom_command']
        hook_dir = self.file_cache.get_hook_base_dir(command, server_url_prefix)
        base_dir = self.file_cache.get_service_base_dir(command, server_url_prefix)
        script_path = self.resolve_script_path(base_dir, script, script_type)
        script_tuple = (script_path, base_dir)

      tmpstrucoutfile = os.path.join(self.tmp_dir,
                                    "structured-out-{0}.json".format(task_id))

      # We don't support anything else yet
      if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
        message = "Unknown script type {0}".format(script_type)
        raise AgentException(message)
      # Execute command using proper interpreter
      handle = None
      if(command.has_key('__handle')):
        handle = command['__handle']
        handle.on_background_command_started = self.map_task_to_process
        del command['__handle']
      
      json_path = self.dump_command_to_json(command)
      pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.PRE_HOOK_PREFIX, command_name, script_type)
      post_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.POST_HOOK_PREFIX, command_name, script_type)
      py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
      # filter None values
      filtered_py_file_list = [i for i in py_file_list if i]

      logger_level = logging.getLevelName(logger.level)

      # Executing hooks and script
      ret = None
      from ActionQueue import ActionQueue
      if(command.has_key('commandType') and command['commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and len(filtered_py_file_list) > 1):
        raise AgentException("Background commands are supported without hooks only")

      for py_file, current_base_dir in filtered_py_file_list:
        script_params = [command_name, json_path, current_base_dir]
        ret = self.python_executor.run_file(py_file, script_params,
                               self.exec_tmp_dir, tmpoutfile, tmperrfile, timeout,
                               tmpstrucoutfile, logger_level, self.map_task_to_process,
                               task_id, override_output_files, handle = handle)
        # Next run_file() invocations should always append to current output
        override_output_files = False
        if ret['exitcode'] != 0:
          break

      if not ret: # Something went wrong
        raise AgentException("No script has been executed")

      # if canceled and not background command
      if handle is None:
        cancel_reason = self.command_canceled_reason(task_id)
        if cancel_reason:
          ret['stdout'] += cancel_reason
          ret['stderr'] += cancel_reason
  
          with open(tmpoutfile, "a") as f:
            f.write(cancel_reason)
          with open(tmperrfile, "a") as f:
            f.write(cancel_reason)

    except Exception: # We do not want to let agent fail completely
      exc_type, exc_obj, exc_tb = sys.exc_info()
      message = "Catched an exception while executing "\
        "custom service command: {0}: {1}".format(exc_type, exc_obj)
      logger.exception(message)
      ret = {
        'stdout' : message,
        'stderr' : message,
        'structuredOut' : '{}',
        'exitcode': 1,
      }
    return ret

  def command_canceled_reason(self, task_id):
    with self.commands_in_progress_lock:
      if self.commands_in_progress.has_key(task_id):  # Background commands are not pushed into this collection (TODO)
        logger.debug('Pop with taskId %s' % task_id)
        pid = self.commands_in_progress.pop(task_id)
        if not isinstance(pid, int):
          return '\nCommand aborted. ' + pid
    return None
        
  def requestComponentStatus(self, command):
    """
     Component status is determined by the exit code returned by runCommand().
     Exit code 0 means that the component is running; any other exit code means
     that the component is not running.
    """
    override_output_files=True # by default, we override status command output
    if logger.level == logging.DEBUG:
      override_output_files = False
    res = self.runCommand(command, self.status_commands_stdout,
                          self.status_commands_stderr, self.COMMAND_NAME_STATUS,
                          override_output_files=override_output_files)
    return res

  def resolve_script_path(self, base_dir, script, script_type):
    """
    Encapsulates the logic of script location determination.
    """
    path = os.path.join(base_dir, script)
    if not os.path.exists(path):
      message = "Script {0} does not exist".format(path)
      raise AgentException(message)
    return path


  def resolve_hook_script_path(self, stack_hooks_dir, prefix, command_name, script_type):
    """
    Returns a tuple(path to hook script, hook base dir) according to string prefix
    and command name. If script does not exist, returns None
    """
    if not stack_hooks_dir:
      return None
    hook_dir = "{0}-{1}".format(prefix, command_name)
    hook_base_dir = os.path.join(stack_hooks_dir, hook_dir)
    hook_script_path = os.path.join(hook_base_dir, "scripts", "hook.py")
    if not os.path.isfile(hook_script_path):
      logger.debug("Hook script {0} not found, skipping".format(hook_script_path))
      return None
    return hook_script_path, hook_base_dir


  def dump_command_to_json(self, command):
    """
    Converts command to json file and returns file path
    """
    # Perform a few modifications to stay compatible with the way in which
    # site.pp files are generated by manifestGenerator.py
    public_fqdn = self.public_fqdn
    command['public_hostname'] = public_fqdn
    # Now, dump the json file
    command_type = command['commandType']
    from ActionQueue import ActionQueue  # To avoid cyclic dependency
    if command_type == ActionQueue.STATUS_COMMAND:
      # These files are frequently created, that's why we don't
      # store them all, but only the latest one
      file_path = os.path.join(self.tmp_dir, "status_command.json")
    else:
      task_id = command['taskId']
      if 'clusterHostInfo' in command and command['clusterHostInfo']:
        command['clusterHostInfo'] = self.decompressClusterHostInfo(command['clusterHostInfo'])
      file_path = os.path.join(self.tmp_dir, "command-{0}.json".format(task_id))
    # Json may contain passwords, that's why we need proper permissions
    if os.path.isfile(file_path):
      os.unlink(file_path)
    with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT,
                           0600), 'w') as f:
      content = json.dumps(command, sort_keys = False, indent = 4)
      f.write(content)
    return file_path
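Cancellation in this variant is driven by the commands_in_progress map: map_task_to_process() records the pid per taskId, cancel_command() overwrites that pid with the textual reason (and kills the process tree), and command_canceled_reason() later recognises the cancellation because the stored value is no longer an int. A hedged, standalone sketch of just that bookkeeping, without the process-killing side effects:

# Standalone sketch (not project code) of the cancellation bookkeeping above.
import threading

commands_in_progress = {}
commands_in_progress_lock = threading.RLock()

def map_task_to_process(task_id, pid):
    with commands_in_progress_lock:
        commands_in_progress[task_id] = pid

def cancel_command(task_id, reason):
    with commands_in_progress_lock:
        if task_id in commands_in_progress:
            # The real code also calls shell.kill_process_with_children(pid) here
            commands_in_progress[task_id] = reason

def command_canceled_reason(task_id):
    with commands_in_progress_lock:
        value = commands_in_progress.pop(task_id, None)
        if value is not None and not isinstance(value, int):
            return '\nCommand aborted. ' + value
    return None

map_task_to_process(19, 33)
cancel_command(19, 'stopped by user')
print(command_canceled_reason(19))  # "\nCommand aborted. stopped by user"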
Example #44
0
class CustomServiceOrchestrator():
  """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

  SCRIPT_TYPE_PYTHON = "PYTHON"
  COMMAND_NAME_STATUS = "STATUS"
  LIVE_STATUS = "STARTED"
  DEAD_STATUS = "INSTALLED"

  def __init__(self, config, controller, agentToggleLogger):
    self.config = config
    self.controller = controller
    self.tmp_dir = config.getResolvedPath(AgentConfig.APP_TASK_DIR)
    self.python_executor = PythonExecutor(self.tmp_dir, config, agentToggleLogger)
    self.status_commands_stdout = os.path.realpath(posixpath.join(self.tmp_dir,
                                                                  'status_command_stdout.txt'))
    self.status_commands_stderr = os.path.realpath(posixpath.join(self.tmp_dir,
                                                                  'status_command_stderr.txt'))
    self.public_fqdn = hostname.public_hostname()
    self.stored_command = {}
    self.allocated_ports = {}
    self.log_folders = {}

    self.allocated_ports_set = set()
    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore fail
    self.base_dir = os.path.realpath(posixpath.join(
      config.getResolvedPath(AgentConfig.APP_PACKAGE_DIR), "package"))


  def runCommand(self, command, tmpoutfile, tmperrfile,
                 override_output_files=True, store_command=False):
    allocated_ports = {}
    try:
      py_file_list = []
      json_path = None

      script_type = command['commandParams']['script_type']
      task_id = command['taskId']
      command_name = command['roleCommand']
      # transform upgrade specific command names
      if command_name == 'UPGRADE':
        command_name = 'PRE_UPGRADE'
      if command_name == 'UPGRADE_STOP':
        command_name = 'STOP'

      tmpstrucoutfile = os.path.realpath(posixpath.join(self.tmp_dir,
                                                        "structured-out-{0}.json".format(task_id)))
      if script_type.upper() == self.SCRIPT_TYPE_PYTHON:
        script = command['commandParams']['script']
        timeout = int(command['commandParams']['command_timeout'])
        script_path = ''
        if 'package' in command:
            add_on_dir_str = (self.config.getWorkRootPath()
                              + "/"
                              + AgentConfig.ADDON_PKG_ROOT_DIR
                              + "/application.addon."
                              + command['package']
                             )
            command['commandParams']['addonPackageRoot'] = add_on_dir_str
            add_on_base_dir = os.path.realpath(posixpath.join(add_on_dir_str, "package"))
            logger.info("Add on package: %s, add on base dir: %s" 
                        % (command['package'], str(add_on_base_dir)))
            script_path = self.resolve_script_path(add_on_base_dir, script, script_type)
        else:
            self.base_dir = os.path.realpath(posixpath.join(
                              self.config.getResolvedPath(AgentConfig.APP_PACKAGE_DIR),
                              "package"))
            logger.debug("Base dir: " + str(self.base_dir))
            script_path = self.resolve_script_path(self.base_dir, script, script_type)
        script_tuple = (script_path, self.base_dir)
        py_file_list = [script_tuple]

        json_path = self.dump_command_to_json(command, allocated_ports, store_command)
      elif script_type.upper() == "SHELL":
        timeout = int(command['commandParams']['command_timeout'])

        json_path = self.dump_command_to_json(command, allocated_ports, store_command)
        script_path = os.path.realpath(posixpath.join(self.config.getWorkRootPath(),
                                        "infra", "agent", "slider-agent", "scripts",
                                        "shell_cmd", "basic_installer.py"))
        script_tuple = (script_path, self.base_dir)
        py_file_list = [script_tuple]
      else:
        # We don't support anything else yet
        message = "Unknown script type {0}".format(script_type)
        raise AgentException(message)

      # filter None values
      filtered_py_file_list = [i for i in py_file_list if i]
      logger_level = logging.getLevelName(logger.level)

      # Executing hooks and script
      ret = None
      for py_file, current_base_dir in filtered_py_file_list:
        script_params = [command_name, json_path, current_base_dir]
        python_paths = [os.path.realpath(posixpath.join(self.config.getWorkRootPath(),
                                                        "infra", "agent", "slider-agent", "jinja2")),
                        os.path.realpath(posixpath.join(self.config.getWorkRootPath(),
                                                        "infra", "agent", "slider-agent"))]
        if platform.system() != "Windows":
          environment_vars = [("PYTHONPATH", ":".join(python_paths))]
        else:
          environment_vars = [("PYTHONPATH", ";".join(python_paths))]

        ret = self.python_executor.run_file(py_file, script_params,
                                            tmpoutfile, tmperrfile, timeout,
                                            tmpstrucoutfile,
                                            logger_level,
                                            override_output_files,
                                            environment_vars)
        # Next run_file() invocations should always append to current output
        override_output_files = False
        if ret[Constants.EXIT_CODE] != 0:
          break

      if not ret: # Something went wrong
        raise AgentException("No script has been executed")

    except Exception: # We do not want to let agent fail completely
      exc_type, exc_obj, exc_tb = sys.exc_info()
      message = "Caught an exception while executing " \
                "command: {0}: {1}".format(exc_type, exc_obj)
      logger.exception(message)
      ret = {
        'stdout': message,
        'stderr': message,
        'structuredOut': '{}',
        Constants.EXIT_CODE: 1,
      }

    if Constants.EXIT_CODE in ret and ret[Constants.EXIT_CODE] == 0:
      ret[Constants.ALLOCATED_PORTS] = copy.deepcopy(allocated_ports)
      ## Generally all ports are allocated at once but just in case
      self.allocated_ports.update(allocated_ports)

    # Irrespective of the outcome report the folder paths
    if command_name == 'INSTALL':
      self.log_folders = {
        Constants.AGENT_LOG_ROOT: self.config.getLogPath(),
        Constants.AGENT_WORK_ROOT: self.config.getWorkRootPath()
      }
      ret[Constants.FOLDERS] = copy.deepcopy(self.log_folders)
    return ret


  def resolve_script_path(self, base_dir, script, script_type):
    """
    Encapsulates logic of script location determination.
    """
    path = os.path.realpath(posixpath.join(base_dir, script))
    if not os.path.exists(path):
      message = "Script {0} does not exist".format(path)
      raise AgentException(message)
    return path

  def getConfig(self, command):
    if 'configurations' in self.stored_command:
      if 'commandParams' in command and 'config_type' in command['commandParams']:
        config_type = command['commandParams']['config_type']
        logger.info("Requesting applied config for type {0}".format(config_type))
        if config_type in self.stored_command['configurations']:
          return {
            'configurations': {config_type: self.stored_command['configurations'][config_type]}
          }
        else:
          return {
            'configurations': {}
          }
        pass
      else:
        logger.info("Requesting all applied config.")
        return {
          'configurations': self.stored_command['configurations']
        }
      pass
    else:
      return {
        'configurations': {}
      }
    pass

  def requestComponentStatus(self, command):
    """
     Component status is determined by the exit code returned by runCommand().
     Exit code 0 means that the component is running; any other exit code means
     that the component is not running.
    """
    override_output_files = True # by default, we override status command output
    if logger.level == logging.DEBUG:
      override_output_files = False

    if command['roleCommand'] == "GET_CONFIG":
      return self.getConfig(command)

    else:
      res = self.runCommand(command, self.status_commands_stdout,
                            self.status_commands_stderr,
                            override_output_files=override_output_files)
      if res[Constants.EXIT_CODE] == 0:
        res[Constants.EXIT_CODE] = CustomServiceOrchestrator.LIVE_STATUS
      else:
        res[Constants.EXIT_CODE] = CustomServiceOrchestrator.DEAD_STATUS

      return res
    pass

  def dump_command_to_json(self, command, allocated_ports, store_command=False):
    """
    Converts command to json file and returns file path
    """
    # Perform a few modifications to stay compatible with the way in which
    # site.pp files are generated by manifestGenerator.py
    command['public_hostname'] = self.public_fqdn
    if 'hostname' in command:
      command['appmaster_hostname'] = command['hostname']
    command['hostname'] = self.public_fqdn

    # Now, dump the json file
    command_type = command['commandType']
    from ActionQueue import ActionQueue  # To avoid cyclic dependency

    if command_type == ActionQueue.STATUS_COMMAND:
      # These files are frequently created, that's why we don't
      # store them all, but only the latest one
      file_path = os.path.realpath(posixpath.join(self.tmp_dir, "status_command.json"))
    else:
      task_id = command['taskId']
      file_path = os.path.realpath(posixpath.join(self.tmp_dir, "command-{0}.json".format(task_id)))
      # Json may contain passwords, that's why we need proper permissions
    if os.path.isfile(file_path) and os.path.exists(file_path):
      os.unlink(file_path)

    self.finalize_command(command, store_command, allocated_ports)
    self.finalize_exec_command(command)

    with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT,
                           0644), 'w') as f:
      content = json.dumps(command, sort_keys=False, indent=4)
      f.write(content)
    return file_path
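This slider-agent variant also prepares a PYTHONPATH for the launched script: the jinja2 and slider-agent directories are joined with the platform's path separator (";" on Windows, ":" elsewhere) and handed to run_file() as an environment variable. A small sketch of that assembly, with illustrative directory names:

# Sketch (not project code) of the PYTHONPATH assembly used above; the
# directories are illustrative.
import platform

python_paths = ["/work/infra/agent/slider-agent/jinja2",
                "/work/infra/agent/slider-agent"]
if platform.system() != "Windows":
    environment_vars = [("PYTHONPATH", ":".join(python_paths))]
else:
    environment_vars = [("PYTHONPATH", ";".join(python_paths))]
print(environment_vars)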
Example #45
0
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"

    def __init__(self, config):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.file_cache = FileCache(config)
        self.python_executor = PythonExecutor(self.tmp_dir, config)

    def runCommand(self, command, tmpoutfile, tmperrfile):
        try:
            # TODO: Adjust variables
            service_name = command['serviceName']
            component_name = command['role']
            stack_name = command['stackName']  # TODO: add at the server side
            stack_version = command[
                'stackVersion']  # TODO: add at the server side
            script_type = command['scriptType']  # TODO: add at the server side
            script = command['script']
            command_name = command['roleCommand']
            timeout = int(command['timeout'])  # TODO: add at the server side
            base_dir = self.file_cache.get_service_base_dir(
                stack_name, stack_version, service_name, component_name)
            script_path = self.resolve_script_path(base_dir, script,
                                                   script_type)
            if script_type == self.SCRIPT_TYPE_PYTHON:
                json_path = self.dump_command_to_json(command)
                script_params = [command_name, json_path, base_dir]
                ret = self.python_executor.run_file(script_path, script_params,
                                                    tmpoutfile, tmperrfile,
                                                    timeout)
            else:
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)
        except Exception:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Catched an exception while executing "\
              "custom service command: {0}: {1}".format(exc_type, exc_obj)
            logger.error(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'exitCode': 1,
            }
        return ret

    def resolve_script_path(self, base_dir, script, script_type):
        """
    Encapsulates the logic of script location determination.
    """
        path = os.path.join(base_dir, "package", script)
        if not os.path.exists(path):
            message = "Script {0} does not exist".format(path)
            raise AgentException(message)
        return path

    def dump_command_to_json(self, command):
        """
    Converts command to json file and returns file path
    """
        command_id = command['commandId']
        file_path = os.path.join(self.tmp_dir,
                                 "command-{0}.json".format(command_id))
        with open(file_path, "w") as f:
            content = json.dumps(command)
            f.write(content)
        return file_path

    def test_cancel_backgound_command(self, read_stack_version_mock,
                                      resolve_hook_script_path_mock,
                                      resolve_script_path_mock, FileCache_mock,
                                      kill_process_with_children_mock,
                                      get_py_executor_mock):
        FileCache_mock.return_value = None
        FileCache_mock.cache_dir = MagicMock()
        resolve_hook_script_path_mock.return_value = None
        #     shell.kill_process_with_children = MagicMock()
        dummy_controller = MagicMock()
        cfg = AmbariConfig()
        cfg.set('agent', 'tolerate_download_failures', 'true')
        cfg.set('agent', 'prefix', '.')
        cfg.set('agent', 'cache_dir', 'background_tasks')

        actionQueue = ActionQueue(cfg, dummy_controller)

        dummy_controller.actionQueue = actionQueue
        orchestrator = CustomServiceOrchestrator(cfg, dummy_controller)
        orchestrator.file_cache = MagicMock()

        def f(a, b):
            return ""

        orchestrator.file_cache.get_service_base_dir = f
        actionQueue.customServiceOrchestrator = orchestrator

        import TestActionQueue
        import copy

        pyex = PythonExecutor(actionQueue.customServiceOrchestrator.tmp_dir,
                              actionQueue.customServiceOrchestrator.config)
        TestActionQueue.patch_output_file(pyex)
        pyex.prepare_process_result = MagicMock()
        get_py_executor_mock.return_value = pyex
        orchestrator.dump_command_to_json = MagicMock()

        lock = threading.RLock()
        complete_done = threading.Condition(lock)

        complete_was_called = {}

        def command_complete_w(process_condenced_result, handle):
            with lock:
                complete_was_called['visited'] = ''
                complete_done.wait(3)

        actionQueue.on_background_command_complete_callback = TestActionQueue.wraped(
            actionQueue.on_background_command_complete_callback,
            command_complete_w, None)
        execute_command = copy.deepcopy(
            TestActionQueue.TestActionQueue.background_command)
        actionQueue.put([execute_command])
        actionQueue.processBackgroundQueueSafeEmpty()

        time.sleep(.1)

        orchestrator.cancel_command(19, '')
        self.assertTrue(kill_process_with_children_mock.called)
        kill_process_with_children_mock.assert_called_with(33)

        with lock:
            complete_done.notifyAll()

        with lock:
            self.assertTrue(complete_was_called.has_key('visited'))

        time.sleep(.1)

        runningCommand = actionQueue.commandStatuses.get_command_status(19)
        self.assertTrue(runningCommand is not None)
        self.assertEqual(runningCommand['status'], ActionQueue.FAILED_STATUS)
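The test above synchronises with the background-command machinery by wrapping the completion callback: the wrapper records that it was invoked and then blocks on a Condition until the test thread notifies it (or a timeout elapses). A stripped-down sketch of that pattern, outside the ActionQueue machinery:

# Stripped-down sketch (not the project's test code) of the callback
# synchronisation pattern used in test_cancel_backgound_command().
import threading
import time

lock = threading.RLock()
complete_done = threading.Condition(lock)
complete_was_called = {}

def wrapped_complete_callback():
    with lock:
        complete_was_called['visited'] = ''
        complete_done.wait(3)  # released by notify_all(), or after a 3s timeout

worker = threading.Thread(target=wrapped_complete_callback)
worker.start()
time.sleep(0.1)  # give the worker a chance to reach wait()

with lock:
    complete_done.notify_all()
worker.join()
print('visited' in complete_was_called)  # True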
Example #47
0
  def get_py_executor(self, forced_command_name):
    """
    Wrapper for unit testing
    :return:
    """
    return PythonExecutor(self.tmp_dir, self.config)
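The factory above exists purely as a seam for unit tests: the tests in this collection patch it (get_py_executor_mock) so the orchestrator receives a prepared executor instead of spawning real Python subprocesses. A minimal illustration of the same seam, using hypothetical class names:

# Minimal illustration (hypothetical names, not project code) of the test seam
# provided by get_py_executor(): overriding the factory substitutes an executor
# that returns canned results instead of launching real subprocesses.
class StubExecutor(object):
    def run_file(self, *args, **kwargs):
        return {'exitcode': 0, 'stdout': 'ok', 'stderr': '', 'structuredOut': {}}

class TestableOrchestrator(object):
    def get_py_executor(self, forced_command_name):
        return StubExecutor()

print(TestableOrchestrator().get_py_executor(None).run_file('dummy.py', []))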
Example #48
0
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"
    COMMAND_NAME_STATUS = "STATUS"
    COMMAND_NAME_SECURITY_STATUS = "SECURITY_STATUS"
    CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
    CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

    PRE_HOOK_PREFIX = "before"
    POST_HOOK_PREFIX = "after"

    HOSTS_LIST_KEY = "all_hosts"
    PING_PORTS_KEY = "all_ping_ports"
    AMBARI_SERVER_HOST = "ambari_server_host"

    def __init__(self, config, controller):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.exec_tmp_dir = config.get('agent', 'tmp_dir')
        self.file_cache = FileCache(config)
        self.python_executor = PythonExecutor(self.tmp_dir, config)
        self.status_commands_stdout = os.path.join(
            self.tmp_dir, 'status_command_stdout.txt')
        self.status_commands_stderr = os.path.join(
            self.tmp_dir, 'status_command_stderr.txt')
        self.public_fqdn = hostname.public_hostname(config)
        # cache reset will be called on every agent registration
        controller.registration_listeners.append(self.file_cache.reset)

        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore fail
        self.commands_in_progress_lock = threading.RLock()
        self.commands_in_progress = {}

    def map_task_to_process(self, task_id, processId):
        with self.commands_in_progress_lock:
            logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
            self.commands_in_progress[task_id] = processId

    def cancel_command(self, task_id, reason):
        with self.commands_in_progress_lock:
            if task_id in self.commands_in_progress.keys():
                pid = self.commands_in_progress.get(task_id)
                self.commands_in_progress[task_id] = reason
                logger.info("Canceling command with task_id - {tid}, " \
                            "reason - {reason} . Killing process {pid}"
                            .format(tid=str(task_id), reason=reason, pid=pid))
                shell.kill_process_with_children(pid)
            else:
                logger.warn("Unable to find pid by taskId = %s" % task_id)

    def runCommand(self,
                   command,
                   tmpoutfile,
                   tmperrfile,
                   forced_command_name=None,
                   override_output_files=True):
        """
    forced_command_name may be specified manually. In this case, the value defined in
    the command JSON is ignored.
    """
        try:
            script_type = command['commandParams']['script_type']
            script = command['commandParams']['script']
            timeout = int(command['commandParams']['command_timeout'])

            if 'hostLevelParams' in command and 'jdk_location' in command[
                    'hostLevelParams']:
                server_url_prefix = command['hostLevelParams']['jdk_location']
            else:
                server_url_prefix = command['commandParams']['jdk_location']

            task_id = "status"

            try:
                task_id = command['taskId']
                command_name = command['roleCommand']
            except KeyError:
                pass  # Status commands have no taskId

            if forced_command_name is not None:  # If supplied as an argument
                command_name = forced_command_name

            if command_name == self.CUSTOM_ACTION_COMMAND:
                base_dir = self.file_cache.get_custom_actions_base_dir(
                    server_url_prefix)
                script_tuple = (os.path.join(base_dir, 'scripts',
                                             script), base_dir)
                hook_dir = None
            else:
                if command_name == self.CUSTOM_COMMAND_COMMAND:
                    command_name = command['hostLevelParams']['custom_command']

                # forces a hash challenge on the directories to keep them updated, even
                # if the return value is not used
                self.file_cache.get_host_scripts_base_dir(server_url_prefix)
                hook_dir = self.file_cache.get_hook_base_dir(
                    command, server_url_prefix)
                base_dir = self.file_cache.get_service_base_dir(
                    command, server_url_prefix)

                script_path = self.resolve_script_path(base_dir, script)
                script_tuple = (script_path, base_dir)

            tmpstrucoutfile = os.path.join(
                self.tmp_dir, "structured-out-{0}.json".format(task_id))

            # We don't support anything else yet
            if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)

            # Execute command using proper interpreter
            handle = None
            if command.has_key('__handle'):
                handle = command['__handle']
                handle.on_background_command_started = self.map_task_to_process
                del command['__handle']

            json_path = self.dump_command_to_json(command)
            pre_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.PRE_HOOK_PREFIX, command_name, script_type)
            post_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.POST_HOOK_PREFIX, command_name, script_type)
            py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
            # filter None values
            filtered_py_file_list = [i for i in py_file_list if i]

            logger_level = logging.getLevelName(logger.level)

            # Executing hooks and script
            ret = None
            from ActionQueue import ActionQueue
            if command.has_key('commandType') and command[
                    'commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and len(
                        filtered_py_file_list) > 1:
                raise AgentException(
                    "Background commands are supported without hooks only")

            for py_file, current_base_dir in filtered_py_file_list:
                script_params = [command_name, json_path, current_base_dir]
                ret = self.python_executor.run_file(py_file,
                                                    script_params,
                                                    self.exec_tmp_dir,
                                                    tmpoutfile,
                                                    tmperrfile,
                                                    timeout,
                                                    tmpstrucoutfile,
                                                    logger_level,
                                                    self.map_task_to_process,
                                                    task_id,
                                                    override_output_files,
                                                    handle=handle)
                # Next run_file() invocations should always append to current output
                override_output_files = False
                if ret['exitcode'] != 0:
                    break

            if not ret:  # Something went wrong
                raise AgentException("No script has been executed")

            # if canceled and not background command
            if handle is None:
                cancel_reason = self.command_canceled_reason(task_id)
                if cancel_reason:
                    ret['stdout'] += cancel_reason
                    ret['stderr'] += cancel_reason

                    with open(tmpoutfile, "a") as f:
                        f.write(cancel_reason)
                    with open(tmperrfile, "a") as f:
                        f.write(cancel_reason)

        except Exception, e:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Caught an exception while executing "\
              "custom service command: {0}: {1}; {2}".format(exc_type, exc_obj, str(e))
            logger.exception(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'structuredOut': '{}',
                'exitcode': 1,
            }
        return ret

  def test_cancel_backgound_command(self, read_stack_version_mock, resolve_hook_script_path_mock,
                                    resolve_script_path_mock, FileCache_mock, kill_process_with_children_mock,
                                    get_py_executor_mock):
    FileCache_mock.return_value = None
    FileCache_mock.cache_dir = MagicMock()
    resolve_hook_script_path_mock.return_value = None
#     shell.kill_process_with_children = MagicMock()
    dummy_controller = MagicMock()
    cfg = AmbariConfig()
    cfg.set('agent', 'tolerate_download_failures', 'true')
    cfg.set('agent', 'prefix', '.')
    cfg.set('agent', 'cache_dir', 'background_tasks')

    actionQueue = ActionQueue(cfg, dummy_controller)

    dummy_controller.actionQueue = actionQueue
    orchestrator = CustomServiceOrchestrator(cfg, dummy_controller)
    orchestrator.file_cache = MagicMock()
    def f (a, b):
      return ""
    orchestrator.file_cache.get_service_base_dir = f
    actionQueue.customServiceOrchestrator = orchestrator

    import TestActionQueue
    import copy

    pyex = PythonExecutor(actionQueue.customServiceOrchestrator.tmp_dir, actionQueue.customServiceOrchestrator.config)
    TestActionQueue.patch_output_file(pyex)
    pyex.prepare_process_result = MagicMock()
    get_py_executor_mock.return_value = pyex
    orchestrator.dump_command_to_json = MagicMock()

    lock = threading.RLock()
    complete_done = threading.Condition(lock)

    complete_was_called = {}
    def command_complete_w(process_condenced_result, handle):
      with lock:
        complete_was_called['visited']= ''
        complete_done.wait(3)

    actionQueue.on_background_command_complete_callback = TestActionQueue.wraped(actionQueue.on_background_command_complete_callback, command_complete_w, None)
    execute_command = copy.deepcopy(TestActionQueue.TestActionQueue.background_command)
    actionQueue.put([execute_command])
    actionQueue.processBackgroundQueueSafeEmpty()

    time.sleep(.1)

    orchestrator.cancel_command(19,'')
    self.assertTrue(kill_process_with_children_mock.called)
    kill_process_with_children_mock.assert_called_with(33)

    with lock:
      complete_done.notifyAll()

    with lock:
      self.assertTrue(complete_was_called.has_key('visited'))

    time.sleep(.1)

    runningCommand = actionQueue.commandStatuses.get_command_status(19)
    self.assertTrue(runningCommand is not None)
    self.assertEqual(runningCommand['status'], ActionQueue.FAILED_STATUS)
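runCommand() in the class above routes ACTIONEXECUTE commands to the shared custom-actions cache (with no hook directory), while every other command is resolved against its service directory plus the before/after hooks. A condensed sketch of that branch, with illustrative cache paths:

# Condensed sketch (not project code) of the routing branch in runCommand()
# above; the cache paths are illustrative.
import os

CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'

def select_script(command_name, script, custom_actions_dir, service_dir, hooks_dir):
    if command_name == CUSTOM_ACTION_COMMAND:
        # Custom actions run from the shared cache and get no hooks
        return os.path.join(custom_actions_dir, 'scripts', script), None
    return os.path.join(service_dir, script), hooks_dir

print(select_script('ACTIONEXECUTE', 'check_host.py',
                    '/var/lib/ambari-agent/cache/custom_actions',
                    '/var/lib/ambari-agent/cache/services/HDFS/package',
                    '/var/lib/ambari-agent/cache/stack-hooks'))
print(select_script('START', 'scripts/namenode.py',
                    '/var/lib/ambari-agent/cache/custom_actions',
                    '/var/lib/ambari-agent/cache/services/HDFS/package',
                    '/var/lib/ambari-agent/cache/stack-hooks'))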
Example #50
0
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"
    COMMAND_NAME_STATUS = "STATUS"
    LIVE_STATUS = "STARTED"
    DEAD_STATUS = "INSTALLED"

    def __init__(self, config, controller, agentToggleLogger):
        self.config = config
        self.controller = controller
        self.tmp_dir = config.getResolvedPath(AgentConfig.APP_TASK_DIR)
        self.python_executor = PythonExecutor(self.tmp_dir, config,
                                              agentToggleLogger)
        self.status_commands_stdout = os.path.realpath(
            posixpath.join(self.tmp_dir, 'status_command_stdout.txt'))
        self.status_commands_stderr = os.path.realpath(
            posixpath.join(self.tmp_dir, 'status_command_stderr.txt'))
        self.public_fqdn = hostname.public_hostname()
        self.stored_command = {}
        self.allocated_ports = {}
        self.log_folders = {}
        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore fail
        self.base_dir = os.path.realpath(
            posixpath.join(config.getResolvedPath(AgentConfig.APP_PACKAGE_DIR),
                           "package"))

    def runCommand(self,
                   command,
                   tmpoutfile,
                   tmperrfile,
                   override_output_files=True,
                   store_command=False):
        allocated_ports = {}
        try:
            py_file_list = []
            json_path = None

            script_type = command['commandParams']['script_type']
            task_id = command['taskId']
            command_name = command['roleCommand']
            # transform upgrade specific command names
            if command_name == 'UPGRADE':
                command_name = 'PRE_UPGRADE'
            if command_name == 'UPGRADE_STOP':
                command_name = 'STOP'

            tmpstrucoutfile = os.path.realpath(
                posixpath.join(self.tmp_dir,
                               "structured-out-{0}.json".format(task_id)))
            if script_type.upper() == self.SCRIPT_TYPE_PYTHON:
                script = command['commandParams']['script']
                timeout = int(command['commandParams']['command_timeout'])
                script_path = ''
                if 'package' in command:
                    add_on_dir_str = (self.config.getWorkRootPath() + "/" +
                                      AgentConfig.ADDON_PKG_ROOT_DIR +
                                      "/application.addon." +
                                      command['package'])
                    command['commandParams'][
                        'addonPackageRoot'] = add_on_dir_str
                    add_on_base_dir = os.path.realpath(
                        posixpath.join(add_on_dir_str, "package"))
                    logger.info("Add on package: %s, add on base dir: %s" %
                                (command['package'], str(add_on_base_dir)))
                    script_path = self.resolve_script_path(
                        add_on_base_dir, script, script_type)
                else:
                    self.base_dir = os.path.realpath(
                        posixpath.join(
                            self.config.getResolvedPath(
                                AgentConfig.APP_PACKAGE_DIR), "package"))
                    logger.debug("Base dir: " + str(self.base_dir))
                    script_path = self.resolve_script_path(
                        self.base_dir, script, script_type)
                script_tuple = (script_path, self.base_dir)
                py_file_list = [script_tuple]

                json_path = self.dump_command_to_json(command, allocated_ports,
                                                      store_command)
            elif script_type.upper() == "SHELL":
                timeout = int(command['commandParams']['command_timeout'])

                json_path = self.dump_command_to_json(command, allocated_ports,
                                                      store_command)
                script_path = os.path.realpath(
                    posixpath.join(self.config.getWorkRootPath(), "infra",
                                   "agent", "slider-agent", "scripts",
                                   "shell_cmd", "basic_installer.py"))
                script_tuple = (script_path, self.base_dir)
                py_file_list = [script_tuple]
            else:
                # We don't support anything else yet
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)

            # filter None values
            filtered_py_file_list = [i for i in py_file_list if i]
            logger_level = logging.getLevelName(logger.level)

            # Executing hooks and script
            ret = None
            for py_file, current_base_dir in filtered_py_file_list:
                script_params = [command_name, json_path, current_base_dir]
                python_paths = [
                    os.path.realpath(
                        posixpath.join(self.config.getWorkRootPath(), "infra",
                                       "agent", "slider-agent", "jinja2")),
                    os.path.realpath(
                        posixpath.join(self.config.getWorkRootPath(), "infra",
                                       "agent", "slider-agent"))
                ]
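                # PYTHONPATH entries use the platform path separator:
                # ':' on POSIX systems, ';' on Windows (i.e. os.pathsep)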
                if platform.system() != "Windows":
                    environment_vars = [("PYTHONPATH", ":".join(python_paths))]
                else:
                    environment_vars = [("PYTHONPATH", ";".join(python_paths))]

                ret = self.python_executor.run_file(py_file, script_params,
                                                    tmpoutfile, tmperrfile,
                                                    timeout, tmpstrucoutfile,
                                                    logger_level,
                                                    override_output_files,
                                                    environment_vars)
                # Next run_file() invocations should always append to current output
                override_output_files = False
                if ret[Constants.EXIT_CODE] != 0:
                    break

            if not ret:  # Something went wrong
                raise AgentException("No script has been executed")

        except Exception:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Caught an exception while executing " \
                      "command: {0}: {1}".format(exc_type, exc_obj)
            logger.exception(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'structuredOut': '{}',
                Constants.EXIT_CODE: 1,
            }

        if Constants.EXIT_CODE in ret and ret[Constants.EXIT_CODE] == 0:
            ret[Constants.ALLOCATED_PORTS] = copy.deepcopy(allocated_ports)
            # Generally all ports are allocated at once, but update just in case
            self.allocated_ports.update(allocated_ports)

        # Irrespective of the outcome, report the folder paths
        if command_name == 'INSTALL':
            self.log_folders = {
                Constants.AGENT_LOG_ROOT: self.config.getLogPath(),
                Constants.AGENT_WORK_ROOT: self.config.getWorkRootPath()
            }
            ret[Constants.FOLDERS] = copy.deepcopy(self.log_folders)
        return ret

    def resolve_script_path(self, base_dir, script, script_type):
        """
    Encapsulates logic of script location determination.
    """
        path = os.path.realpath(posixpath.join(base_dir, script))
        if not os.path.exists(path):
            message = "Script {0} does not exist".format(path)
            raise AgentException(message)
        return path

    def getConfig(self, command):
        if 'configurations' not in self.stored_command:
            return {'configurations': {}}

        if ('commandParams' in command
                and 'config_type' in command['commandParams']):
            config_type = command['commandParams']['config_type']
            logger.info("Requesting applied config for type {0}".format(
                config_type))
            if config_type in self.stored_command['configurations']:
                return {
                    'configurations': {
                        config_type:
                        self.stored_command['configurations'][config_type]
                    }
                }
            return {'configurations': {}}

        logger.info("Requesting all applied config.")
        return {'configurations': self.stored_command['configurations']}

    def requestComponentStatus(self, command):
        """
     Component status is determined by exit code, returned by runCommand().
     Exit code 0 means that component is running and any other exit code means that
     component is not running
    """
        override_output_files = True  # by default, we override status command output
        if logger.level == logging.DEBUG:
            override_output_files = False

        if command['roleCommand'] == "GET_CONFIG":
            return self.getConfig(command)

        else:
            res = self.runCommand(command,
                                  self.status_commands_stdout,
                                  self.status_commands_stderr,
                                  override_output_files=override_output_files)
            if res[Constants.EXIT_CODE] == 0:
                res[Constants.EXIT_CODE] = CustomServiceOrchestrator.LIVE_STATUS
            else:
                res[Constants.EXIT_CODE] = CustomServiceOrchestrator.DEAD_STATUS

            return res

    def dump_command_to_json(self,
                             command,
                             allocated_ports,
                             store_command=False):
        """
    Converts command to json file and returns file path
    """
        # Perform a few modifications to stay compatible with the way in which
        # site.pp files are generated by manifestGenerator.py
        command['public_hostname'] = self.public_fqdn
        if 'hostname' in command:
            command['appmaster_hostname'] = command['hostname']
        command['hostname'] = self.public_fqdn

        # Now, dump the json file
        command_type = command['commandType']
        from ActionQueue import ActionQueue  # To avoid cyclic dependency

        if command_type == ActionQueue.STATUS_COMMAND:
            # These files are frequently created; that's why we don't
            # store them all, but only the latest one
            file_path = os.path.realpath(
                posixpath.join(self.tmp_dir, "status_command.json"))
        else:
            task_id = command['taskId']
            file_path = os.path.realpath(
                posixpath.join(self.tmp_dir,
                               "command-{0}.json".format(task_id)))
        # The JSON may contain passwords, that's why the file is recreated
        # with explicit permissions below
        if os.path.isfile(file_path):
            os.unlink(file_path)

        self.finalize_command(command, store_command, allocated_ports)
        self.finalize_exec_command(command)

        with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT, 0o644),
                       'w') as f:
            content = json.dumps(command, sort_keys=False, indent=4)
            f.write(content)
        return file_path
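The dump_command_to_json method above deletes any existing command file and recreates it via os.open with an explicit mode, because the command JSON may contain passwords. Below is a minimal standalone sketch of that write pattern only, independent of the agent classes; the helper name and the sample command dict are hypothetical.

import json
import os
import tempfile


def dump_json_with_mode(data, file_path, mode=0o600):
    """Write JSON to file_path, creating it with the given permission bits."""
    if os.path.isfile(file_path):
        os.unlink(file_path)  # recreate so the mode is applied to a fresh file
    with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT, mode), 'w') as f:
        f.write(json.dumps(data, sort_keys=False, indent=4))
    return file_path


if __name__ == '__main__':
    sample_command = {'taskId': 7, 'roleCommand': 'STATUS'}  # hypothetical payload
    print(dump_json_with_mode(
        sample_command, os.path.join(tempfile.gettempdir(), 'command-7.json')))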
Example #51
0
class CustomServiceOrchestrator():
  """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

  SCRIPT_TYPE_PYTHON = "PYTHON"
  COMMAND_NAME_STATUS = "STATUS"
  LIVE_STATUS = "STARTED"
  DEAD_STATUS = "INSTALLED"

  def __init__(self, config, controller):
    self.config = config
    self.tmp_dir = config.getResolvedPath(AgentConfig.APP_TASK_DIR)
    self.python_executor = PythonExecutor(self.tmp_dir, config)
    self.status_commands_stdout = os.path.join(self.tmp_dir,
                                               'status_command_stdout.txt')
    self.status_commands_stderr = os.path.join(self.tmp_dir,
                                               'status_command_stderr.txt')
    self.public_fqdn = hostname.public_hostname()
    self.applied_configs = {}
    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore fail
    self.base_dir = os.path.join(
      config.getResolvedPath(AgentConfig.APP_PACKAGE_DIR), "package")


  def runCommand(self, command, tmpoutfile, tmperrfile,
                 override_output_files=True, store_config=False):
    try:
      script_type = command['commandParams']['script_type']
      script = command['commandParams']['script']
      timeout = int(command['commandParams']['command_timeout'])
      task_id = command['taskId']
      command_name = command['roleCommand']

      script_path = self.resolve_script_path(self.base_dir, script, script_type)
      script_tuple = (script_path, self.base_dir)

      tmpstrucoutfile = os.path.join(self.tmp_dir,
                                     "structured-out-{0}.json".format(task_id))
      if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
        # We don't support anything else yet
        message = "Unknown script type {0}".format(script_type)
        raise AgentException(message)

      # Execute command using proper interpreter
      json_path = self.dump_command_to_json(command, store_config)
      py_file_list = [script_tuple]
      # filter None values
      filtered_py_file_list = [i for i in py_file_list if i]

      # Executing hooks and script
      ret = None
      for py_file, current_base_dir in filtered_py_file_list:
        script_params = [command_name, json_path, current_base_dir]
        python_paths = [os.path.join(self.config.getWorkRootPath(),
                                     "infra/agent/slider-agent/jinja2"),
                        os.path.join(self.config.getWorkRootPath(),
                                     "infra/agent/slider-agent")]
        environment_vars = [("PYTHONPATH", ":".join(python_paths))]
        ret = self.python_executor.run_file(py_file, script_params,
                                            tmpoutfile, tmperrfile, timeout,
                                            tmpstrucoutfile,
                                            override_output_files,
                                            environment_vars)
        # Next run_file() invocations should always append to current output
        override_output_files = False
        if ret['exitcode'] != 0:
          break

      if not ret: # Something went wrong
        raise AgentException("No script has been executed")

    except Exception: # We do not want to let agent fail completely
      exc_type, exc_obj, exc_tb = sys.exc_info()
      message = "Caught an exception while executing " \
                "command: {0}: {1}".format(exc_type, exc_obj)
      logger.exception(message)
      ret = {
        'stdout': message,
        'stderr': message,
        'structuredOut': '{}',
        'exitcode': 1,
      }

    return ret


  def resolve_script_path(self, base_dir, script, script_type):
    """
    Encapsulates logic of script location determination.
    """
    path = os.path.join(base_dir, script)
    if not os.path.exists(path):
      message = "Script {0} does not exist".format(path)
      raise AgentException(message)
    return path

  def requestComponentStatus(self, command):
    """
     Component status is determined by exit code, returned by runCommand().
     Exit code 0 means that component is running and any other exit code means that
     component is not running
    """
    override_output_files = True # by default, we override status command output
    if logger.level == logging.DEBUG:
      override_output_files = False

    if command['roleCommand'] == "GET_CONFIG":
      logger.info("Requesting applied config ...")
      return {
        'configurations': self.applied_configs
      }

    else:
      res = self.runCommand(command, self.status_commands_stdout,
                            self.status_commands_stderr,
                            override_output_files=override_output_files)
      if res['exitcode'] == 0:
        res['exitcode'] = CustomServiceOrchestrator.LIVE_STATUS
      else:
        res['exitcode'] = CustomServiceOrchestrator.DEAD_STATUS

      return res

  def dump_command_to_json(self, command, store_config=False):
    """
    Converts command to json file and returns file path
    """
    # Perform a few modifications to stay compatible with the way in which
    # site.pp files are generated by manifestGenerator.py
    public_fqdn = self.public_fqdn
    command['public_hostname'] = public_fqdn
    # Now, dump the json file
    command_type = command['commandType']
    from ActionQueue import ActionQueue  # To avoid cyclic dependency

    if command_type == ActionQueue.STATUS_COMMAND:
      # These files are frequently created; that's why we don't
      # store them all, but only the latest one
      file_path = os.path.join(self.tmp_dir, "status_command.json")
    else:
      task_id = command['taskId']
      file_path = os.path.join(self.tmp_dir, "command-{0}.json".format(task_id))
    # The JSON may contain passwords, that's why the file is recreated
    # with explicit permissions below
    if os.path.isfile(file_path):
      os.unlink(file_path)

    self.finalize_command(command, store_config)

    with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT,
                           0o600), 'w') as f:
      content = json.dumps(command, sort_keys=False, indent=4)
      f.write(content)
    return file_path
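As the requestComponentStatus docstring above describes, a zero exit code is reported as the live status and any other exit code as the dead status. Below is a minimal sketch of just that mapping, reusing the values of the class constants; the helper name is hypothetical and not part of the agent code.

# Values mirror CustomServiceOrchestrator.LIVE_STATUS / DEAD_STATUS above
LIVE_STATUS = "STARTED"
DEAD_STATUS = "INSTALLED"


def map_exit_code_to_status(result):
    """Return a copy of result with 'exitcode' replaced by a status string."""
    mapped = dict(result)
    mapped['exitcode'] = LIVE_STATUS if mapped['exitcode'] == 0 else DEAD_STATUS
    return mapped


if __name__ == '__main__':
    print(map_exit_code_to_status({'exitcode': 0, 'stdout': ''}))  # -> STARTED
    print(map_exit_code_to_status({'exitcode': 1, 'stdout': ''}))  # -> INSTALLED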