Example #1
 def test_get_service_base_dir(self, isdir_mock):
     fileCache = FileCache(self.config)
     isdir_mock.return_value = True
     base = fileCache.get_service_base_dir("HDP", "2.0.7", "HBASE",
                                           "REGION_SERVER")
     self.assertEqual(
         base,
         "/var/lib/ambari-agent/cache/stacks/HDP/2.0.7/services/HBASE")
Example #2
 def test_get_service_base_dir(self, provide_directory_mock):
   provide_directory_mock.return_value = "dummy value"
   fileCache = FileCache(self.config)
   command = {
     'commandParams' : {
       'service_package_folder' : os.path.join('stacks', 'HDP', '2.1.1', 'services', 'ZOOKEEPER', 'package')
     }
   }
   res = fileCache.get_service_base_dir(command, "server_url_pref")
   self.assertEquals(
     pprint.pformat(provide_directory_mock.call_args_list[0][0]),
     "('/var/lib/ambari-agent/cache',\n "
     "{0},\n"
     " 'server_url_pref')".format(pprint.pformat(os.path.join('stacks', 'HDP', '2.1.1', 'services', 'ZOOKEEPER', 'package'))))
   self.assertEquals(res, "dummy value")
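
From the assertion above (and the variant in example #3 below, which only differs by prepending "stacks" to the folder itself), one can infer the call shape of this version of get_service_base_dir: it reads commandParams/service_package_folder and delegates to provide_directory together with the cache root and the server URL prefix. A minimal sketch under those assumptions; the attribute names are guesses, not the real FileCache internals:

class FileCacheSketch(object):
  # Hypothetical reduction of FileCache, only to illustrate the call shape
  # asserted by the mock above; not the real implementation.
  def __init__(self, cache_dir, provide_directory):
    self.cache_dir = cache_dir                  # e.g. "/var/lib/ambari-agent/cache"
    self.provide_directory = provide_directory  # fetches/refreshes the directory from the server

  def get_service_base_dir(self, command, server_url_prefix):
    service_subpath = command['commandParams']['service_package_folder']
    return self.provide_directory(self.cache_dir, service_subpath, server_url_prefix)

With the command from example #2 this resolves to provide_directory("/var/lib/ambari-agent/cache", "stacks/HDP/2.1.1/services/ZOOKEEPER/package", "server_url_pref"), which is exactly what the call_args_list assertion checks.
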
Example #3
 def test_get_service_base_dir(self, provide_directory_mock):
   provide_directory_mock.return_value = "dummy value"
   fileCache = FileCache(self.config)
   command = {
     'commandParams' : {
       'service_package_folder' : 'HDP/2.1.1/services/ZOOKEEPER/package'
     }
   }
   res = fileCache.get_service_base_dir(command, "server_url_pref")
   self.assertEquals(
     pprint.pformat(provide_directory_mock.call_args_list[0][0]),
     "('/var/lib/ambari-agent/cache',\n "
     "'stacks/HDP/2.1.1/services/ZOOKEEPER/package',\n"
     " 'server_url_pref')")
   self.assertEquals(res, "dummy value")
Example #4
class CustomServiceOrchestrator():
  """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

  SCRIPT_TYPE_PYTHON = "PYTHON"
  COMMAND_NAME_STATUS = "STATUS"
  COMMAND_NAME_SECURITY_STATUS = "SECURITY_STATUS"
  CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
  CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

  PRE_HOOK_PREFIX="before"
  POST_HOOK_PREFIX="after"

  HOSTS_LIST_KEY = "all_hosts"
  PING_PORTS_KEY = "all_ping_ports"
  RACKS_KEY = "all_racks"
  IPV4_ADDRESSES_KEY = "all_ipv4_ips"

  AMBARI_SERVER_HOST = "ambari_server_host"
  DONT_DEBUG_FAILURES_FOR_COMMANDS = [COMMAND_NAME_SECURITY_STATUS, COMMAND_NAME_STATUS]
  REFLECTIVELY_RUN_COMMANDS = [COMMAND_NAME_SECURITY_STATUS, COMMAND_NAME_STATUS]  # frequently run commands; running them reflectively speeds them up

  def __init__(self, config, controller):
    self.config = config
    self.tmp_dir = config.get('agent', 'prefix')
    self.exec_tmp_dir = Constants.AGENT_TMP_DIR
    self.file_cache = FileCache(config)
    self.status_commands_stdout = os.path.join(self.tmp_dir,
                                               'status_command_stdout.txt')
    self.status_commands_stderr = os.path.join(self.tmp_dir,
                                               'status_command_stderr.txt')
    self.public_fqdn = hostname.public_hostname(config)
    # cache reset will be called on every agent registration
    controller.registration_listeners.append(self.file_cache.reset)

    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore fail
    self.commands_in_progress_lock = threading.RLock()
    self.commands_in_progress = {}

  def map_task_to_process(self, task_id, processId):
    with self.commands_in_progress_lock:
      logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
      self.commands_in_progress[task_id] = processId

  def cancel_command(self, task_id, reason):
    with self.commands_in_progress_lock:
      if task_id in self.commands_in_progress.keys():
        pid = self.commands_in_progress.get(task_id)
        self.commands_in_progress[task_id] = reason
        logger.info("Canceling command with task_id - {tid}, " \
                    "reason - {reason} . Killing process {pid}"
                    .format(tid=str(task_id), reason=reason, pid=pid))
        shell.kill_process_with_children(pid)
      else: 
        logger.warn("Unable to find pid by taskId = %s" % task_id)

  def get_py_executor(self, forced_command_name):
    """
    Wrapper for unit testing
    :return:
    """
    if forced_command_name in self.REFLECTIVELY_RUN_COMMANDS:
      return PythonReflectiveExecutor(self.tmp_dir, self.config)
    else:
      return PythonExecutor(self.tmp_dir, self.config)

  def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name=None,
                 override_output_files=True, retry=False):
    """
    forced_command_name may be specified manually. In this case, the value defined
    in the command JSON is ignored.
    """
    try:
      script_type = command['commandParams']['script_type']
      script = command['commandParams']['script']
      timeout = int(command['commandParams']['command_timeout'])

      if 'hostLevelParams' in command and 'jdk_location' in command['hostLevelParams']:
        server_url_prefix = command['hostLevelParams']['jdk_location']
      else:
        server_url_prefix = command['commandParams']['jdk_location']
        
      task_id = "status"
      
      try:
        task_id = command['taskId']
        command_name = command['roleCommand']
      except KeyError:
        pass  # Status commands have no taskId

      if forced_command_name is not None:  # If supplied as an argument, override the value from the command JSON
        command_name = forced_command_name

      if command_name == self.CUSTOM_ACTION_COMMAND:
        base_dir = self.file_cache.get_custom_actions_base_dir(server_url_prefix)
        script_tuple = (os.path.join(base_dir, 'scripts', script), base_dir)
        hook_dir = None
      else:
        if command_name == self.CUSTOM_COMMAND_COMMAND:
          command_name = command['hostLevelParams']['custom_command']

        # forces a hash challenge on the directories to keep them updated, even
        # if the return value is not used
        self.file_cache.get_host_scripts_base_dir(server_url_prefix)          
        hook_dir = self.file_cache.get_hook_base_dir(command, server_url_prefix)
        base_dir = self.file_cache.get_service_base_dir(command, server_url_prefix)
        
        script_path = self.resolve_script_path(base_dir, script)
        script_tuple = (script_path, base_dir)

      tmpstrucoutfile = os.path.join(self.tmp_dir,
                                    "structured-out-{0}.json".format(task_id))

      # We don't support anything else yet
      if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
        message = "Unknown script type {0}".format(script_type)
        raise AgentException(message)

      # Execute command using proper interpreter
      handle = None
      if command.has_key('__handle'):
        handle = command['__handle']
        handle.on_background_command_started = self.map_task_to_process
        del command['__handle']

      json_path = self.dump_command_to_json(command, retry)
      pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.PRE_HOOK_PREFIX, command_name, script_type)
      post_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.POST_HOOK_PREFIX, command_name, script_type)
      py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
      # filter None values
      filtered_py_file_list = [i for i in py_file_list if i]

      logger_level = logging.getLevelName(logger.level)

      # Executing hooks and script
      ret = None
      from ActionQueue import ActionQueue
      if command.has_key('commandType') and command['commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and len(filtered_py_file_list) > 1:
        raise AgentException("Background commands are supported without hooks only")

      python_executor = self.get_py_executor(forced_command_name)
      for py_file, current_base_dir in filtered_py_file_list:
        log_info_on_failure = not command_name in self.DONT_DEBUG_FAILURES_FOR_COMMANDS
        script_params = [command_name, json_path, current_base_dir, tmpstrucoutfile, logger_level, self.exec_tmp_dir]
        ret = python_executor.run_file(py_file, script_params,
                               tmpoutfile, tmperrfile, timeout,
                               tmpstrucoutfile, self.map_task_to_process,
                               task_id, override_output_files, handle = handle, log_info_on_failure=log_info_on_failure)
        # Next run_file() invocations should always append to current output
        override_output_files = False
        if ret['exitcode'] != 0:
          break

      if not ret: # Something went wrong
        raise AgentException("No script has been executed")

      # if canceled and not background command
      if handle is None:
        cancel_reason = self.command_canceled_reason(task_id)
        if cancel_reason:
          ret['stdout'] += cancel_reason
          ret['stderr'] += cancel_reason

          with open(tmpoutfile, "a") as f:
            f.write(cancel_reason)
          with open(tmperrfile, "a") as f:
            f.write(cancel_reason)

    except Exception, e: # We do not want to let agent fail completely
      exc_type, exc_obj, exc_tb = sys.exc_info()
      message = "Caught an exception while executing "\
        "custom service command: {0}: {1}; {2}".format(exc_type, exc_obj, str(e))
      logger.exception(message)
      ret = {
        'stdout' : message,
        'stderr' : message,
        'structuredOut' : '{}',
        'exitcode': 1,
      }
    return ret
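
For orientation, the command dict consumed by runCommand above needs at least the keys read at the top of the method: commandParams with script_type, script, command_timeout and (as a fallback) jdk_location, plus hostLevelParams/jdk_location and, for non-status commands, taskId and roleCommand. A hedged sketch of such a payload; every concrete value and path below is invented for illustration:

# Hypothetical execution command, shaped after the keys runCommand() reads.
command = {
  'commandType': 'EXECUTION_COMMAND',
  'taskId': 42,
  'roleCommand': 'START',
  'hostLevelParams': {
    'jdk_location': 'http://ambari-server:8080/resources/',
  },
  'commandParams': {
    'script_type': 'PYTHON',
    'script': 'scripts/hbase_regionserver.py',
    'command_timeout': '600',
    'jdk_location': 'http://ambari-server:8080/resources/',
  },
}
# With a fully configured agent this would be run roughly as:
#   ret = orchestrator.runCommand(command, '/tmp/output-42.txt', '/tmp/errors-42.txt')
# where ret['exitcode'] == 0 indicates that the hooks and script all succeeded.
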
Example #5
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"
    COMMAND_NAME_STATUS = "STATUS"
    COMMAND_NAME_SECURITY_STATUS = "SECURITY_STATUS"
    CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
    CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

    PRE_HOOK_PREFIX = "before"
    POST_HOOK_PREFIX = "after"

    HOSTS_LIST_KEY = "all_hosts"
    PING_PORTS_KEY = "all_ping_ports"
    AMBARI_SERVER_HOST = "ambari_server_host"

    def __init__(self, config, controller):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.exec_tmp_dir = config.get('agent', 'tmp_dir')
        self.file_cache = FileCache(config)
        self.python_executor = PythonExecutor(self.tmp_dir, config)
        self.status_commands_stdout = os.path.join(
            self.tmp_dir, 'status_command_stdout.txt')
        self.status_commands_stderr = os.path.join(
            self.tmp_dir, 'status_command_stderr.txt')
        self.public_fqdn = hostname.public_hostname(config)
        # cache reset will be called on every agent registration
        controller.registration_listeners.append(self.file_cache.reset)

        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore fail
        self.commands_in_progress_lock = threading.RLock()
        self.commands_in_progress = {}

    def map_task_to_process(self, task_id, processId):
        with self.commands_in_progress_lock:
            logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
            self.commands_in_progress[task_id] = processId

    def cancel_command(self, task_id, reason):
        with self.commands_in_progress_lock:
            if task_id in self.commands_in_progress.keys():
                pid = self.commands_in_progress.get(task_id)
                self.commands_in_progress[task_id] = reason
                logger.info("Canceling command with task_id - {tid}, " \
                            "reason - {reason} . Killing process {pid}"
                            .format(tid=str(task_id), reason=reason, pid=pid))
                shell.kill_process_with_children(pid)
            else:
                logger.warn("Unable to find pid by taskId = %s" % task_id)

    def runCommand(self,
                   command,
                   tmpoutfile,
                   tmperrfile,
                   forced_command_name=None,
                   override_output_files=True):
        """
    forced_command_name may be specified manually. In this case, the value defined
    in the command JSON is ignored.
    """
        try:
            script_type = command['commandParams']['script_type']
            script = command['commandParams']['script']
            timeout = int(command['commandParams']['command_timeout'])

            if 'hostLevelParams' in command and 'jdk_location' in command[
                    'hostLevelParams']:
                server_url_prefix = command['hostLevelParams']['jdk_location']
            else:
                server_url_prefix = command['commandParams']['jdk_location']

            task_id = "status"

            try:
                task_id = command['taskId']
                command_name = command['roleCommand']
            except KeyError:
                pass  # Status commands have no taskId

            if forced_command_name is not None:  # If supplied as an argument, override the value from the command JSON
                command_name = forced_command_name

            if command_name == self.CUSTOM_ACTION_COMMAND:
                base_dir = self.file_cache.get_custom_actions_base_dir(
                    server_url_prefix)
                script_tuple = (os.path.join(base_dir, 'scripts',
                                             script), base_dir)
                hook_dir = None
            else:
                if command_name == self.CUSTOM_COMMAND_COMMAND:
                    command_name = command['hostLevelParams']['custom_command']

                # forces a hash challenge on the directories to keep them updated, even
                # if the return value is not used
                self.file_cache.get_host_scripts_base_dir(server_url_prefix)
                hook_dir = self.file_cache.get_hook_base_dir(
                    command, server_url_prefix)
                base_dir = self.file_cache.get_service_base_dir(
                    command, server_url_prefix)

                script_path = self.resolve_script_path(base_dir, script)
                script_tuple = (script_path, base_dir)

            tmpstrucoutfile = os.path.join(
                self.tmp_dir, "structured-out-{0}.json".format(task_id))

            # We don't support anything else yet
            if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)

            # Execute command using proper interpreter
            handle = None
            if command.has_key('__handle'):
                handle = command['__handle']
                handle.on_background_command_started = self.map_task_to_process
                del command['__handle']

            json_path = self.dump_command_to_json(command)
            pre_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.PRE_HOOK_PREFIX, command_name, script_type)
            post_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.POST_HOOK_PREFIX, command_name, script_type)
            py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
            # filter None values
            filtered_py_file_list = [i for i in py_file_list if i]

            logger_level = logging.getLevelName(logger.level)

            # Executing hooks and script
            ret = None
            from ActionQueue import ActionQueue
            if command.has_key('commandType') and command[
                    'commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and len(
                        filtered_py_file_list) > 1:
                raise AgentException(
                    "Background commands are supported without hooks only")

            for py_file, current_base_dir in filtered_py_file_list:
                script_params = [command_name, json_path, current_base_dir]
                ret = self.python_executor.run_file(py_file,
                                                    script_params,
                                                    self.exec_tmp_dir,
                                                    tmpoutfile,
                                                    tmperrfile,
                                                    timeout,
                                                    tmpstrucoutfile,
                                                    logger_level,
                                                    self.map_task_to_process,
                                                    task_id,
                                                    override_output_files,
                                                    handle=handle)
                # Next run_file() invocations should always append to current output
                override_output_files = False
                if ret['exitcode'] != 0:
                    break

            if not ret:  # Something went wrong
                raise AgentException("No script has been executed")

            # if canceled and not background command
            if handle is None:
                cancel_reason = self.command_canceled_reason(task_id)
                if cancel_reason:
                    ret['stdout'] += cancel_reason
                    ret['stderr'] += cancel_reason

                    with open(tmpoutfile, "a") as f:
                        f.write(cancel_reason)
                    with open(tmperrfile, "a") as f:
                        f.write(cancel_reason)

        except Exception, e:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Caught an exception while executing "\
              "custom service command: {0}: {1}; {2}".format(exc_type, exc_obj, str(e))
            logger.exception(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'structuredOut': '{}',
                'exitcode': 1,
            }
        return ret
Example #6
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"
    COMMAND_TYPE = "commandType"
    COMMAND_NAME_STATUS = "STATUS"
    COMMAND_NAME_SECURITY_STATUS = "SECURITY_STATUS"
    CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
    CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

    PRE_HOOK_PREFIX = "before"
    POST_HOOK_PREFIX = "after"

    HOSTS_LIST_KEY = "all_hosts"
    PING_PORTS_KEY = "all_ping_ports"
    RACKS_KEY = "all_racks"
    IPV4_ADDRESSES_KEY = "all_ipv4_ips"

    AMBARI_SERVER_HOST = "ambari_server_host"
    AMBARI_SERVER_PORT = "ambari_server_port"
    AMBARI_SERVER_USE_SSL = "ambari_server_use_ssl"

    FREQUENT_COMMANDS = [COMMAND_NAME_SECURITY_STATUS, COMMAND_NAME_STATUS]
    DONT_DEBUG_FAILURES_FOR_COMMANDS = FREQUENT_COMMANDS
    REFLECTIVELY_RUN_COMMANDS = FREQUENT_COMMANDS  # frequently run commands; running them reflectively speeds them up
    DONT_BACKUP_LOGS_FOR_COMMANDS = FREQUENT_COMMANDS

    # Path where hadoop credential JARS will be available
    DEFAULT_CREDENTIAL_SHELL_LIB_PATH = '/var/lib/ambari-agent/cred/lib'
    DEFAULT_CREDENTIAL_CONF_DIR = '/var/lib/ambari-agent/cred/conf'
    DEFAULT_CREDENTIAL_SHELL_CMD = 'org.apache.hadoop.security.alias.CredentialShell'

    # The property name used by the hadoop credential provider
    CREDENTIAL_PROVIDER_PROPERTY_NAME = 'hadoop.security.credential.provider.path'

    def __init__(self, config, controller):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.force_https_protocol = config.get_force_https_protocol()
        self.exec_tmp_dir = Constants.AGENT_TMP_DIR
        self.file_cache = FileCache(config)
        self.status_commands_stdout = os.path.join(
            self.tmp_dir, 'status_command_stdout.txt')
        self.status_commands_stderr = os.path.join(
            self.tmp_dir, 'status_command_stderr.txt')
        self.public_fqdn = hostname.public_hostname(config)
        # cache reset will be called on every agent registration
        controller.registration_listeners.append(self.file_cache.reset)

        # Construct the hadoop credential lib JARs path
        self.credential_shell_lib_path = os.path.join(
            config.get('security', 'credential_lib_dir',
                       self.DEFAULT_CREDENTIAL_SHELL_LIB_PATH), '*')

        self.credential_conf_dir = config.get('security',
                                              'credential_conf_dir',
                                              self.DEFAULT_CREDENTIAL_CONF_DIR)

        self.credential_shell_cmd = config.get(
            'security', 'credential_shell_cmd',
            self.DEFAULT_CREDENTIAL_SHELL_CMD)

        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore fail
        self.commands_in_progress_lock = threading.RLock()
        self.commands_in_progress = {}

    def map_task_to_process(self, task_id, processId):
        with self.commands_in_progress_lock:
            logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
            self.commands_in_progress[task_id] = processId

    def cancel_command(self, task_id, reason):
        with self.commands_in_progress_lock:
            if task_id in self.commands_in_progress.keys():
                pid = self.commands_in_progress.get(task_id)
                self.commands_in_progress[task_id] = reason
                logger.info("Canceling command with taskId = {tid}, " \
                            "reason - {reason} . Killing process {pid}"
                            .format(tid=str(task_id), reason=reason, pid=pid))
                shell.kill_process_with_children(pid)
            else:
                logger.warn(
                    "Unable to find process associated with taskId = %s" %
                    task_id)

    def get_py_executor(self, forced_command_name):
        """
    Wrapper for unit testing
    :return:
    """
        if forced_command_name in self.REFLECTIVELY_RUN_COMMANDS:
            return PythonReflectiveExecutor(self.tmp_dir, self.config)
        else:
            return PythonExecutor(self.tmp_dir, self.config)

    def getProviderDirectory(self, service_name):
        """
    Gets the path to the service conf folder where the JCEKS file will be created.

    :param service_name: Name of the service, for example, HIVE
    :return: lower case path to the service conf folder
    """

        # The stack definition scripts of the service can move the
        # JCEKS file around to where it wants, which is usually
        # /etc/<service_name>/conf

        conf_dir = os.path.join(self.credential_conf_dir, service_name.lower())
        if not os.path.exists(conf_dir):
            os.makedirs(conf_dir, 0644)

        return conf_dir

    def getConfigTypeCredentials(self, commandJson):
        """
    Gets the affected config types for the service in this command
    with the password aliases and values.

    Input:
    {
        "config-type1" : {
          "password_key_name1":"password_value_name1",
          "password_key_name2":"password_value_name2",
            :
        },
        "config-type2" : {
          "password_key_name1":"password_value_name1",
          "password_key_name2":"password_value_name2",
            :
        },
           :
    }

    Output:
    {
        "config-type1" : {
          "alias1":"password1",
          "alias2":"password2",
            :
        },
        "config-type2" : {
          "alias1":"password1",
          "alias2":"password2",
            :
        },
           :
    }

    If password_key_name is the same as password_value_name, then password_key_name is the password alias itself.
    The value it points to is the password value.

    If password_key_name is not the same as the password_value_name, then password_key_name points to the alias.
    The value is pointed to by password_value_name.

    For example:
    Input:
    {
      "oozie-site" : {"oozie.service.JPAService.jdbc.password" : "oozie.service.JPAService.jdbc.password"},
      "admin-properties" {"db_user":"******", "ranger.jpa.jdbc.credential.alias:ranger-admin-site" : "db_password"}
    }

    Output:
    {
      "oozie-site" : {"oozie.service.JPAService.jdbc.password" : "MyOozieJdbcPassword"},
      "admin-properties" {"rangerdba" : "MyRangerDbaPassword", "rangeradmin":"MyRangerDbaPassword"},
    }

    :param commandJson:
    :return:
    """
        configtype_credentials = {}
        if 'configuration_credentials' in commandJson:
            for config_type, password_properties in commandJson[
                    'configuration_credentials'].items():
                if config_type in commandJson['configurations']:
                    value_names = []
                    config = commandJson['configurations'][config_type]
                    credentials = {}
                    for key_name, value_name in password_properties.items():
                        if key_name == value_name:
                            if value_name in config:
                                # password name is the alias
                                credentials[key_name] = config[value_name]
                                value_names.append(
                                    value_name
                                )  # Gather the value_name for deletion
                        else:
                            keyname_keyconfig = key_name.split(':')
                            key_name = keyname_keyconfig[0]
                            # if the key is in another configuration (cross reference),
                            # get the value of the key from that configuration
                            if (len(keyname_keyconfig) > 1):
                                if keyname_keyconfig[1] not in commandJson[
                                        'configurations']:
                                    continue
                                key_config = commandJson['configurations'][
                                    keyname_keyconfig[1]]
                            else:
                                key_config = config
                            if key_name in key_config and value_name in config:
                                # password name points to the alias
                                credentials[
                                    key_config[key_name]] = config[value_name]
                                value_names.append(
                                    value_name
                                )  # Gather the value_name for deletion
                    if len(credentials) > 0:
                        configtype_credentials[config_type] = credentials
                    for value_name in value_names:
                        # Remove the clear text password
                        config.pop(value_name, None)
        return configtype_credentials

    def generateJceks(self, commandJson):
        """
    Generates the JCEKS file with passwords for the service specified in commandJson

    :param commandJson: command JSON
    :return: An exit value from the external process that generated the JCEKS file. None if
    there are no passwords in the JSON.
    """
        cmd_result = None
        roleCommand = None
        if 'roleCommand' in commandJson:
            roleCommand = commandJson['roleCommand']

        logger.info('generateJceks: roleCommand={0}'.format(roleCommand))

        # Set up the variables for the external command to generate a JCEKS file
        java_home = commandJson['hostLevelParams']['java_home']
        java_bin = '{java_home}/bin/java'.format(java_home=java_home)

        cs_lib_path = self.credential_shell_lib_path
        serviceName = commandJson['serviceName']

        # Gather the password values and remove them from the configuration
        provider_paths = []  # A service may depend on multiple configs
        configtype_credentials = self.getConfigTypeCredentials(commandJson)
        for config_type, credentials in configtype_credentials.items():
            config = commandJson['configurations'][config_type]
            file_path = os.path.join(self.getProviderDirectory(serviceName),
                                     "{0}.jceks".format(config_type))
            if os.path.exists(file_path):
                os.remove(file_path)
            provider_path = 'jceks://file{file_path}'.format(
                file_path=file_path)
            provider_paths.append(provider_path)
            logger.info('provider_path={0}'.format(provider_path))
            for alias, pwd in credentials.items():
                logger.debug("config={0}".format(config))
                protected_pwd = PasswordString(pwd)
                # Generate the JCEKS file
                cmd = (java_bin, '-cp', cs_lib_path, self.credential_shell_cmd,
                       'create', alias, '-value', protected_pwd, '-provider',
                       provider_path)
                logger.info(cmd)
                cmd_result = subprocess.call(cmd)
                logger.info('cmd_result = {0}'.format(cmd_result))
                os.chmod(
                    file_path, 0644
                )  # group and others should have read access so that the service user can read

        if provider_paths:
            # Add JCEKS provider paths instead
            config[self.CREDENTIAL_PROVIDER_PROPERTY_NAME] = ','.join(
                provider_paths)

        return cmd_result

    def runCommand(self,
                   command,
                   tmpoutfile,
                   tmperrfile,
                   forced_command_name=None,
                   override_output_files=True,
                   retry=False):
        """
    forced_command_name may be specified manually. In this case, the value defined
    in the command JSON is ignored.
    """
        try:
            script_type = command['commandParams']['script_type']
            script = command['commandParams']['script']
            timeout = int(command['commandParams']['command_timeout'])

            if 'hostLevelParams' in command and 'jdk_location' in command[
                    'hostLevelParams']:
                server_url_prefix = command['hostLevelParams']['jdk_location']
            else:
                server_url_prefix = command['commandParams']['jdk_location']

            task_id = "status"

            try:
                task_id = command['taskId']
                command_name = command['roleCommand']
            except KeyError:
                pass  # Status commands have no taskId

            if forced_command_name is not None:  # If supplied as an argument, override the value from the command JSON
                command_name = forced_command_name

            if command_name == self.CUSTOM_ACTION_COMMAND:
                base_dir = self.file_cache.get_custom_actions_base_dir(
                    server_url_prefix)
                script_tuple = (os.path.join(base_dir, 'scripts',
                                             script), base_dir)
                hook_dir = None
            else:
                if command_name == self.CUSTOM_COMMAND_COMMAND:
                    command_name = command['hostLevelParams']['custom_command']

                # forces a hash challenge on the directories to keep them updated, even
                # if the return value is not used
                self.file_cache.get_host_scripts_base_dir(server_url_prefix)
                hook_dir = self.file_cache.get_hook_base_dir(
                    command, server_url_prefix)
                base_dir = self.file_cache.get_service_base_dir(
                    command, server_url_prefix)
                self.file_cache.get_custom_resources_subdir(
                    command, server_url_prefix)

                script_path = self.resolve_script_path(base_dir, script)
                script_tuple = (script_path, base_dir)

            tmpstrucoutfile = os.path.join(
                self.tmp_dir, "structured-out-{0}.json".format(task_id))

            # We don't support anything else yet
            if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)

            # Execute command using proper interpreter
            handle = None
            if command.has_key('__handle'):
                handle = command['__handle']
                handle.on_background_command_started = self.map_task_to_process
                del command['__handle']

            # If command contains credentialStoreEnabled, then
            # generate the JCEKS file for the configurations.
            credentialStoreEnabled = False
            if 'credentialStoreEnabled' in command:
                credentialStoreEnabled = (
                    command['credentialStoreEnabled'] == "true")

            if credentialStoreEnabled == True:
                self.generateJceks(command)

            json_path = self.dump_command_to_json(command, retry)
            pre_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.PRE_HOOK_PREFIX, command_name, script_type)
            post_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.POST_HOOK_PREFIX, command_name, script_type)
            py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
            # filter None values
            filtered_py_file_list = [i for i in py_file_list if i]

            logger_level = logging.getLevelName(logger.level)

            # Executing hooks and script
            ret = None
            from ActionQueue import ActionQueue
            if command.has_key('commandType') and command[
                    'commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and len(
                        filtered_py_file_list) > 1:
                raise AgentException(
                    "Background commands are supported without hooks only")

            python_executor = self.get_py_executor(forced_command_name)
            backup_log_files = not command_name in self.DONT_BACKUP_LOGS_FOR_COMMANDS
            log_out_files = self.config.get(
                "logging", "log_out_files", default="0") != "0"

            for py_file, current_base_dir in filtered_py_file_list:
                log_info_on_failure = not command_name in self.DONT_DEBUG_FAILURES_FOR_COMMANDS
                script_params = [
                    command_name, json_path, current_base_dir, tmpstrucoutfile,
                    logger_level, self.exec_tmp_dir, self.force_https_protocol
                ]

                if log_out_files:
                    script_params.append("-o")

                ret = python_executor.run_file(
                    py_file,
                    script_params,
                    tmpoutfile,
                    tmperrfile,
                    timeout,
                    tmpstrucoutfile,
                    self.map_task_to_process,
                    task_id,
                    override_output_files,
                    backup_log_files=backup_log_files,
                    handle=handle,
                    log_info_on_failure=log_info_on_failure)
                # Next run_file() invocations should always append to current output
                override_output_files = False
                if ret['exitcode'] != 0:
                    break

            if not ret:  # Something went wrong
                raise AgentException("No script has been executed")

            # if canceled and not background command
            if handle is None:
                cancel_reason = self.command_canceled_reason(task_id)
                if cancel_reason is not None:
                    ret['stdout'] += cancel_reason
                    ret['stderr'] += cancel_reason

                    with open(tmpoutfile, "a") as f:
                        f.write(cancel_reason)
                    with open(tmperrfile, "a") as f:
                        f.write(cancel_reason)

        except Exception, e:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Caught an exception while executing "\
              "custom service command: {0}: {1}; {2}".format(exc_type, exc_obj, str(e))
            logger.exception(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'structuredOut': '{}',
                'exitcode': 1,
            }
        return ret
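
The credential-store branch above only fires when the server sets credentialStoreEnabled to the string "true"; generateJceks() then walks configuration_credentials, pulls the matching clear-text values out of configurations, writes them into per-config-type JCEKS files, and strips them from the command. A hedged fragment in the shape getConfigTypeCredentials() expects; every name and value below is made up:

# Illustrative command fragment: configuration_credentials maps a config type to
# {password_key_name: password_value_name}, and configurations still holds the
# clear-text value that will be moved into the JCEKS file.
command_fragment = {
  'credentialStoreEnabled': 'true',
  'serviceName': 'HIVE',
  'roleCommand': 'START',
  'hostLevelParams': {'java_home': '/usr/jdk64/jdk1.8.0'},
  'configuration_credentials': {
    'hive-site': {
      'javax.jdo.option.ConnectionPassword': 'javax.jdo.option.ConnectionPassword',
    },
  },
  'configurations': {
    'hive-site': {
      'javax.jdo.option.ConnectionPassword': 'example-password',
    },
  },
}
# getConfigTypeCredentials(command_fragment) would return
#   {'hive-site': {'javax.jdo.option.ConnectionPassword': 'example-password'}}
# and remove the clear-text value from command_fragment['configurations']['hive-site'].
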
Example #7
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"
    COMMAND_NAME_STATUS = "STATUS"
    CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
    CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

    PRE_HOOK_PREFIX = "before"
    POST_HOOK_PREFIX = "after"

    HOSTS_LIST_KEY = "all_hosts"
    PING_PORTS_KEY = "all_ping_ports"
    AMBARI_SERVER_HOST = "ambari_server_host"

    def __init__(self, config, controller):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.exec_tmp_dir = config.get('agent', 'tmp_dir')
        self.file_cache = FileCache(config)
        self.python_executor = PythonExecutor(self.tmp_dir, config)
        self.status_commands_stdout = os.path.join(
            self.tmp_dir, 'status_command_stdout.txt')
        self.status_commands_stderr = os.path.join(
            self.tmp_dir, 'status_command_stderr.txt')
        self.public_fqdn = hostname.public_hostname(config)
        # cache reset will be called on every agent registration
        controller.registration_listeners.append(self.file_cache.reset)

        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore fail
        self.commands_in_progress_lock = threading.RLock()
        self.commands_in_progress = {}

    def map_task_to_process(self, task_id, processId):
        with self.commands_in_progress_lock:
            logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
            self.commands_in_progress[task_id] = processId

    def cancel_command(self, task_id, reason):
        with self.commands_in_progress_lock:
            if task_id in self.commands_in_progress.keys():
                pid = self.commands_in_progress.get(task_id)
                self.commands_in_progress[task_id] = reason
                logger.info("Canceling command with task_id - {tid}, " \
                            "reason - {reason} . Killing process {pid}"
                .format(tid = str(task_id), reason = reason, pid = pid))
                shell.kill_process_with_children(pid)
            else:
                logger.warn("Unable to find pid by taskId = %s" % task_id)

    def runCommand(self,
                   command,
                   tmpoutfile,
                   tmperrfile,
                   forced_command_name=None,
                   override_output_files=True):
        """
    forced_command_name may be specified manually. In this case, the value defined
    in the command JSON is ignored.
    """
        try:
            script_type = command['commandParams']['script_type']
            script = command['commandParams']['script']
            timeout = int(command['commandParams']['command_timeout'])

            if 'hostLevelParams' in command and 'jdk_location' in command[
                    'hostLevelParams']:
                server_url_prefix = command['hostLevelParams']['jdk_location']
            else:
                server_url_prefix = command['commandParams']['jdk_location']
            task_id = "status"
            try:
                task_id = command['taskId']
                command_name = command['roleCommand']
            except KeyError:
                pass  # Status commands have no taskId

            if forced_command_name is not None:  # If supplied as an argument, override the value from the command JSON
                command_name = forced_command_name

            if command_name == self.CUSTOM_ACTION_COMMAND:
                base_dir = self.file_cache.get_custom_actions_base_dir(
                    server_url_prefix)
                script_tuple = (os.path.join(base_dir, script), base_dir)
                hook_dir = None
            else:
                if command_name == self.CUSTOM_COMMAND_COMMAND:
                    command_name = command['hostLevelParams']['custom_command']
                hook_dir = self.file_cache.get_hook_base_dir(
                    command, server_url_prefix)
                base_dir = self.file_cache.get_service_base_dir(
                    command, server_url_prefix)
                script_path = self.resolve_script_path(base_dir, script,
                                                       script_type)
                script_tuple = (script_path, base_dir)

            tmpstrucoutfile = os.path.join(
                self.tmp_dir, "structured-out-{0}.json".format(task_id))

            if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
                # We don't support anything else yet
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)
            # Execute command using proper interpreter
            handle = None
            if (command.has_key('__handle')):
                handle = command['__handle']
                handle.on_background_command_started = self.map_task_to_process
                del command['__handle']

            json_path = self.dump_command_to_json(command)
            pre_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.PRE_HOOK_PREFIX, command_name, script_type)
            post_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.POST_HOOK_PREFIX, command_name, script_type)
            py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
            # filter None values
            filtered_py_file_list = [i for i in py_file_list if i]

            logger_level = logging.getLevelName(logger.level)

            # Executing hooks and script
            ret = None
            from ActionQueue import ActionQueue
            if (command.has_key('commandType') and command['commandType']
                    == ActionQueue.BACKGROUND_EXECUTION_COMMAND
                    and len(filtered_py_file_list) > 1):
                raise AgentException(
                    "Background commands are supported without hooks only")

            for py_file, current_base_dir in filtered_py_file_list:
                script_params = [command_name, json_path, current_base_dir]
                ret = self.python_executor.run_file(py_file,
                                                    script_params,
                                                    self.exec_tmp_dir,
                                                    tmpoutfile,
                                                    tmperrfile,
                                                    timeout,
                                                    tmpstrucoutfile,
                                                    logger_level,
                                                    self.map_task_to_process,
                                                    task_id,
                                                    override_output_files,
                                                    handle=handle)
                # Next run_file() invocations should always append to current output
                override_output_files = False
                if ret['exitcode'] != 0:
                    break

            if not ret:  # Something went wrong
                raise AgentException("No script has been executed")

            # if canceled and not background command
            if handle is None:
                cancel_reason = self.command_canceled_reason(task_id)
                if cancel_reason:
                    ret['stdout'] += cancel_reason
                    ret['stderr'] += cancel_reason

                    with open(tmpoutfile, "a") as f:
                        f.write(cancel_reason)
                    with open(tmperrfile, "a") as f:
                        f.write(cancel_reason)

        except Exception:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Catched an exception while executing "\
              "custom service command: {0}: {1}".format(exc_type, exc_obj)
            logger.exception(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'structuredOut': '{}',
                'exitcode': 1,
            }
        return ret

    def command_canceled_reason(self, task_id):
        with self.commands_in_progress_lock:
            if self.commands_in_progress.has_key(
                    task_id
            ):  #Background command do not push in this collection (TODO)
                logger.debug('Pop with taskId %s' % task_id)
                pid = self.commands_in_progress.pop(task_id)
                if not isinstance(pid, int):
                    return '\nCommand aborted. ' + pid
        return None

    def requestComponentStatus(self, command):
        """
     Component status is determined by the exit code returned by runCommand().
     Exit code 0 means the component is running; any other exit code means it is not.
    """
        override_output_files = True  # by default, we override status command output
        if logger.level == logging.DEBUG:
            override_output_files = False
        res = self.runCommand(command,
                              self.status_commands_stdout,
                              self.status_commands_stderr,
                              self.COMMAND_NAME_STATUS,
                              override_output_files=override_output_files)
        return res

    def resolve_script_path(self, base_dir, script, script_type):
        """
    Encapsulates the logic of script location determination.
    """
        path = os.path.join(base_dir, script)
        if not os.path.exists(path):
            message = "Script {0} does not exist".format(path)
            raise AgentException(message)
        return path

    def resolve_hook_script_path(self, stack_hooks_dir, prefix, command_name,
                                 script_type):
        """
    Returns a tuple(path to hook script, hook base dir) according to string prefix
    and command name. If script does not exist, returns None
    """
        if not stack_hooks_dir:
            return None
        hook_dir = "{0}-{1}".format(prefix, command_name)
        hook_base_dir = os.path.join(stack_hooks_dir, hook_dir)
        hook_script_path = os.path.join(hook_base_dir, "scripts", "hook.py")
        if not os.path.isfile(hook_script_path):
            logger.debug(
                "Hook script {0} not found, skipping".format(hook_script_path))
            return None
        return hook_script_path, hook_base_dir

    def dump_command_to_json(self, command):
        """
    Converts command to json file and returns file path
    """
        # Perform a few modifications to the command before dumping it to JSON
        public_fqdn = self.public_fqdn
        command['public_hostname'] = public_fqdn
        # Now, dump the json file
        command_type = command['commandType']
        from ActionQueue import ActionQueue  # To avoid cyclic dependency
        if command_type == ActionQueue.STATUS_COMMAND:
            # These files are frequently created, that's why we don't
            # store them all, but only the latest one
            file_path = os.path.join(self.tmp_dir, "status_command.json")
        else:
            task_id = command['taskId']
            if 'clusterHostInfo' in command and command['clusterHostInfo']:
                command['clusterHostInfo'] = self.decompressClusterHostInfo(
                    command['clusterHostInfo'])
            file_path = os.path.join(self.tmp_dir,
                                     "command-{0}.json".format(task_id))
        # Json may contain passwords, that's why we need proper permissions
        if os.path.isfile(file_path):
            os.unlink(file_path)
        with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT, 0600),
                       'w') as f:
            content = json.dumps(command, sort_keys=False, indent=4)
            f.write(content)
        return file_path
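
resolve_hook_script_path() above encodes a simple naming convention: <stack_hooks_dir>/<prefix>-<command_name>/scripts/hook.py, and the tuple is returned only when that file exists on disk. A small illustration of the probed paths; the hooks directory here is an assumed example, not taken from the code above:

import os

stack_hooks_dir = '/var/lib/ambari-agent/cache/stacks/HDP/2.0.6/hooks'
for prefix in ('before', 'after'):
  hook_base_dir = os.path.join(stack_hooks_dir, '{0}-{1}'.format(prefix, 'START'))
  print(os.path.join(hook_base_dir, 'scripts', 'hook.py'))
# -> .../hooks/before-START/scripts/hook.py
# -> .../hooks/after-START/scripts/hook.py
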
Example #8
class CustomServiceOrchestrator():
  """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

  SCRIPT_TYPE_PYTHON = "PYTHON"
  COMMAND_NAME_STATUS = "STATUS"
  CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
  CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

  PRE_HOOK_PREFIX="before"
  POST_HOOK_PREFIX="after"

  def __init__(self, config, controller):
    self.config = config
    self.tmp_dir = config.get('agent', 'prefix')
    self.file_cache = FileCache(config)
    self.python_executor = PythonExecutor(self.tmp_dir, config)
    self.status_commands_stdout = os.path.join(self.tmp_dir,
                                               'status_command_stdout.txt')
    self.status_commands_stderr = os.path.join(self.tmp_dir,
                                               'status_command_stderr.txt')
    # cache reset will be called on every agent registration
    controller.registration_listeners.append(self.file_cache.reset)
    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore fail


  def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name = None,
                 override_output_files = True):
    """
    forced_command_name may be specified manually. In this case, the value defined
    in the command JSON is ignored.
    """
    try:
      script_type = command['commandParams']['script_type']
      script = command['commandParams']['script']
      timeout = int(command['commandParams']['command_timeout'])
      server_url_prefix = command['hostLevelParams']['jdk_location']
      task_id = "status"
      try:
        task_id = command['taskId']
        command_name = command['roleCommand']
      except KeyError:
        pass # Status commands have no taskId

      if forced_command_name is not None: # If supplied as an argument, override the value from the command JSON
        command_name = forced_command_name

      if command_name == self.CUSTOM_ACTION_COMMAND:
        base_dir = self.file_cache.get_custom_actions_base_dir(server_url_prefix)
        script_tuple = (os.path.join(base_dir, script) , base_dir)
        hook_dir = None
      else:
        if command_name == self.CUSTOM_COMMAND_COMMAND:
          command_name = command['hostLevelParams']['custom_command']
        hook_dir = self.file_cache.get_hook_base_dir(command, server_url_prefix)
        base_dir = self.file_cache.get_service_base_dir(command, server_url_prefix)
        script_path = self.resolve_script_path(base_dir, script, script_type)
        script_tuple = (script_path, base_dir)


      tmpstrucoutfile = os.path.join(self.tmp_dir,
                                    "structured-out-{0}.json".format(task_id))
      if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
      # We don't support anything else yet
        message = "Unknown script type {0}".format(script_type)
        raise AgentException(message)
      # Execute command using proper interpreter
      json_path = self.dump_command_to_json(command)
      pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.PRE_HOOK_PREFIX, command_name, script_type)
      post_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.POST_HOOK_PREFIX, command_name, script_type)
      py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
      # filter None values
      filtered_py_file_list = [i for i in py_file_list if i]

      # Executing hooks and script
      ret = None
      for py_file, current_base_dir in filtered_py_file_list:
        script_params = [command_name, json_path, current_base_dir]
        ret = self.python_executor.run_file(py_file, script_params,
                               tmpoutfile, tmperrfile, timeout,
                               tmpstrucoutfile, override_output_files)
        # Next run_file() invocations should always append to current output
        override_output_files = False
        if ret['exitcode'] != 0:
          break

      if not ret: # Something went wrong
        raise AgentException("No script has been executed")

    except Exception: # We do not want to let agent fail completely
      exc_type, exc_obj, exc_tb = sys.exc_info()
      message = "Catched an exception while executing "\
        "custom service command: {0}: {1}".format(exc_type, exc_obj)
      logger.exception(message)
      ret = {
        'stdout' : message,
        'stderr' : message,
        'structuredOut' : '{}',
        'exitcode': 1,
      }
    return ret


  def requestComponentStatus(self, command):
    """
     Component status is determined by the exit code returned by runCommand().
     Exit code 0 means the component is running; any other exit code means it is not.
    """
    override_output_files=True # by default, we override status command output
    if logger.level == logging.DEBUG:
      override_output_files = False
    res = self.runCommand(command, self.status_commands_stdout,
                          self.status_commands_stderr, self.COMMAND_NAME_STATUS,
                          override_output_files=override_output_files)
    if res['exitcode'] == 0:
      return LiveStatus.LIVE_STATUS
    else:
      return LiveStatus.DEAD_STATUS


  def resolve_script_path(self, base_dir, script, script_type):
    """
    Encapsulates the logic of script location determination.
    """
    path = os.path.join(base_dir, script)
    if not os.path.exists(path):
      message = "Script {0} does not exist".format(path)
      raise AgentException(message)
    return path


  def resolve_hook_script_path(self, stack_hooks_dir, prefix, command_name, script_type):
    """
    Returns a tuple(path to hook script, hook base dir) according to string prefix
    and command name. If script does not exist, returns None
    """
    if not stack_hooks_dir:
      return None
    hook_dir = "{0}-{1}".format(prefix, command_name)
    hook_base_dir = os.path.join(stack_hooks_dir, hook_dir)
    hook_script_path = os.path.join(hook_base_dir, "scripts", "hook.py")
    if not os.path.isfile(hook_script_path):
      logger.debug("Hook script {0} not found, skipping".format(hook_script_path))
      return None
    return hook_script_path, hook_base_dir


  def dump_command_to_json(self, command):
    """
    Converts command to json file and returns file path
    """
    # Perform a few modifications to stay compatible with the way in which
    # site.pp files are generated by manifestGenerator.py
    public_fqdn = hostname.public_hostname()
    command['public_hostname'] = public_fqdn
    # Now, dump the json file
    command_type = command['commandType']
    from ActionQueue import ActionQueue  # To avoid cyclic dependency
    if command_type == ActionQueue.STATUS_COMMAND:
      # These files are frequently created, that's why we don't
      # store them all, but only the latest one
      file_path = os.path.join(self.tmp_dir, "status_command.json")
    else:
      task_id = command['taskId']
      command['clusterHostInfo'] = manifestGenerator.decompressClusterHostInfo(command['clusterHostInfo'])
      file_path = os.path.join(self.tmp_dir, "command-{0}.json".format(task_id))
    # Json may contain passwords, that's why we need proper permissions
    if os.path.isfile(file_path):
      os.unlink(file_path)
    with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT,
                           0600), 'w') as f:
      content = json.dumps(command, sort_keys = False, indent = 4)
      f.write(content)
    return file_path
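
Unlike the later versions, requestComponentStatus() in this revision collapses the runCommand() result into LiveStatus constants: exit code 0 maps to LIVE_STATUS, anything else to DEAD_STATUS. A trivial standalone sketch of that mapping with placeholder status strings; the real constants live in the agent's LiveStatus class:

# Placeholder mapping mirroring requestComponentStatus() above.
def interpret_status_exitcode(exitcode, live_status='STARTED', dead_status='INSTALLED'):
  return live_status if exitcode == 0 else dead_status

print(interpret_status_exitcode(0))  # component reported as running
print(interpret_status_exitcode(1))  # component reported as not running
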
Example #9
class CustomServiceOrchestrator():
  """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

  SCRIPT_TYPE_PYTHON = "PYTHON"
  COMMAND_NAME_STATUS = "STATUS"
  CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
  CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

  PRE_HOOK_PREFIX="before"
  POST_HOOK_PREFIX="after"

  HOSTS_LIST_KEY = "all_hosts"
  PING_PORTS_KEY = "all_ping_ports"
  AMBARI_SERVER_HOST = "ambari_server_host"

  def __init__(self, config, controller):
    self.config = config
    self.tmp_dir = config.get('agent', 'prefix')
    self.exec_tmp_dir = config.get('agent', 'tmp_dir')
    self.file_cache = FileCache(config)
    self.python_executor = PythonExecutor(self.tmp_dir, config)
    self.status_commands_stdout = os.path.join(self.tmp_dir,
                                               'status_command_stdout.txt')
    self.status_commands_stderr = os.path.join(self.tmp_dir,
                                               'status_command_stderr.txt')
    self.public_fqdn = hostname.public_hostname(config)
    # cache reset will be called on every agent registration
    controller.registration_listeners.append(self.file_cache.reset)
    
    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore fail
    self.commands_in_progress_lock = threading.RLock()
    self.commands_in_progress = {}

  def map_task_to_process(self, task_id, processId):
    with self.commands_in_progress_lock:
      logger.debug('Maps taskId=%s to pid=%s'%(task_id, processId))
      self.commands_in_progress[task_id] = processId

  def cancel_command(self, task_id, reason):
    with self.commands_in_progress_lock:
      if task_id in self.commands_in_progress:
        pid = self.commands_in_progress.get(task_id)
        self.commands_in_progress[task_id] = reason
        logger.info("Canceling command with task_id - {tid}, "
                    "reason - {reason}. Killing process {pid}"
                    .format(tid=str(task_id), reason=reason, pid=pid))
        shell.kill_process_with_children(pid)
      else: 
        logger.warn("Unable to find pid by taskId = %s"%task_id)

  def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name = None,
                 override_output_files = True):
    """
    forced_command_name may be specified manually; in that case the value defined
    in the command JSON is ignored.
    """
    try:
      script_type = command['commandParams']['script_type']
      script = command['commandParams']['script']
      timeout = int(command['commandParams']['command_timeout'])
      
      if 'hostLevelParams' in command and 'jdk_location' in command['hostLevelParams']:
        server_url_prefix = command['hostLevelParams']['jdk_location']
      else:
        server_url_prefix = command['commandParams']['jdk_location']
      task_id = "status"
      try:
        task_id = command['taskId']
        command_name = command['roleCommand']
      except KeyError:
        pass # Status commands have no taskId

      if forced_command_name is not None: # Overrides the command name from the command JSON
        command_name = forced_command_name

      if command_name == self.CUSTOM_ACTION_COMMAND:
        base_dir = self.file_cache.get_custom_actions_base_dir(server_url_prefix)
        script_tuple = (os.path.join(base_dir, script), base_dir)
        hook_dir = None
      else:
        if command_name == self.CUSTOM_COMMAND_COMMAND:
          command_name = command['hostLevelParams']['custom_command']
        hook_dir = self.file_cache.get_hook_base_dir(command, server_url_prefix)
        base_dir = self.file_cache.get_service_base_dir(command, server_url_prefix)
        script_path = self.resolve_script_path(base_dir, script, script_type)
        script_tuple = (script_path, base_dir)

      tmpstrucoutfile = os.path.join(self.tmp_dir,
                                    "structured-out-{0}.json".format(task_id))

      if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
        # We don't support anything else yet
        message = "Unknown script type {0}".format(script_type)
        raise AgentException(message)
      # Execute command using proper interpreter
      handle = None
      if '__handle' in command:
        handle = command['__handle']
        handle.on_background_command_started = self.map_task_to_process
        del command['__handle']
      
      json_path = self.dump_command_to_json(command)
      pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.PRE_HOOK_PREFIX, command_name, script_type)
      post_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.POST_HOOK_PREFIX, command_name, script_type)
      py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
      # filter None values
      filtered_py_file_list = [i for i in py_file_list if i]

      logger_level = logging.getLevelName(logger.level)

      # Executing hooks and script
      ret = None
      from ActionQueue import ActionQueue
      if 'commandType' in command and \
          command['commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and \
          len(filtered_py_file_list) > 1:
        raise AgentException("Background commands are only supported without hooks")

      for py_file, current_base_dir in filtered_py_file_list:
        script_params = [command_name, json_path, current_base_dir]
        ret = self.python_executor.run_file(py_file, script_params,
                               self.exec_tmp_dir, tmpoutfile, tmperrfile, timeout,
                               tmpstrucoutfile, logger_level, self.map_task_to_process,
                               task_id, override_output_files, handle = handle)
        # Next run_file() invocations should always append to current output
        override_output_files = False
        if ret['exitcode'] != 0:
          break

      if not ret: # Something went wrong
        raise AgentException("No script has been executed")

      # if canceled and not background command
      if handle is None:
        cancel_reason = self.command_canceled_reason(task_id)
        if cancel_reason:
          ret['stdout'] += cancel_reason
          ret['stderr'] += cancel_reason
  
          with open(tmpoutfile, "a") as f:
            f.write(cancel_reason)
          with open(tmperrfile, "a") as f:
            f.write(cancel_reason)

    except Exception: # We do not want to let agent fail completely
      exc_type, exc_obj, exc_tb = sys.exc_info()
      message = "Catched an exception while executing "\
        "custom service command: {0}: {1}".format(exc_type, exc_obj)
      logger.exception(message)
      ret = {
        'stdout' : message,
        'stderr' : message,
        'structuredOut' : '{}',
        'exitcode': 1,
      }
    return ret
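
  # Cancellation protocol: cancel_command() above replaces the pid stored for a task
  # with the textual cancellation reason; command_canceled_reason() below notices that
  # the stored value is no longer an int and returns that reason so runCommand() can
  # append it to the command output.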
  def command_canceled_reason(self, task_id):
    with self.commands_in_progress_lock:
      if task_id in self.commands_in_progress: # Background commands are not put into this collection (TODO)
        logger.debug('Pop with taskId %s' % task_id)
        pid = self.commands_in_progress.pop(task_id)
        if not isinstance(pid, int):
          return '\nCommand aborted. ' + pid
    return None
        
  def requestComponentStatus(self, command):
    """
    Component status is determined by the exit code returned by runCommand().
    Exit code 0 means the component is running; any other exit code means it is not.
    """
    override_output_files=True # by default, we override status command output
    if logger.level == logging.DEBUG:
      override_output_files = False
    res = self.runCommand(command, self.status_commands_stdout,
                          self.status_commands_stderr, self.COMMAND_NAME_STATUS,
                          override_output_files=override_output_files)
    return res

  def resolve_script_path(self, base_dir, script, script_type):
    """
    Encapsulates the logic of determining the script location.
    """
    path = os.path.join(base_dir, script)
    if not os.path.exists(path):
      message = "Script {0} does not exist".format(path)
      raise AgentException(message)
    return path


  def resolve_hook_script_path(self, stack_hooks_dir, prefix, command_name, script_type):
    """
    Returns a tuple (path to hook script, hook base dir) based on the given prefix
    and command name. Returns None if the hook script does not exist.
    """
    if not stack_hooks_dir:
      return None
    hook_dir = "{0}-{1}".format(prefix, command_name)
    hook_base_dir = os.path.join(stack_hooks_dir, hook_dir)
    hook_script_path = os.path.join(hook_base_dir, "scripts", "hook.py")
    if not os.path.isfile(hook_script_path):
      logger.debug("Hook script {0} not found, skipping".format(hook_script_path))
      return None
    return hook_script_path, hook_base_dir


  def dump_command_to_json(self, command):
    """
    Converts command to json file and returns file path
    """
    # Perform a few modifications before dumping: set the public hostname on the command
    public_fqdn = self.public_fqdn
    command['public_hostname'] = public_fqdn
    # Now, dump the json file
    command_type = command['commandType']
    from ActionQueue import ActionQueue  # To avoid cyclic dependency
    if command_type == ActionQueue.STATUS_COMMAND:
      # These files are created frequently, that's why we don't
      # store them all, but only the latest one
      file_path = os.path.join(self.tmp_dir, "status_command.json")
    else:
      task_id = command['taskId']
      if 'clusterHostInfo' in command and command['clusterHostInfo']:
        command['clusterHostInfo'] = self.decompressClusterHostInfo(command['clusterHostInfo'])
      file_path = os.path.join(self.tmp_dir, "command-{0}.json".format(task_id))
    # The JSON may contain passwords, that's why we need restrictive permissions
    if os.path.isfile(file_path):
      os.unlink(file_path)
    with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT,
                           0600), 'w') as f:
      content = json.dumps(command, sort_keys = False, indent = 4)
      f.write(content)
    return file_path
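
# Hedged usage sketch for the class above (values are hypothetical; a real agent
# supplies the `config` and `controller` objects). requestComponentStatus() forces
# the STATUS command name and returns the full result dict, so callers check
# res['exitcode'].
status_command = {
  'commandType': 'STATUS_COMMAND',  # hypothetical literal standing in for ActionQueue.STATUS_COMMAND
  'taskId': 7,                      # hypothetical; real status commands may omit it
  'commandParams': {
    'script_type': 'PYTHON',
    'script': 'scripts/datanode.py',                          # hypothetical
    'command_timeout': '600',
    'jdk_location': 'http://ambari.server:8080/resources/',   # hypothetical
  },
}
# orchestrator = CustomServiceOrchestrator(config, controller)
# res = orchestrator.requestComponentStatus(status_command)
# res['exitcode'] == 0  => the component is considered running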
Example #10
0
class CustomServiceOrchestrator():
    """
    Executes a command for custom service. stdout and stderr are written to
    tmpoutfile and to tmperrfile respectively.
    """

    SCRIPT_TYPE_PYTHON = "PYTHON"

    def __init__(self, config):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.file_cache = FileCache(config)
        self.python_executor = PythonExecutor(self.tmp_dir, config)

    def runCommand(self, command, tmpoutfile, tmperrfile):
        try:
            component_name = command['role']
            stack_name = command['hostLevelParams']['stack_name']
            stack_version = command['hostLevelParams']['stack_version']
            script_type = command['commandParams']['script_type']
            script = command['commandParams']['script']
            command_name = command['roleCommand']
            timeout = int(command['commandParams']['command_timeout'])
            metadata_folder = command['commandParams'][
                'service_metadata_folder']
            base_dir = self.file_cache.get_service_base_dir(
                stack_name, stack_version, metadata_folder, component_name)
            script_path = self.resolve_script_path(base_dir, script,
                                                   script_type)
            if script_type.upper() == self.SCRIPT_TYPE_PYTHON:
                json_path = self.dump_command_to_json(command)
                script_params = [command_name, json_path, base_dir]
                ret = self.python_executor.run_file(script_path, script_params,
                                                    tmpoutfile, tmperrfile,
                                                    timeout)
            else:
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)
        except Exception:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Catched an exception while executing "\
              "custom service command: {0}: {1}".format(exc_type, exc_obj)
            logger.error(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'exitcode': 1,
            }
        return ret

    def resolve_script_path(self, base_dir, script, script_type):
        """
    Incapsulates logic of script location determination.
    """
        path = os.path.join(base_dir, "package", script)
        if not os.path.exists(path):
            message = "Script {0} does not exist".format(path)
            raise AgentException(message)
        return path

    def dump_command_to_json(self, command):
        """
    Converts command to json file and returns file path
    """
        task_id = command['taskId']
        file_path = os.path.join(self.tmp_dir,
                                 "command-{0}.json".format(task_id))
        # Command json contains passwords, that's why we need proper permissions
        with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT, 0600),
                       'w') as f:
            content = json.dumps(command, sort_keys=False, indent=4)
            f.write(content)
        return file_path
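
# Hedged sketch of the minimal command dict this variant of runCommand() reads
# (all values are hypothetical; only the keys accessed in the code above are included).
example_command = {
    'role': 'DATANODE',                                   # hypothetical component
    'roleCommand': 'START',
    'taskId': 7,
    'hostLevelParams': {'stack_name': 'HDP', 'stack_version': '2.0.6'},
    'commandParams': {
        'script_type': 'PYTHON',
        'script': 'scripts/datanode.py',                  # hypothetical
        'command_timeout': '600',
        'service_metadata_folder': 'HDFS',                # hypothetical
    },
}
# orchestrator = CustomServiceOrchestrator(config)  # config: an AmbariConfig-like object
# result = orchestrator.runCommand(example_command, '/tmp/out.txt', '/tmp/err.txt')
# result['exitcode'] == 0 indicates success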
Example #11
0
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"

    def __init__(self, config):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.file_cache = FileCache(config)
        self.python_executor = PythonExecutor(self.tmp_dir, config)

    def runCommand(self, command, tmpoutfile, tmperrfile):
        try:
            # TODO: Adjust variables
            service_name = command['serviceName']
            component_name = command['role']
            stack_name = command['stackName']  # TODO: add at the server side
            stack_version = command[
                'stackVersion']  # TODO: add at the server side
            script_type = command['scriptType']  # TODO: add at the server side
            script = command['script']
            command_name = command['roleCommand']
            timeout = int(command['timeout'])  # TODO: add at the server side
            base_dir = self.file_cache.get_service_base_dir(
                stack_name, stack_version, service_name, component_name)
            script_path = self.resolve_script_path(base_dir, script,
                                                   script_type)
            if script_type == self.SCRIPT_TYPE_PYTHON:
                json_path = self.dump_command_to_json(command)
                script_params = [command_name, json_path, base_dir]
                ret = self.python_executor.run_file(script_path, script_params,
                                                    tmpoutfile, tmperrfile,
                                                    timeout)
            else:
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)
        except Exception:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Catched an exception while executing "\
              "custom service command: {0}: {1}".format(exc_type, exc_obj)
            logger.error(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'exitCode': 1,
            }
        return ret

    def resolve_script_path(self, base_dir, script, script_type):
        """
    Incapsulates logic of script location determination.
    """
        path = os.path.join(base_dir, "package", script)
        if not os.path.exists(path):
            message = "Script {0} does not exist".format(path)
            raise AgentException(message)
        return path

    def dump_command_to_json(self, command):
        """
    Converts command to json file and returns file path
    """
        command_id = command['commandId']
        file_path = os.path.join(self.tmp_dir,
                                 "command-{0}.json".format(command_id))
        with open(file_path, "w") as f:
            content = json.dumps(command)
            f.write(content)
        return file_path
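
# Hedged sketch of the flat command dict this earliest variant expects (all values
# are hypothetical; the flat keys mirror the TODO notes above about fields still to
# be added on the server side).
example_command = {
    'commandId': '3-1',                        # hypothetical
    'serviceName': 'HBASE',
    'role': 'HBASE_MASTER',
    'roleCommand': 'START',
    'stackName': 'HDP',
    'stackVersion': '2.0.1',
    'scriptType': 'PYTHON',
    'script': 'scripts/hbase_master.py',       # hypothetical
    'timeout': '600',
}
# orchestrator = CustomServiceOrchestrator(config)  # config: an AmbariConfig-like object
# result = orchestrator.runCommand(example_command, '/tmp/out.txt', '/tmp/err.txt')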