Example #1
def namenode(action=None,
             hdfs_binary=None,
             do_format=True,
             upgrade_type=None,
             env=None):
    if action is None:
        raise Fail('"action" parameter is required for function namenode().')

    if action in ["start", "stop"] and hdfs_binary is None:
        raise Fail(
            '"hdfs_binary" parameter is required for function namenode().')

    if action == "configure":
        import params
        # we need this directory to be present before any action (HA manual
        # steps for an additional NameNode)
        create_name_dirs(params.dfs_name_dir)
    elif action == "start":
        Logger.info("Called service {0} with upgrade_type: {1}".format(
            action, str(upgrade_type)))
        setup_ranger_hdfs(upgrade_type=upgrade_type)
        import params
        if do_format and not params.hdfs_namenode_format_disabled:
            format_namenode()

        File(params.exclude_file_path,
             content=Template("exclude_hosts_list.j2"),
             owner=params.hdfs_user,
             group=params.user_group)

        if params.dfs_ha_enabled and \
          params.dfs_ha_namenode_standby is not None and \
          params.hostname == params.dfs_ha_namenode_standby:
            # if the current host is the standby NameNode in an HA deployment
            # run the bootstrap command, to start the NameNode in standby mode
            # this requires that the active NameNode is already up and running,
            # so this execute should be re-tried upon failure, up to a timeout
            success = bootstrap_standby_namenode(params)
            if not success:
                raise Fail("Could not bootstrap standby namenode")

        if upgrade_type == "rolling" and params.dfs_ha_enabled:
            # Most likely, ZKFC is up since RU will initiate the failover command. However, if that failed, it would have tried
            # to kill ZKFC manually, so we need to start it if not already running.
            safe_zkfc_op(action, env)

        options = ""
        if upgrade_type == "rolling":
            if params.upgrade_direction == Direction.UPGRADE:
                options = "-rollingUpgrade started"
            elif params.upgrade_direction == Direction.DOWNGRADE:
                options = "-rollingUpgrade downgrade"

        elif upgrade_type == "nonrolling":
            is_previous_image_dir = is_previous_fs_image()
            Logger.info(
                format(
                    "Previous file system image dir present is {is_previous_image_dir}"
                ))

            if params.upgrade_direction == Direction.UPGRADE:
                options = "-rollingUpgrade started"
            elif params.upgrade_direction == Direction.DOWNGRADE:
                options = "-rollingUpgrade downgrade"

        Logger.info(format("Option for start command: {options}"))

        service(action="start",
                name="namenode",
                user=params.hdfs_user,
                options=options,
                create_pid_dir=True,
                create_log_dir=True)

        if params.security_enabled:
            Execute(format(
                "{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"
            ),
                    user=params.hdfs_user)

        if params.dfs_ha_enabled:
            is_active_namenode_cmd = as_user(format(
                "{hdfs_binary} --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"
            ),
                                             params.hdfs_user,
                                             env={
                                                 'PATH': params.hadoop_bin_dir
                                             })
        else:
            is_active_namenode_cmd = True

        # During NonRolling Upgrade, both NameNodes are initially down,
        # so no point in checking if this is the active or standby.
        if upgrade_type == "nonrolling":
            is_active_namenode_cmd = False

        # ___Scenario___________|_Expected safemode state__|_Wait for safemode OFF____|
        # no-HA                 | ON -> OFF                | Yes                      |
        # HA and active         | ON -> OFF                | Yes                      |
        # HA and standby        | no change                | no check                 |
        # RU with HA on active  | ON -> OFF                | Yes                      |
        # RU with HA on standby | ON -> OFF                | Yes                      |
        # EU with HA on active  | no change                | no check                 |
        # EU with HA on standby | no change                | no check                 |
        # EU non-HA             | no change                | no check                 |

        check_for_safemode_off = False
        msg = ""
        if params.dfs_ha_enabled:
            if upgrade_type is not None:
                check_for_safemode_off = True
                msg = "Must wait to leave safemode since High Availability is enabled during a Stack Upgrade"
            else:
                Logger.info("Wait for NameNode to become active.")
                if is_active_namenode(hdfs_binary):  # active
                    check_for_safemode_off = True
                    msg = "Must wait to leave safemode since High Availability is enabled and this is the Active NameNode."
                else:
                    msg = "Will remain in the current safemode state."
        else:
            msg = "Must wait to leave safemode since High Availability is not enabled."
            check_for_safemode_off = True

        Logger.info(msg)

        # During a NonRolling (aka Express Upgrade), stay in safemode since the DataNodes are down.
        stay_in_safe_mode = False
        if upgrade_type == "nonrolling":
            stay_in_safe_mode = True

        if check_for_safemode_off:
            Logger.info("Stay in safe mode: {0}".format(stay_in_safe_mode))
            if not stay_in_safe_mode:
                wait_for_safemode_off(hdfs_binary)

        # Always run this on non-HA, or active NameNode during HA.
        create_hdfs_directories(is_active_namenode_cmd)
        create_ranger_audit_hdfs_directories(is_active_namenode_cmd)

    elif action == "stop":
        import params
        service(action="stop", name="namenode", user=params.hdfs_user)
    elif action == "status":
        import status_params
        check_process_status(status_params.namenode_pid_file)
    elif action == "decommission":
        decommission()
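
For context, this function is driven by a Script subclass in the service's master script. A minimal, hypothetical caller, sketched under the assumption that a params module and a stack-specific hdfs binary path exist as in the snippet (this is not the verbatim Ambari NameNode script):

class NameNode(Script):
    def start(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        hdfs_binary = "/usr/bin/hdfs"  # assumption: real code resolves this per stack version
        namenode(action="start", hdfs_binary=hdfs_binary,
                 upgrade_type=upgrade_type, env=env)

    def stop(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        namenode(action="stop", hdfs_binary="/usr/bin/hdfs", env=env)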
Example #2
    try:
      is_package_install_successful = False
      ret_code = self.install_packages(package_list)
      if ret_code == 0:
        self.structured_output['package_installation_result'] = 'SUCCESS'
        self.put_structured_out(self.structured_output)
        is_package_install_successful = True
      else:
        num_errors += 1
    except Exception, err:
      num_errors += 1
      Logger.logger.exception("Could not install packages. Error: {0}".format(str(err)))

    # Provide correct exit code
    if num_errors > 0:
      raise Fail("Failed to distribute repositories/install packages")

    # if installing a version of HDP that needs some symlink love, then create them
    if is_package_install_successful and 'actual_version' in self.structured_output:
      self._create_config_links_if_necessary(stack_id, self.structured_output['actual_version'])


  def _create_config_links_if_necessary(self, stack_id, stack_version):
    """
    Sets up the required structure for /etc/<component>/conf symlinks and <stack-root>/current
    configuration symlinks IFF the current stack is < HDP 2.3 and the new stack is >= HDP 2.3.

    stack_id:  stack id, i.e. HDP-2.3
    stack_version:  version to set, i.e. 2.3.0.0-1234
    """
    if stack_id is None:
Example #3
  def actionexecute(self, env):
    num_errors = 0

    # Parse parameters
    config = Script.get_config()

    repo_rhel_suse = config['configurations']['cluster-env']['repo_suse_rhel_template']
    repo_ubuntu = config['configurations']['cluster-env']['repo_ubuntu_template']
    template = repo_rhel_suse if OSCheck.is_redhat_family() or OSCheck.is_suse_family() else repo_ubuntu

    # Handle a SIGTERM and SIGINT gracefully
    signal.signal(signal.SIGTERM, self.abort_handler)
    signal.signal(signal.SIGINT, self.abort_handler)

    self.repository_version_id = None

    base_urls = []
    # Select dict that contains parameters
    try:
      if 'base_urls' in config['roleParams']:
        base_urls = json.loads(config['roleParams']['base_urls'])

      self.repository_version = config['roleParams']['repository_version']
      package_list = json.loads(config['roleParams']['package_list'])
      stack_id = config['roleParams']['stack_id']

      if 'repository_version_id' in config['roleParams']:
        self.repository_version_id = config['roleParams']['repository_version_id']
    except KeyError:
      pass

    # current stack information
    self.current_stack_version_formatted = None
    if 'stack_version' in config['hostLevelParams']:
      current_stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
      self.current_stack_version_formatted = format_stack_version(current_stack_version_unformatted)


    self.stack_name = Script.get_stack_name()
    if self.stack_name is None:
      raise Fail("Cannot determine the stack name")

    self.stack_root_folder = Script.get_stack_root()
    if self.stack_root_folder is None:
      raise Fail("Cannot determine the stack's root directory")

    if self.repository_version is None:
      raise Fail("Cannot determine the repository version to install")

    self.repository_version = self.repository_version.strip()

    # Install/update repositories
    self.current_repositories = []
    self.current_repo_files = set()

    # Enable base system repositories
    # We don't need that for RHEL family, because we leave all repos enabled
    # except disabled HDP* ones
    if OSCheck.is_suse_family():
      self.current_repositories.append('base')
    elif OSCheck.is_ubuntu_family():
      self.current_repo_files.add('base')

    Logger.info("Will install packages for repository version {0}".format(self.repository_version))

    if 0 == len(base_urls):
      Logger.warning("Repository list is empty. Ambari may not be managing the repositories for {0}.".format(self.repository_version))

    try:
      if 'repositoryFile' in config:
        create_repo_files(template, CommandRepository(config['repositoryFile']))
      else:
        append_to_file = False
        for url_info in base_urls:
          repo_name, repo_file = self.install_repository(url_info, append_to_file, template)
          self.current_repositories.append(repo_name)
          self.current_repo_files.add(repo_file)
          append_to_file = True

    except Exception, err:
      Logger.logger.exception("Cannot install repository files. Error: {0}".format(str(err)))
      num_errors += 1
Example #4
            elif driver_name == "oracle.jdbc.driver.OracleDriver":
                jdbc_name = default(
                    "/ambariLevelParams/custom_oracle_jdbc_name", None)
                previous_jdbc_jar_name = default(
                    "/ambariLevelParams/previous_custom_oracle_jdbc_name",
                    None)
                jdbc_driver_name = "oracle"
            elif driver_name == "org.hsqldb.jdbc.JDBCDriver":
                jdbc_name = default(
                    "/ambariLevelParams/custom_hsqldb_jdbc_name", None)
                previous_jdbc_jar_name = default(
                    "/ambariLevelParams/previous_custom_hsqldb_jdbc_name",
                    None)
                jdbc_driver_name = "hsqldb"
            else:
                raise Fail(
                    format("JDBC driver '{driver_name}' not supported."))
        else:
            continue
        sqoop_jdbc_drivers_dict.append(jdbc_name)
        sqoop_jdbc_drivers_to_remove[jdbc_name] = previous_jdbc_jar_name
        sqoop_jdbc_drivers_name_dict[jdbc_name] = jdbc_driver_name
jdk_location = config['ambariLevelParams']['jdk_location']

########################################################
############# Atlas related params #####################
########################################################
#region Atlas Hooks
sqoop_atlas_application_properties = default(
    '/configurations/sqoop-atlas-application.properties', {})
enable_atlas_hook = default('/configurations/sqoop-env/sqoop.atlas.hook',
                            False)
Example #5
        for check_name in FALLIBLE_CHECKS:
            if check_name in structured_output and "exit_code" in structured_output[check_name] \
                and structured_output[check_name]["exit_code"] != 0:
                error_message += "Check {0} was unsuccessful. Exit code: {1}.".format(check_name, \
                                                                                     structured_output[check_name]["exit_code"])
                if "message" in structured_output[check_name]:
                    error_message += " Message: {0}".format(
                        structured_output[check_name]["message"])
                error_message += "\n"

        Logger.info("Host checks completed.")
        Logger.debug("Structured output: " + str(structured_output))

        if error_message:
            Logger.error(error_message)
            raise Fail(error_message)

    def execute_transparent_huge_page_check(self, config):
        Logger.info("Transparent huge page check started.")

        thp_regex = r"\[(.+)\]"
        file_name = None
        if OSCheck.is_ubuntu_family():
            file_name = THP_FILE_UBUNTU
        elif OSCheck.is_redhat_family():
            file_name = THP_FILE_REDHAT
        if file_name and os.path.isfile(file_name):
            with open(file_name) as f:
                file_content = f.read()
                transparent_huge_page_check_structured_output = {
                    "exit_code": 0,
Example #6
class InstallPackages(Script):
    """
  This script is part of the Rolling Upgrade workflow and is described in the
  appropriate design doc.
  It installs repositories to the node and then installs packages.
  For now, repositories are installed into individual files.
  """

    UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]

    def __init__(self):
        super(InstallPackages, self).__init__()

        self.pkg_provider = get_provider("Package")
        self.repo_files = {}

    def actionexecute(self, env):
        num_errors = 0

        # Parse parameters
        config = Script.get_config()

        try:
            command_repository = CommandRepository(config['repositoryFile'])
        except KeyError:
            raise Fail(
                "The command repository indicated by 'repositoryFile' was not found"
            )

        repo_rhel_suse = config['configurations']['cluster-env'][
            'repo_suse_rhel_template']
        repo_ubuntu = config['configurations']['cluster-env'][
            'repo_ubuntu_template']
        template = repo_rhel_suse if OSCheck.is_redhat_family(
        ) or OSCheck.is_suse_family() else repo_ubuntu

        # Handle a SIGTERM and SIGINT gracefully
        signal.signal(signal.SIGTERM, self.abort_handler)
        signal.signal(signal.SIGINT, self.abort_handler)

        self.repository_version = command_repository.version_string

        # Select dict that contains parameters
        try:
            package_list = json.loads(config['roleParams']['package_list'])
            stack_id = config['roleParams']['stack_id']
        except KeyError:
            pass

        self.stack_name = Script.get_stack_name()
        if self.stack_name is None:
            raise Fail("Cannot determine the stack name")

        self.stack_root_folder = Script.get_stack_root()
        if self.stack_root_folder is None:
            raise Fail("Cannot determine the stack's root directory")

        if self.repository_version is None:
            raise Fail("Cannot determine the repository version to install")

        self.repository_version = self.repository_version.strip()

        try:
            if not command_repository.items:
                Logger.warning(
                    "Repository list is empty. Ambari may not be managing the repositories for {0}."
                    .format(self.repository_version))
            else:
                Logger.info(
                    "Will install packages for repository version {0}".format(
                        self.repository_version))
                new_repo_files = create_repo_files(template,
                                                   command_repository)
                self.repo_files.update(new_repo_files)
        except Exception, err:
            Logger.logger.exception(
                "Cannot install repository files. Error: {0}".format(str(err)))
            num_errors += 1

        # Build structured output with initial values
        self.structured_output = {
            'package_installation_result': 'FAIL',
            'repository_version_id': command_repository.version_id
        }

        self.put_structured_out(self.structured_output)

        if num_errors > 0:
            raise Fail("Failed to distribute repositories/install packages")

        # Initial list of versions, used to compute the new version installed
        self.old_versions = get_stack_versions(self.stack_root_folder)

        try:
            is_package_install_successful = False
            ret_code = self.install_packages(package_list)
            if ret_code == 0:
                self.structured_output[
                    'package_installation_result'] = 'SUCCESS'
                self.put_structured_out(self.structured_output)
                is_package_install_successful = True
            else:
                num_errors += 1
        except Exception, err:
            num_errors += 1
            Logger.logger.exception(
                "Could not install packages. Error: {0}".format(str(err)))
Example #7
                format(
                    "Will run prepare war cmd since marker file {libext_content_file} is missing."
                ))

    if run_prepare_war:
        # Time-consuming to run
        return_code, output = shell.call(command, user=params.oozie_user)
        if output is None:
            output = ""

        if return_code != 0 or "New Oozie WAR file with added".lower(
        ) not in output.lower():
            message = "Unexpected Oozie WAR preparation output {0}".format(
                output)
            Logger.error(message)
            raise Fail(message)

        # Generate marker files
        File(
            prepare_war_cmd_file,
            content=command_to_file,
            mode=0644,
        )
        File(
            libext_content_file,
            content=libext_content,
            mode=0644,
        )
    else:
        Logger.info(
            format(
Example #8
    def _llap_start(self, env, cleanup=False):
      import params
      env.set_params(params)
      Logger.info("Starting LLAP")
      LLAP_APP_NAME = 'llap0'

      # TODO, start only if not already running.
      # TODO : Currently hardcoded the params. Need to read the suggested values from hive2/hive-site.xml.
      # TODO, ensure that script works as hive from cmd when not cd'ed in /home/hive
      # Needs permission to write to hive home dir.

      unique_name = "llap-slider%s" % datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')

      cmd = format("{stack_root}/current/hive-server2-hive2/bin/hive --service llap --instances {params.num_llap_nodes}"
                   " --slider-am-container-mb {params.slider_am_container_mb} --size {params.llap_daemon_container_size}m "
                   " --cache {params.hive_llap_io_mem_size}m --xmx {params.llap_heap_size}m --loglevel {params.llap_log_level}"
                   " --output {unique_name}")
      if params.security_enabled:
        llap_keytab_splits = params.hive_llap_keytab_file.split("/")
        Logger.debug("llap_keytab_splits : {0}".format(llap_keytab_splits))
        cmd += format(" --slider-keytab-dir .slider/keytabs/{params.hive_user}/ --slider-keytab "
                      "{llap_keytab_splits[4]} --slider-principal {hive_headless_keytab}")

      # Append args.
      cmd+= " --args \" -XX:+UseG1GC -XX:TLABSize=8m -XX:+ResizeTLAB -XX:+UseNUMA -XX:+AggressiveOpts" \
            " -XX:+AlwaysPreTouch -XX:MetaspaceSize=1024m -XX:InitiatingHeapOccupancyPercent=80 -XX:MaxGCPauseMillis=200\""
      # TODO: Remove adding "-XX:MaxDirectMemorySize" when Driver starts appending itself.
      if params.hive_llap_io_mem_size > params.llap_heap_size:
        max_dir_mem_size = long(params.hive_llap_io_mem_size) + 256
        cmd = cmd[0:len(cmd)-1] + format(" -XX:MaxDirectMemorySize={max_dir_mem_size}m\"")

      run_file_path = None
      try:
        Logger.info(format("Command: {cmd}"))
        code, output, error = shell.checked_call(cmd, user=params.hive_user, stderr=subprocess.PIPE, logoutput=True)

        if code != 0 or output is None:
          raise Fail("Command failed with either non-zero return code or no output.")

        # E.g., output:
        # Prepared llap-slider-05Apr2016/run.sh for running LLAP on Slider
        exp = r"Prepared (.*?run.sh) for running LLAP"
        run_file_path = None
        out_splits = output.split("\n")
        for line in out_splits:
          line = line.strip()
          m = re.match(exp, line, re.I)
          if m and len(m.groups()) == 1:
            run_file_name = m.group(1)
            run_file_path = os.path.join(params.hive_user_home_dir, run_file_name)
            break
        if not run_file_path:
          raise Fail("Did not find run.sh file in output: " + str(output))

        Logger.info(format("Run file path: {run_file_path}"))
        if os.path.isfile(run_file_path):
          Execute(run_file_path, user=params.hive_user)

          # TODO : Sleep below is not a good idea. We need to check the status of LLAP app to figure out it got
          # launched properly and is in running state. Then go ahead with Hive Interactive Server start.
          Logger.info("Sleeping for 30 secs")
          time.sleep(30)
          Logger.info("Submitted LLAP app name : {0}".format(LLAP_APP_NAME))

          # TODO: Uncomment this when 'llapstatus' commands returns correct output.
          '''
          status = self.check_llap_app_status(LLAP_APP_NAME, params.num_retries_for_checking_llap_status)
          if status:
            Logger.info("LLAP app '{0}' deployed successfully.".format(LLAP_APP_NAME))
            return True
          else:
            return False
          '''
          return True
        else:
          raise Fail(format("Did not find run file {run_file_path}"))
      except:
        # Attempt to clean up the packaged application, or potentially rename it with a .bak
        if run_file_path is not None and cleanup:
          try:
            parent_dir = os.path.dirname(run_file_path)
            if os.path.isdir(parent_dir):
              shutil.rmtree(parent_dir)
          except Exception, e:
            Logger.error("Could not cleanup LLAP app package. Error: " + str(e))

        # throw the original exception
        raise
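
The run-file detection above is ordinary re parsing; isolated into a self-contained sketch (the sample output line is invented to match the comment in the code):

import re

# Invented sample of the slider output that the snippet parses.
output = "Prepared llap-slider-05Apr2016/run.sh for running LLAP on Slider"
exp = r"Prepared (.*?run.sh) for running LLAP"

run_file_name = None
for line in output.split("\n"):
    m = re.match(exp, line.strip(), re.I)
    if m and len(m.groups()) == 1:
        run_file_name = m.group(1)
        break

print(run_file_name)  # prints: llap-slider-05Apr2016/run.sh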
Example #9
File: script.py  Project: totongn/ambari
    def restart(self, env):
        """
    The default implementation of the restart command calls the stop and start methods.
    Feel free to override restart() with your own implementation.
    For client components we call install instead.
    """
        config = self.get_config()
        componentCategory = None
        try:
            componentCategory = config['roleParams']['component_category']
        except KeyError:
            pass

        upgrade_type_command_param = ""
        direction = None
        if config is not None:
            command_params = config[
                "commandParams"] if "commandParams" in config else None
            if command_params is not None:
                upgrade_type_command_param = command_params[
                    "upgrade_type"] if "upgrade_type" in command_params else ""
                direction = command_params[
                    "upgrade_direction"] if "upgrade_direction" in command_params else None

        upgrade_type = Script.get_upgrade_type(upgrade_type_command_param)
        is_stack_upgrade = upgrade_type is not None

        # need this before actually executing so that failures still report upgrade info
        if is_stack_upgrade:
            upgrade_info = {"upgrade_type": upgrade_type_command_param}
            if direction is not None:
                upgrade_info["direction"] = direction.upper()

            Script.structuredOut.update(upgrade_info)

        if componentCategory and componentCategory.strip().lower() == 'client':
            if is_stack_upgrade:
                # Remain backward compatible with the rest of the services that haven't switched to using
                # the pre_upgrade_restart method. Once done, remove the else-block.
                if "pre_upgrade_restart" in dir(self):
                    self.pre_upgrade_restart(env, upgrade_type=upgrade_type)
                else:
                    self.pre_rolling_restart(env)

            self.install(env)
        else:
            # To remain backward compatible with older stacks, only pass upgrade_type if available.
            # TODO, remove checking the argspec for "upgrade_type" once all of the services support that optional param.
            if "upgrade_type" in inspect.getargspec(self.stop).args:
                self.stop(env, upgrade_type=upgrade_type)
            else:
                if is_stack_upgrade:
                    self.stop(
                        env,
                        rolling_restart=(upgrade_type == UPGRADE_TYPE_ROLLING))
                else:
                    self.stop(env)

            if is_stack_upgrade:
                # Remain backward compatible with the rest of the services that haven't switched to using
                # the pre_upgrade_restart method. Once done, remove the else-block.
                if "pre_upgrade_restart" in dir(self):
                    self.pre_upgrade_restart(env, upgrade_type=upgrade_type)
                else:
                    self.pre_rolling_restart(env)

            service_name = config[
                'serviceName'] if config is not None and 'serviceName' in config else None
            try:
                #TODO Once the logic for pid is available from Ranger and Ranger KMS code, will remove the below if block.
                services_to_skip = ['RANGER', 'RANGER_KMS']
                if service_name in services_to_skip:
                    Logger.info(
                        'Temporarily skipping status check for {0} service only.'
                        .format(service_name))
                elif is_stack_upgrade:
                    Logger.info(
                        'Skipping status check for {0} service during upgrade'.
                        format(service_name))
                else:
                    self.status(env)
                    raise Fail(
                        "Stop command finished but the process kept running.")
            except ComponentIsNotRunning as e:
                pass  # expected
            except ClientComponentHasNoStatus as e:
                pass  # expected

            # To remain backward compatible with older stacks, only pass upgrade_type if available.
            # TODO, remove checking the argspec for "upgrade_type" once all of the services support that optional param.
            self.pre_start()
            if "upgrade_type" in inspect.getargspec(self.start).args:
                self.start(env, upgrade_type=upgrade_type)
            else:
                if is_stack_upgrade:
                    self.start(
                        env,
                        rolling_restart=(upgrade_type == UPGRADE_TYPE_ROLLING))
                else:
                    self.start(env)
            self.post_start()

            if is_stack_upgrade:
                # Remain backward compatible with the rest of the services that haven't switched to using
                # the post_upgrade_restart method. Once done, remove the else-block.
                if "post_upgrade_restart" in dir(self):
                    self.post_upgrade_restart(env, upgrade_type=upgrade_type)
                else:
                    self.post_rolling_restart(env)

        if self.should_expose_component_version("restart"):
            self.save_component_version_to_structured_out()
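
The backward-compatibility dispatch via inspect.getargspec is the core trick here; reduced to a self-contained toy (OldService/NewService are illustrations, not Ambari classes):

import inspect

class OldService(object):
    def stop(self, env):
        print("old-style stop")

class NewService(object):
    def stop(self, env, upgrade_type=None):
        print("new-style stop, upgrade_type={0}".format(upgrade_type))

def stop_service(service, env, upgrade_type):
    # Only pass upgrade_type if the service's stop() declares it.
    if "upgrade_type" in inspect.getargspec(service.stop).args:
        service.stop(env, upgrade_type=upgrade_type)
    else:
        service.stop(env)

stop_service(OldService(), env=None, upgrade_type="rolling")  # old-style stop
stop_service(NewService(), env=None, upgrade_type="rolling")  # new-style stop, upgrade_type=rolling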
Example #10
def _validate_msi_install():
  if not _is_msi_installed() and os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_FAILED)):
    raise Fail("Current or previous hdp.msi install failed. Check hdp.msi install logs")
  return _is_msi_installed()
Example #11
def copy_jdbc_connector(ranger_home):
    import params

    if params.jdbc_jar_name is None and params.driver_curl_source.endswith(
            "/None"):
        error_message = format(
            "{db_flavor} jdbc driver cannot be downloaded from {jdk_location}\nPlease run 'ambari-server setup --jdbc-db={db_flavor} --jdbc-driver={{path_to_jdbc}}' on ambari-server host."
        )
        raise Fail(error_message)

    if params.driver_curl_source and not params.driver_curl_source.endswith(
            "/None"):
        if params.previous_jdbc_jar and os.path.isfile(
                params.previous_jdbc_jar):
            File(params.previous_jdbc_jar, action='delete')

    File(params.downloaded_custom_connector,
         content=DownloadSource(params.driver_curl_source),
         mode=0644)

    driver_curl_target = format("{ranger_home}/ews/lib/{jdbc_jar_name}")

    if params.db_flavor.lower() == 'sqla':
        Execute(('tar', '-xvf', params.downloaded_custom_connector, '-C',
                 params.tmp_dir),
                sudo=True)

        Execute(('cp', '--remove-destination', params.jar_path_in_archive,
                 os.path.join(ranger_home, 'ews', 'lib')),
                path=["/bin", "/usr/bin/"],
                sudo=True)

        File(os.path.join(ranger_home, 'ews', 'lib', 'sajdbc4.jar'), mode=0644)

        Directory(params.jdbc_libs_dir, cd_access="a", create_parents=True)

        Execute(as_sudo([
            'yes', '|', 'cp', params.libs_path_in_archive, params.jdbc_libs_dir
        ],
                        auto_escape=False),
                path=["/bin", "/usr/bin/"])
    else:
        Execute(
            ('cp', '--remove-destination', params.downloaded_custom_connector,
             os.path.join(ranger_home, 'ews', 'lib')),
            path=["/bin", "/usr/bin/"],
            sudo=True)

        File(os.path.join(ranger_home, 'ews', 'lib', params.jdbc_jar_name),
             mode=0644)

    ModifyPropertiesFile(
        format("{ranger_home}/install.properties"),
        properties=params.config['configurations']['admin-properties'],
        owner=params.unix_user,
    )

    if params.db_flavor.lower() == 'sqla':
        ModifyPropertiesFile(
            format("{ranger_home}/install.properties"),
            properties={
                'SQL_CONNECTOR_JAR':
                format('{ranger_home}/ews/lib/sajdbc4.jar')
            },
            owner=params.unix_user,
        )
    else:
        ModifyPropertiesFile(
            format("{ranger_home}/install.properties"),
            properties={'SQL_CONNECTOR_JAR': format('{driver_curl_target}')},
            owner=params.unix_user,
        )
Example #12
def setup_ranger_plugin(component_select_name,
                        service_name,
                        downloaded_custom_connector,
                        driver_curl_source,
                        driver_curl_target,
                        java_home,
                        repo_name,
                        plugin_repo_dict,
                        ranger_env_properties,
                        plugin_properties,
                        policy_user,
                        policymgr_mgr_url,
                        plugin_enabled,
                        component_user,
                        component_group,
                        api_version=None,
                        **kwargs):
    File(downloaded_custom_connector,
         content=DownloadSource(driver_curl_source),
         mode=0644)

    Execute(('cp', '--remove-destination', downloaded_custom_connector,
             driver_curl_target),
            path=["/bin", "/usr/bin/"],
            sudo=True)

    File(driver_curl_target, mode=0644)

    hdp_version = get_hdp_version(component_select_name)
    file_path = format(
        '/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/install.properties'
    )

    if not os.path.isfile(file_path):
        raise Fail(
            format(
                'Ranger {service_name} plugin install.properties file does not exist at {file_path}'
            ))

    ModifyPropertiesFile(file_path, properties=plugin_properties)

    custom_plugin_properties = dict()
    custom_plugin_properties['CUSTOM_USER'] = component_user
    custom_plugin_properties['CUSTOM_GROUP'] = component_group
    ModifyPropertiesFile(file_path, properties=custom_plugin_properties)

    if plugin_enabled:
        cmd = (format('enable-{service_name}-plugin.sh'), )
        if api_version == 'v2':
            ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url)
        else:
            ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url)

        ranger_adm_obj.create_ranger_repository(
            service_name, repo_name, plugin_repo_dict,
            ranger_env_properties['ranger_admin_username'],
            ranger_env_properties['ranger_admin_password'],
            ranger_env_properties['admin_username'],
            ranger_env_properties['admin_password'], policy_user)
    else:
        cmd = (format('disable-{service_name}-plugin.sh'), )

    cmd_env = {
        'JAVA_HOME': java_home,
        'PWD': format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin'),
        'PATH': format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin')
    }

    Execute(
        cmd,
        environment=cmd_env,
        logoutput=True,
        sudo=True,
    )
Example #13
def get_stack_feature_version(config):
  """
  Uses the specified ConfigDictionary to determine which version to use for stack
  feature checks.

  Normally, commandParams/version is the correct value to use, as it represents the exact
  4-digit stack version/build being upgraded to or downgraded to. However, there are cases where the
  commands being sent are to stop running services which are on a different stack version from the
  version being upgraded/downgraded to. As a result, the configurations sent for these specific
  stop commands do not match commandParams/version.
  :param config:  a ConfigDictionary instance to extract the hostLevelParams
                  and commandParams from.
  :return: the version to use when checking stack features.
  """
  from resource_management.libraries.functions.default import default

  if "hostLevelParams" not in config or "commandParams" not in config:
    raise Fail("Unable to determine the correct version since hostLevelParams and commandParams were not present in the configuration dictionary")

  # should always be there
  stack_version = config['hostLevelParams']['stack_version']

  # something like 2.4.0.0-1234; represents the version for the command
  # (or None if this is a cluster install and it hasn't been calculated yet)
  # this is always guaranteed to be the correct version for the command, even in
  # upgrade and downgrade scenarios
  command_version = default("/commandParams/version", None)
  command_stack = default("/commandParams/target_stack", None)

  # something like 2.4.0.0-1234
  # (or None if this is a cluster install and it hasn't been calculated yet)
  current_cluster_version = default("/hostLevelParams/current_version", None)

  # UPGRADE or DOWNGRADE (or None)
  upgrade_direction = default("/commandParams/upgrade_direction", None)

  # start out with the value that's right 99% of the time
  version_for_stack_feature_checks = command_version if command_version is not None else stack_version

  # if this is not an upgrade, then we take the simple path
  if upgrade_direction is None:
    Logger.info(
      "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3} -> {4}".format(
        stack_version, current_cluster_version, command_stack, command_version, version_for_stack_feature_checks))

    return version_for_stack_feature_checks

  # STOP commands are the troublemakers, as they are intended to stop a service that is not on
  # the version of the stack being upgraded/downgraded to
  is_stop_command = _is_stop_command(config)
  if not is_stop_command:
    Logger.info(
      "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}, Upgrade Direction={4} -> {5}".format(
        stack_version, current_cluster_version, command_stack, command_version, upgrade_direction,
        version_for_stack_feature_checks))

    return version_for_stack_feature_checks

  # something like 2.5.0.0-5678 (or None)
  downgrade_from_version = default("/commandParams/downgrade_from_version", None)

  # guaranteed to have a STOP command now during an UPGRADE/DOWNGRADE, check direction
  if upgrade_direction.lower() == Direction.DOWNGRADE.lower():
    if downgrade_from_version is None:
      Logger.warning(
        "Unable to determine the version being downgraded when stopping services, using {0}".format(
          version_for_stack_feature_checks))
    else:
      version_for_stack_feature_checks = downgrade_from_version
  else:
    # UPGRADE
    if current_cluster_version is not None:
      version_for_stack_feature_checks = current_cluster_version
    else:
      version_for_stack_feature_checks = command_version if command_version is not None else stack_version

  Logger.info(
    "Stack Feature Version Info: Cluster Stack={0}, Cluster Current Version={1}, Command Stack={2}, Command Version={3}, Upgrade Direction={4}, stop_command={5} -> {6}".format(
      stack_version, current_cluster_version, command_stack, command_version, upgrade_direction,
      is_stop_command, version_for_stack_feature_checks))

  return version_for_stack_feature_checks
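
A typical consumer pairs this helper with a stack-feature check. A hedged usage sketch (the check_stack_feature/StackFeature import paths follow common Ambari params.py usage, but treat the exact paths as assumptions):

from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature, get_stack_feature_version
from resource_management.libraries.script.script import Script

config = Script.get_config()
version_for_stack_feature_checks = get_stack_feature_version(config)

# Gate version-dependent behavior on the resolved version rather than on raw commandParams/version.
if check_stack_feature(StackFeature.ROLLING_UPGRADE, version_for_stack_feature_checks):
    pass  # rolling-upgrade-aware code path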
Example #14
def download_database_connector_if_needed():
    """
  Downloads the database connector to use when connecting to the metadata storage
  """
    import params
    if params.streamline_storage_type != 'mysql' and params.streamline_storage_type != 'oracle':
        # In any other case than oracle and mysql, e.g. postgres, just return.
        return

    if params.jdbc_driver_jar is None:
        if "mysql" in params.streamline_storage_type:
            Logger.error(
                "Failed to find mysql-java-connector jar. Make sure you followed the steps to register mysql driver"
            )
            Logger.info("Users should register the mysql java driver jar.")
            Logger.info("yum install mysql-connector-java*")
            Logger.info(
                "sudo ambari-server setup --jdbc-db=mysql --jdbc-driver=/usr/share/java/mysql-connector-java.jar"
            )
            raise Fail('Unable to establish jdbc connection to your ' +
                       params.streamline_storage_type + ' instance.')
        if "oracle" in params.streamline_storage_type:
            Logger.error(
                "Failed to find ojdbc jar. Please download and make sure you followed the steps to register oracle jdbc driver"
            )
            Logger.info("Users should register the oracle ojdbc driver jar.")
            Logger.info(
                "Create a symlink e.g. ln -s /usr/share/java/ojdbc6.jar /usr/share/java/ojdbc.jar"
            )
            Logger.info(
                "sudo ambari-server setup --jdbc-db=oracle --jdbc-driver=/usr/share/java/ojdbc.jar"
            )
            raise Fail('Unable to establish jdbc connection to your ' +
                       params.streamline_storage_type + ' instance.')

    File(params.check_db_connection_jar,
         content=DownloadSource(
             format("{jdk_location}/{check_db_connection_jar_name}")))

    target_jar_with_directory = params.connector_download_dir + os.path.sep + params.jdbc_driver_jar
    target_jar_bootstrap_dir = params.connector_bootstrap_download_dir + os.path.sep + params.jdbc_driver_jar

    if not os.path.exists(target_jar_with_directory):
        File(params.downloaded_custom_connector,
             content=DownloadSource(params.connector_curl_source))

        Execute(
            ('cp', '--remove-destination', params.downloaded_custom_connector,
             target_jar_with_directory),
            path=["/bin", "/usr/bin/"],
            sudo=True)

        File(target_jar_with_directory, owner="root", group=params.user_group)

    if not os.path.exists(target_jar_bootstrap_dir):
        File(params.downloaded_custom_connector,
             content=DownloadSource(params.connector_curl_source))

        Execute(('cp', '--remove-destination',
                 params.downloaded_custom_connector, target_jar_bootstrap_dir),
                path=["/bin", "/usr/bin/"],
                sudo=True)

        File(target_jar_bootstrap_dir, owner="root", group=params.user_group)
Example #15
def setup_infra_solr(name=None):
    import params

    if name == 'server':
        Directory([
            params.infra_solr_log_dir, params.infra_solr_piddir,
            params.infra_solr_datadir, params.infra_solr_data_resources_dir
        ],
                  mode=0755,
                  cd_access='a',
                  create_parents=True,
                  owner=params.infra_solr_user,
                  group=params.user_group)

        Directory([params.solr_dir, params.infra_solr_conf],
                  mode=0755,
                  cd_access='a',
                  owner=params.infra_solr_user,
                  group=params.user_group,
                  create_parents=True,
                  recursive_ownership=True)

        File(params.infra_solr_log,
             mode=0644,
             owner=params.infra_solr_user,
             group=params.user_group,
             content='')

        File(format("{infra_solr_conf}/infra-solr-env.sh"),
             content=InlineTemplate(params.solr_env_content),
             mode=0755,
             owner=params.infra_solr_user,
             group=params.user_group)

        File(format("{infra_solr_datadir}/solr.xml"),
             content=InlineTemplate(params.solr_xml_content),
             owner=params.infra_solr_user,
             group=params.user_group)

        if params.is_support_multi:
            for index, mount in enumerate(params.mounts):
                infra_solr_datadir = mount + '/ambari_infra_solr'
                infra_solr_data_resources_dir = os.path.join(
                    infra_solr_datadir, 'resources')
                Directory([infra_solr_datadir, infra_solr_data_resources_dir],
                          mode=0755,
                          cd_access='a',
                          create_parents=True,
                          owner=params.infra_solr_user,
                          group=params.user_group)
                File(format("{infra_solr_conf}/infra-solr-env" + str(index) +
                            ".sh"),
                     content=InlineTemplate(params.solr_env_content_multi),
                     mode=0755,
                     owner=params.infra_solr_user,
                     group=params.user_group)

                File(infra_solr_datadir + "/solr.xml",
                     content=InlineTemplate(params.solr_xml_content),
                     owner=params.infra_solr_user,
                     group=params.user_group)

        File(format("{infra_solr_conf}/log4j.properties"),
             content=InlineTemplate(params.solr_log4j_content),
             owner=params.infra_solr_user,
             group=params.user_group)

        custom_security_json_location = format(
            "{infra_solr_conf}/custom-security.json")
        File(custom_security_json_location,
             content=InlineTemplate(params.infra_solr_security_json_content),
             owner=params.infra_solr_user,
             group=params.user_group,
             mode=0640)

        jaas_file = params.infra_solr_jaas_file if params.security_enabled else None
        url_scheme = 'https' if params.infra_solr_ssl_enabled else 'http'

        if params.security_enabled:
            kinit_cmd = format(
                "{kinit_path_local} -kt {infra_solr_kerberos_keytab} {infra_solr_kerberos_principal};"
            )
            Execute(kinit_cmd, user=params.infra_solr_user)

        create_ambari_solr_znode()

        security_json_file_location = custom_security_json_location \
            if params.infra_solr_security_json_content and str(params.infra_solr_security_json_content).strip() \
            else format("{infra_solr_conf}/security.json")  # security.json file to upload

        if params.security_enabled:
            File(format("{infra_solr_jaas_file}"),
                 content=Template("infra_solr_jaas.conf.j2"),
                 owner=params.infra_solr_user)

            File(format("{infra_solr_conf}/security.json"),
                 content=Template("infra-solr-security.json.j2"),
                 owner=params.infra_solr_user,
                 group=params.user_group,
                 mode=0640)

        solr_cloud_util.set_cluster_prop(
            zookeeper_quorum=params.zookeeper_quorum,
            solr_znode=params.infra_solr_znode,
            java64_home=params.java64_home,
            prop_name="urlScheme",
            prop_value=url_scheme,
            jaas_file=jaas_file)

        solr_cloud_util.setup_kerberos_plugin(
            zookeeper_quorum=params.zookeeper_quorum,
            solr_znode=params.infra_solr_znode,
            jaas_file=jaas_file,
            java64_home=params.java64_home,
            secure=params.security_enabled,
            security_json_location=security_json_file_location)

        if params.security_enabled:
            solr_cloud_util.secure_solr_znode(
                zookeeper_quorum=params.zookeeper_quorum,
                solr_znode=params.infra_solr_znode,
                jaas_file=jaas_file,
                java64_home=params.java64_home,
                sasl_users_str=params.infra_solr_sasl_user)

    elif name == 'client':
        solr_cloud_util.setup_solr_client(params.config)

    else:
        raise Fail('Neither client nor server was selected to install.')
Example #16
File: script.py  Project: totongn/ambari
    def __init__(self):
        if Script.instance is not None:
            raise Fail(
                "An instantiation already exists! Use the get_instance() method.")
Example #17
    def compute_actual_version(self):
        """
    After packages are installed, determine what the new actual version is.
    """

        # If the repo contains a build number, optimistically assume it to be the actual_version. It will get changed
        # to the correct value if it is not.
        self.actual_version = None
        self.repo_version_with_build_number = None
        if self.repository_version:
            m = re.search(r"[\d\.]+-\d+", self.repository_version)
            if m:
                # Contains a build number
                self.repo_version_with_build_number = self.repository_version
                self.structured_output[
                    'actual_version'] = self.repo_version_with_build_number  # This is the best value known so far.
                self.put_structured_out(self.structured_output)

        Logger.info(
            "Attempting to determine actual version with build number.")
        Logger.info("Old versions: {0}".format(self.old_versions))

        new_versions = get_stack_versions(self.stack_root_folder)
        Logger.info("New versions: {0}".format(new_versions))

        deltas = set(new_versions) - set(self.old_versions)
        Logger.info("Deltas: {0}".format(deltas))

        # Get version without build number
        normalized_repo_version = self.repository_version.split('-')[0]

        if 1 == len(deltas):
            self.actual_version = next(iter(deltas)).strip()
            self.structured_output['actual_version'] = self.actual_version
            self.put_structured_out(self.structured_output)
            write_actual_version_to_history_file(normalized_repo_version,
                                                 self.actual_version)
            Logger.info(
                "Found actual version {0} by checking the delta between versions before and after installing packages"
                .format(self.actual_version))
        else:
            # If the first install attempt does a partial install and is unable to report this to the server,
            # then a subsequent attempt will report an empty delta. For this reason, we search for the best-fit version for the repo version.
            Logger.info(
                "Cannot determine the actual version installed by checking the delta between versions "
                "before and after installing packages")
            Logger.info(
                "Will try to find the actual version by searching for the best possible match in the list of installed versions"
            )
            self.actual_version = self.find_best_fit_version(
                new_versions, self.repository_version)
            if self.actual_version is not None:
                self.actual_version = self.actual_version.strip()
                self.structured_output['actual_version'] = self.actual_version
                self.put_structured_out(self.structured_output)
                Logger.info(
                    "Found actual version {0} by searching for best possible match"
                    .format(self.actual_version))
            else:
                msg = "Could not determine actual version installed. Try reinstalling packages again."
                raise Fail(msg)
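
The build-number heuristic above is just a regex; in isolation (version strings invented):

import re

for v in ["2.4.0.0-1234", "2.4.0.0"]:
    has_build_number = bool(re.search(r"[\d\.]+-\d+", v))
    print("{0} -> {1}".format(v, has_build_number))
# 2.4.0.0-1234 -> True
# 2.4.0.0 -> False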
Example #18
File: storm.py  Project: mbigelow/ambari
def _find_real_user_min_uid():
  with open('/etc/login.defs') as f:
    for line in f:
      if line.strip().startswith('UID_MIN') and len(line.split()) == 2 and line.split()[1].isdigit():
        return int(line.split()[1])
  raise Fail("Unable to find UID_MIN in file /etc/login.defs. Expecting format e.g.: 'UID_MIN    500'")  
Example #19
File: flume.py  Project: tsingfu/bigdata
def flume(action=None):
    import params

    if action == 'config':
        # remove previously defined meta files
        for n in find_expected_agent_names(params.flume_conf_dir):
            File(
                os.path.join(params.flume_conf_dir, n, 'ambari-meta.json'),
                action="delete",
            )
        if params.security_enabled:
            File(format("{conf_dir}/flume_jaas.conf"),
                 owner=params.flume_user,
                 content=InlineTemplate(params.flume_jaas_conf_template))

        Directory(
            params.flume_run_dir,
            group=params.user_group,
            owner=params.flume_user,
        )

        Directory(
            params.flume_conf_dir,
            create_parents=True,
            owner=params.flume_user,
        )
        Directory(
            params.flume_log_dir,
            group=params.user_group,
            owner=params.flume_user,
            create_parents=True,
            cd_access="a",
            mode=0755,
        )

        flume_agents = {}
        if params.flume_conf_content is not None:
            flume_agents = build_flume_topology(params.flume_conf_content)

        for agent in flume_agents.keys():
            flume_agent_conf_dir = os.path.join(params.flume_conf_dir, agent)
            flume_agent_conf_file = os.path.join(flume_agent_conf_dir,
                                                 'flume.conf')
            flume_agent_meta_file = os.path.join(flume_agent_conf_dir,
                                                 'ambari-meta.json')
            flume_agent_log4j_file = os.path.join(flume_agent_conf_dir,
                                                  'log4j.properties')
            flume_agent_env_file = os.path.join(flume_agent_conf_dir,
                                                'flume-env.sh')

            Directory(
                flume_agent_conf_dir,
                owner=params.flume_user,
            )

            PropertiesFile(flume_agent_conf_file,
                           properties=flume_agents[agent],
                           owner=params.flume_user,
                           mode=0644)

            File(flume_agent_log4j_file,
                 content=Template('log4j.properties.j2', agent_name=agent),
                 owner=params.flume_user,
                 mode=0644)

            File(flume_agent_meta_file,
                 content=json.dumps(ambari_meta(agent, flume_agents[agent])),
                 owner=params.flume_user,
                 mode=0644)

            File(flume_agent_env_file,
                 owner=params.flume_user,
                 content=InlineTemplate(params.flume_env_sh_template))

            if params.has_metric_collector:
                File(os.path.join(flume_agent_conf_dir,
                                  "flume-metrics2.properties"),
                     owner=params.flume_user,
                     content=Template("flume-metrics2.properties.j2"))

    elif action == 'start':
        # desired state for service should be STARTED
        if len(params.flume_command_targets) == 0:
            _set_desired_state('STARTED')

        # It is important to run this command as a background process.

        flume_base = as_user(format(
            "{flume_bin} agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}} > {flume_log_dir}/{{4}}.out 2>&1"
        ),
                             params.flume_user,
                             env={'JAVA_HOME': params.java_home}) + " &"

        for agent in cmd_target_names():
            flume_agent_conf_dir = params.flume_conf_dir + os.sep + agent
            flume_agent_conf_file = flume_agent_conf_dir + os.sep + "flume.conf"
            flume_agent_pid_file = params.flume_run_dir + os.sep + agent + ".pid"

            if not os.path.isfile(flume_agent_conf_file):
                continue

            if not is_flume_process_live(flume_agent_pid_file):
                # TODO someday make the ganglia ports configurable
                extra_args = ''
                if params.ganglia_server_host is not None:
                    extra_args = '-Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts={0}:{1}'
                    extra_args = extra_args.format(params.ganglia_server_host,
                                                   '8655')
                if params.has_metric_collector:
                    extra_args = '-Dflume.monitoring.type=org.apache.hadoop.metrics2.sink.flume.FlumeTimelineMetricsSink ' \
                                 '-Dflume.monitoring.node={0}:{1}'
                    # TODO check if this is used.
                    extra_args = extra_args.format(
                        params.metric_collector_host,
                        params.metric_collector_port)

                flume_cmd = flume_base.format(agent, flume_agent_conf_dir,
                                              flume_agent_conf_file,
                                              extra_args, agent)

                Execute(flume_cmd,
                        wait_for_finish=False,
                        environment={'JAVA_HOME': params.java_home})
                # sometimes startup spawns a couple of threads - so only the first line may count
                pid_cmd = as_sudo(('pgrep', '-o', '-u', params.flume_user, '-f', format('^{java_home}.*{agent}.*'))) + \
                          " | " + as_sudo(('tee', flume_agent_pid_file)) + "  && test ${PIPESTATUS[0]} -eq 0"

                try:
                    Execute(pid_cmd, logoutput=True, tries=20, try_sleep=10)
                except:
                    show_logs(params.flume_log_dir, params.flume_user)
                    raise

    elif action == 'stop':
        # desired state for service should be INSTALLED
        if len(params.flume_command_targets) == 0:
            _set_desired_state('INSTALLED')

        pid_files = glob.glob(params.flume_run_dir + os.sep + "*.pid")

        if 0 == len(pid_files):
            return

        agent_names = cmd_target_names()

        for agent in agent_names:
            pid_file = format("{flume_run_dir}/{agent}.pid")

            if is_flume_process_live(pid_file):
                pid = shell.checked_call(("cat", pid_file),
                                         sudo=True)[1].strip()
                Execute(("kill", "-15", pid),
                        sudo=True)  # kill command has to be a tuple
                if not await_flume_process_termination(pid_file, try_count=30):
                    Execute(("kill", "-9", pid), sudo=True)

            if not await_flume_process_termination(pid_file, try_count=10):
                show_logs(params.flume_log_dir, params.flume_user)
                raise Fail("Can't stop flume agent: {0}".format(agent))

            File(pid_file, action='delete')
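
Both the start and stop branches above lean on is_flume_process_live, which is defined elsewhere in the Ambari codebase. A minimal sketch of such a pid-file liveness check, assuming the conventional signal-0 probe (the helper below is illustrative, not the project's actual implementation):

import os


def is_flume_process_live_sketch(pid_file):
    # Read the pid recorded at startup and probe it with signal 0,
    # which checks existence/permissions without delivering a signal.
    if not os.path.isfile(pid_file):
        return False
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)
        return True
    except (ValueError, OSError):
        return False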
Example #21
def namenode(action=None,
             hdfs_binary=None,
             do_format=True,
             upgrade_type=None,
             upgrade_suspended=False,
             env=None):

    if action is None:
        raise Fail('"action" parameter is required for function namenode().')

    if action in ["start", "stop"] and hdfs_binary is None:
        raise Fail(
            '"hdfs_binary" parameter is required for function namenode().')

    if action == "configure":
        import params
        #we need this directory to be present before any action(HA manual steps for
        #additional namenode)
        create_name_dirs(params.dfs_name_dir)

        # set up failover / secure ZooKeeper ACLs; this feature is supported from HDP 2.6 onwards
        set_up_zkfc_security(params)
    elif action == "start":
        Logger.info("Called service {0} with upgrade_type: {1}".format(
            action, str(upgrade_type)))
        setup_ranger_hdfs(upgrade_type=upgrade_type)
        import params

        File(params.exclude_file_path,
             content=Template("exclude_hosts_list.j2"),
             owner=params.hdfs_user,
             group=params.user_group)

        if params.hdfs_include_file:
            File(params.include_file_path,
                 content=Template("include_hosts_list.j2"),
                 owner=params.hdfs_user,
                 group=params.user_group)

        if do_format and not params.hdfs_namenode_format_disabled:
            format_namenode()

        if params.dfs_ha_enabled and \
          params.dfs_ha_namenode_standby is not None and \
          (params.hostname == params.dfs_ha_namenode_standby or params.public_hostname == params.dfs_ha_namenode_standby):
            # if the current host is the standby NameNode in an HA deployment
            # run the bootstrap command, to start the NameNode in standby mode
            # this requires that the active NameNode is already up and running,
            # so this execute should be re-tried upon failure, up to a timeout
            success = bootstrap_standby_namenode(params)
            if not success:
                raise Fail("Could not bootstrap standby namenode")

        if upgrade_type == constants.UPGRADE_TYPE_ROLLING and params.dfs_ha_enabled:
            # Most likely, ZKFC is up since RU will initiate the failover command. However, if that failed, it would have tried
            # to kill ZKFC manually, so we need to start it if not already running.
            safe_zkfc_op(action, env)

        options = ""
        if upgrade_type == constants.UPGRADE_TYPE_ROLLING:
            if params.upgrade_direction == Direction.UPGRADE:
                options = "-rollingUpgrade started"
            elif params.upgrade_direction == Direction.DOWNGRADE:
                options = "-rollingUpgrade downgrade"
        elif upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING:
            is_previous_image_dir = is_previous_fs_image()
            Logger.info("Previous file system image dir present is {0}".format(
                str(is_previous_image_dir)))

            if params.upgrade_direction == Direction.UPGRADE:
                options = "-rollingUpgrade started"
            elif params.upgrade_direction == Direction.DOWNGRADE:
                options = "-rollingUpgrade downgrade"
        elif upgrade_type == constants.UPGRADE_TYPE_HOST_ORDERED:
            # nothing special to do for HOU - should be very close to a normal restart
            pass
        elif upgrade_type is None and upgrade_suspended is True:
            # the rollingUpgrade flag must be passed in during a suspended upgrade when starting NN
            if os.path.exists(
                    namenode_upgrade.get_upgrade_in_progress_marker()):
                options = "-rollingUpgrade started"
            else:
                Logger.info(
                    "The NameNode upgrade marker file {0} does not exist, yet an upgrade is currently suspended. "
                    "Assuming that the upgrade of NameNode has not occurred yet."
                    .format(namenode_upgrade.get_upgrade_in_progress_marker()))

        Logger.info("Options for start command are: {0}".format(options))

        service(action="start",
                name="namenode",
                user=params.hdfs_user,
                options=options,
                create_pid_dir=True,
                create_log_dir=True)

        if params.security_enabled:
            Execute(format(
                "{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"
            ),
                    user=params.hdfs_user)

        # ___Scenario___________|_Expected safemode state__|_Wait for safemode OFF____|
        # no-HA                 | ON -> OFF                | Yes                      |
        # HA and active         | ON -> OFF                | Yes                      |
        # HA and standby        | no change                | No                       |
        # RU with HA on active  | ON -> OFF                | Yes                      |
        # RU with HA on standby | ON -> OFF                | Yes                      |
        # EU with HA on active  | ON -> OFF                | No                       |
        # EU with HA on standby | ON -> OFF                | No                       |
        # EU non-HA             | ON -> OFF                | No                       |

        # because we do things like create directories after starting NN,
        # the vast majority of the time this should be True - it should only
        # be False if this is HA and we are the Standby NN
        ensure_safemode_off = True

        # True if this is the only NameNode (non-HA) or if its the Active one in HA
        is_active_namenode = True

        if params.dfs_ha_enabled:
            Logger.info(
                "Waiting for the NameNode to broadcast whether it is Active or Standby..."
            )

            if is_this_namenode_active() is False:
                # we are the STANDBY NN
                is_active_namenode = False

                # we are the STANDBY NN and this restart is not part of an upgrade
                if upgrade_type is None:
                    ensure_safemode_off = False

        # During an Express Upgrade, NameNode will not leave SafeMode until the DataNodes are started,
        # so always disable the Safemode check
        if upgrade_type == constants.UPGRADE_TYPE_NON_ROLLING:
            ensure_safemode_off = False

        # some informative logging separate from the above logic to keep things a little cleaner
        if ensure_safemode_off:
            Logger.info(
                "Waiting for this NameNode to leave Safemode due to the following conditions: HA: {0}, isActive: {1}, upgradeType: {2}"
                .format(params.dfs_ha_enabled, is_active_namenode,
                        upgrade_type))
        else:
            Logger.info(
                "Skipping Safemode check due to the following conditions: HA: {0}, isActive: {1}, upgradeType: {2}"
                .format(params.dfs_ha_enabled, is_active_namenode,
                        upgrade_type))

        # wait for Safemode to end
        if ensure_safemode_off:
            wait_for_safemode_off(hdfs_binary)

        # Always run this on the "Active" NN unless Safemode has been ignored
        # in the case where safemode was ignored (like during an express upgrade), then
        # NN will be in SafeMode and cannot have directories created
        if is_active_namenode and ensure_safemode_off:
            create_hdfs_directories()
            create_ranger_audit_hdfs_directories()
        else:
            Logger.info(
                "Skipping creation of HDFS directories since this is either not the Active NameNode or we did not wait for Safemode to finish."
            )

    elif action == "stop":
        import params
        service(action="stop", name="namenode", user=params.hdfs_user)
    elif action == "status":
        import status_params
        check_process_status(status_params.namenode_pid_file)
    elif action == "decommission":
        decommission()
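
wait_for_safemode_off and is_this_namenode_active are imported from elsewhere in Ambari; conceptually, the former polls "hdfs dfsadmin -safemode get" until the NameNode reports OFF. A rough illustration of that polling loop, using a plain subprocess call instead of Ambari's Execute resource (the function name, timeout, and interval here are assumptions):

import subprocess
import time


def wait_for_safemode_off_sketch(hdfs_binary, timeout_secs=180, interval=10):
    # Illustrative only: poll the NameNode safemode state until it
    # reports OFF or the timeout elapses.
    deadline = time.time() + timeout_secs
    while time.time() < deadline:
        output = subprocess.check_output(
            [hdfs_binary, "dfsadmin", "-safemode", "get"]).decode("utf-8")
        if "OFF" in output:
            return True
        time.sleep(interval)
    return False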
Example #22
                return None
        except urllib2.URLError as e:
            if isinstance(e, urllib2.HTTPError):
                raise Fail(
                    "Error getting {0} repository for component {1}. Http status code - {2}. \n {3}"
                    .format(name, component, e.code, e.read()))
            else:
                raise Fail(
                    "Error getting {0} repository for component {1}. Reason - {2}."
                    .format(name, component, e.reason))
        except httplib.BadStatusLine:
            raise Fail(
                "Ranger Admin service is not reachable, please restart the service and then try again"
            )
        except TimeoutError:
            raise Fail("Connection to Ranger Admin failed. Reason - timeout")

    def create_ranger_repository(self, component, repo_name, repo_properties,
                                 ambari_ranger_admin, ambari_ranger_password,
                                 admin_uname, admin_password, policy_user):
        """
    :param component: name of the component, from which it will get or create repository
    :param repo_name: name of the repository to be get or create
    :param repo_properties: dict of repository to be create if not exist
    :param ambari_ranger_admin: ambari admin user creation username
    :param ambari_ranger_password: ambari admin user creation password
    :param admin_uname: ranger admin username
    :param admin_password: ranger admin password
    :param policy_user: use this policy user for policies that will be used during repository creation
    """
        response_code = self.check_ranger_login_urllib2(self.baseUrl)
Example #23
def setup_infra_solr(name=None):
    import params

    if name == 'server':
        Directory([
            params.infra_solr_log_dir, params.infra_solr_piddir,
            params.infra_solr_datadir, params.infra_solr_data_resources_dir
        ],
                  mode=0755,
                  cd_access='a',
                  create_parents=True,
                  owner=params.infra_solr_user,
                  group=params.user_group)

        Directory([params.solr_dir, params.infra_solr_conf],
                  mode=0755,
                  cd_access='a',
                  owner=params.infra_solr_user,
                  group=params.user_group,
                  create_parents=True,
                  recursive_ownership=True)

        File(params.infra_solr_log,
             mode=0644,
             owner=params.infra_solr_user,
             group=params.user_group,
             content='')

        File(format("{infra_solr_conf}/infra-solr-env.sh"),
             content=InlineTemplate(params.solr_env_content),
             mode=0755,
             owner=params.infra_solr_user,
             group=params.user_group)

        File(format("{infra_solr_datadir}/solr.xml"),
             content=InlineTemplate(params.solr_xml_content),
             owner=params.infra_solr_user,
             group=params.user_group)

        File(format("{infra_solr_conf}/log4j.properties"),
             content=InlineTemplate(params.solr_log4j_content),
             owner=params.infra_solr_user,
             group=params.user_group)

        custom_security_json_location = format(
            "{infra_solr_conf}/custom-security.json")
        File(custom_security_json_location,
             content=InlineTemplate(params.infra_solr_security_json_content),
             owner=params.infra_solr_user,
             group=params.user_group,
             mode=0640)

        if params.security_enabled:
            File(format("{infra_solr_jaas_file}"),
                 content=Template("infra_solr_jaas.conf.j2"),
                 owner=params.infra_solr_user)

            File(format("{infra_solr_conf}/security.json"),
                 content=Template("infra-solr-security.json.j2"),
                 owner=params.infra_solr_user,
                 group=params.user_group,
                 mode=0640)

        File(os.path.join(params.limits_conf_dir, 'infra-solr.conf'),
             owner='root',
             group='root',
             mode=0644,
             content=Template("infra-solr.conf.j2"))

    elif name == 'client':
        solr_cloud_util.setup_solr_client(params.config)

    else:
        raise Fail('Neither client nor server was selected to install.')
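
As a usage note, a helper like this would typically be invoked from the component's configure step; hypothetically:

# Hypothetical call sites, mirroring how Ambari scripts usually wire
# such helpers into a component's configure() method:
setup_infra_solr(name='server')   # on Infra Solr server hosts
setup_infra_solr(name='client')   # on client-only hosts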
Example #24
 def create_repository_urllib2(self, data, usernamepassword, policy_user):
     """
 :param data: repository dict
 :param usernamepassword: user credentials using which repository needs to be created
 :param policy_user: use this policy user for policies that will be used during repository creation
 :return Returns created repository response else None
 """
     try:
         searchRepoURL = self.urlReposPub
         base64string = base64.encodestring(
             '{0}'.format(usernamepassword)).replace('\n', '')
         headers = {
             'Accept': 'application/json',
             "Content-Type": "application/json"
         }
         request = urllib2.Request(searchRepoURL, data, headers)
         request.add_header("Authorization",
                            "Basic {0}".format(base64string))
         result = openurl(request, timeout=20)
         response_code = result.getcode()
         response = json.loads(json.JSONEncoder().encode(result.read()))
         if response_code == 200:
             Logger.info('Repository created Successfully')
             # Get Policies
             repoData = json.loads(data)
             repoName = repoData['name']
             typeOfPolicy = repoData['repositoryType']
             # Get Policies by repo name
             policyList = self.get_policy_by_repo_name(
                 name=repoName,
                 component=typeOfPolicy,
                 status="true",
                 usernamepassword=usernamepassword)
             if policyList is not None and (len(policyList)) > 0:
                 policiesUpdateCount = 0
                 for policy in policyList:
                     updatedPolicyObj = self.get_policy_params(
                         typeOfPolicy, policy, policy_user)
                     policyResCode = self.update_ranger_policy(
                         updatedPolicyObj['id'],
                         json.dumps(updatedPolicyObj), usernamepassword)
                     if policyResCode == 200:
                         policiesUpdateCount = policiesUpdateCount + 1
                     else:
                         Logger.info('Policy Update failed')
                         # Check for count of updated policies
                 if len(policyList) == policiesUpdateCount:
                     Logger.info(
                         "Ranger Repository created successfully and policies updated successfully providing ambari-qa user all permissions"
                     )
                     return response
                 else:
                     return None
             else:
                 Logger.info(
                     "Policies not found for the newly created Repository")
             return None
         else:
             Logger.info('Repository creation failed')
             return None
     except urllib2.URLError as e:
         if isinstance(e, urllib2.HTTPError):
             raise Fail(
                 "Error creating repository. Http status code - {0}. \n {1}"
                 .format(e.code, e.read()))
         else:
             raise Fail("Error creating repository. Reason - {0}.".format(
                 e.reason))
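
Several of these Ranger examples repeat the same urllib2 boilerplate: base64-encode the credentials, set JSON headers, and attach an HTTP Basic Authorization header. A condensed sketch of that recurring pattern, with a hypothetical helper name:

import base64
import urllib2


def make_ranger_request(url, usernamepassword, data=None):
    # Hypothetical helper condensing the pattern used above: build a
    # urllib2.Request with JSON headers and HTTP Basic authentication.
    request = urllib2.Request(url, data)
    base64string = base64.encodestring(usernamepassword).replace('\n', '')
    request.add_header("Content-Type", "application/json")
    request.add_header("Accept", "application/json")
    request.add_header("Authorization", "Basic {0}".format(base64string))
    return request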
Example #25
File: kms.py  Project: totongn/ambari
    result = urllib2.urlopen(request, timeout=20)
    response_code = result.getcode()
    response = json.loads(json.JSONEncoder().encode(result.read()))
    if response_code == 200:
      Logger.info('Repository created Successfully')
      return True
    else:
      Logger.info('Repository not created')
      return False
  except urllib2.URLError as e:
    if isinstance(e, urllib2.HTTPError):
      raise Fail("Error creating service. Http status code - {0}. \n {1}".format(e.code, e.read()))
    else:
      raise Fail("Error creating service. Reason - {0}.".format(e.reason))
  except socket.timeout as e:
    raise Fail("Error creating service. Reason - {0}".format(e))

@safe_retry(times=5, sleep_time=8, backoff_factor=1.5, err_class=Fail, return_on_fail=False)
def get_repo(url, name, usernamepassword):
  try:
    base_url = url + '/service/public/v2/api/service?serviceName=' + name + '&serviceType=kms&isEnabled=true'
    request = urllib2.Request(base_url)
    base64string = base64.encodestring(usernamepassword).replace('\n', '')
    request.add_header("Content-Type", "application/json")
    request.add_header("Accept", "application/json")
    request.add_header("Authorization", "Basic {0}".format(base64string))
    result = urllib2.urlopen(request, timeout=20)
    response_code = result.getcode()
    response = json.loads(result.read())
    if response_code == 200 and len(response) > 0:
      for repo in response:
Example #26
class Rangeradmin:
    sInstance = None

    def __init__(self,
                 url='http://localhost:6080',
                 skip_if_rangeradmin_down=True):

        self.baseUrl = url
        self.urlLogin = self.baseUrl + '/login.jsp'
        self.urlLoginPost = self.baseUrl + '/j_spring_security_check'
        self.urlRepos = self.baseUrl + '/service/assets/assets'
        self.urlReposPub = self.baseUrl + '/service/public/api/repository'
        self.urlPolicies = self.baseUrl + '/service/public/api/policy'
        self.urlGroups = self.baseUrl + '/service/xusers/groups'
        self.urlUsers = self.baseUrl + '/service/xusers/users'
        self.urlSecUsers = self.baseUrl + '/service/xusers/secure/users'
        self.skip_if_rangeradmin_down = skip_if_rangeradmin_down

        if self.skip_if_rangeradmin_down:
            Logger.info("Rangeradmin: Skip ranger admin if it's down !")

    @safe_retry(times=5,
                sleep_time=8,
                backoff_factor=1.5,
                err_class=Fail,
                return_on_fail=None)
    def get_repository_by_name_urllib2(self, name, component, status,
                                       usernamepassword):
        """
    :param name: name of the component, from which, function will search in list of repositories
    :param component: component for which repository has to be checked
    :param status: active or inactive
    :param usernamepassword: user credentials using which repository needs to be searched
    :return Returns Ranger repository dict if found otherwise None
    """
        try:
            searchRepoURL = self.urlReposPub + "?name=" + name + "&type=" + component + "&status=" + status
            request = urllib2.Request(searchRepoURL)
            base64string = base64.encodestring(usernamepassword).replace(
                '\n', '')
            request.add_header("Content-Type", "application/json")
            request.add_header("Accept", "application/json")
            request.add_header("Authorization",
                               "Basic {0}".format(base64string))
            result = openurl(request, timeout=20)
            response_code = result.getcode()
            response = json.loads(result.read())
            if response_code == 200 and len(response['vXRepositories']) > 0:
                for repo in response['vXRepositories']:
                    repoDump = json.loads(json.JSONEncoder().encode(repo))
                    if repoDump['name'].lower() == name.lower():
                        return repoDump
                return None
            else:
                return None
        except urllib2.URLError as e:
            if isinstance(e, urllib2.HTTPError):
                raise Fail(
                    "Error getting {0} repository for component {1}. Http status code - {2}. \n {3}"
                    .format(name, component, e.code, e.read()))
            else:
                raise Fail(
                    "Error getting {0} repository for component {1}. Reason - {2}."
                    .format(name, component, e.reason))
        except httplib.BadStatusLine:
            raise Fail(
                "Ranger Admin service is not reachable, please restart the service and then try again"
            )
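
The @safe_retry decorator applied above comes from Ambari's resource_management library; the gist is retry-with-backoff that returns a fallback value once the named error class has exhausted all attempts. A simplified sketch of that idea (not the library's actual implementation):

import time
from functools import wraps


def safe_retry_sketch(times, sleep_time, backoff_factor, err_class,
                      return_on_fail):
    # Simplified illustration: retry the wrapped call with exponential
    # backoff, returning a fallback value after the final failure.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            delay = sleep_time
            for attempt in range(times):
                try:
                    return func(*args, **kwargs)
                except err_class:
                    if attempt == times - 1:
                        return return_on_fail
                    time.sleep(delay)
                    delay *= backoff_factor
        return wrapper
    return decorator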
Example #27
class InstallPackages(Script):
  """
  This script is part of the Rolling Upgrade workflow and is described in
  the appropriate design doc.
  It installs repositories on the node and then installs packages.
  For now, repositories are installed into individual files.
  """

  UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]

  def actionexecute(self, env):
    num_errors = 0

    # Parse parameters
    config = Script.get_config()

    repo_rhel_suse = config['configurations']['cluster-env']['repo_suse_rhel_template']
    repo_ubuntu = config['configurations']['cluster-env']['repo_ubuntu_template']
    template = repo_rhel_suse if OSCheck.is_redhat_family() or OSCheck.is_suse_family() else repo_ubuntu

    # Handle a SIGTERM and SIGINT gracefully
    signal.signal(signal.SIGTERM, self.abort_handler)
    signal.signal(signal.SIGINT, self.abort_handler)

    self.repository_version_id = None

    base_urls = []
    # Select dict that contains parameters
    try:
      if 'base_urls' in config['roleParams']:
        base_urls = json.loads(config['roleParams']['base_urls'])

      self.repository_version = config['roleParams']['repository_version']
      package_list = json.loads(config['roleParams']['package_list'])
      stack_id = config['roleParams']['stack_id']

      if 'repository_version_id' in config['roleParams']:
        self.repository_version_id = config['roleParams']['repository_version_id']
    except KeyError:
      pass

    # current stack information
    self.current_stack_version_formatted = None
    if 'stack_version' in config['hostLevelParams']:
      current_stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
      self.current_stack_version_formatted = format_stack_version(current_stack_version_unformatted)


    self.stack_name = Script.get_stack_name()
    if self.stack_name is None:
      raise Fail("Cannot determine the stack name")

    self.stack_root_folder = Script.get_stack_root()
    if self.stack_root_folder is None:
      raise Fail("Cannot determine the stack's root directory")

    if self.repository_version is None:
      raise Fail("Cannot determine the repository version to install")

    self.repository_version = self.repository_version.strip()

    # Install/update repositories
    self.current_repositories = []
    self.current_repo_files = set()

    # Enable base system repositories
    # We don't need that for RHEL family, because we leave all repos enabled
    # except disabled HDP* ones
    if OSCheck.is_suse_family():
      self.current_repositories.append('base')
    elif OSCheck.is_ubuntu_family():
      self.current_repo_files.add('base')

    Logger.info("Will install packages for repository version {0}".format(self.repository_version))

    if 0 == len(base_urls):
      Logger.warning("Repository list is empty. Ambari may not be managing the repositories for {0}.".format(self.repository_version))

    try:
      if 'repositoryFile' in config:
        create_repo_files(template, CommandRepository(config['repositoryFile']))
      else:
        append_to_file = False
        for url_info in base_urls:
          repo_name, repo_file = self.install_repository(url_info, append_to_file, template)
          self.current_repositories.append(repo_name)
          self.current_repo_files.add(repo_file)
          append_to_file = True

    except Exception as err:
      Logger.logger.exception("Cannot install repository files. Error: {0}".format(str(err)))
      num_errors += 1

    # Build structured output with initial values
    self.structured_output = {
      'installed_repository_version': self.repository_version,
      'stack_id': stack_id,
      'package_installation_result': 'FAIL'
    }

    if self.repository_version_id is not None:
      self.structured_output['repository_version_id'] = self.repository_version_id

    self.put_structured_out(self.structured_output)

    if num_errors > 0:
      raise Fail("Failed to distribute repositories/install packages")

    # Initial list of versions, used to compute the new version installed
    self.old_versions = get_stack_versions(self.stack_root_folder)

    try:
      is_package_install_successful = False
      ret_code = self.install_packages(package_list)
      if ret_code == 0:
        self.structured_output['package_installation_result'] = 'SUCCESS'
        self.put_structured_out(self.structured_output)
        is_package_install_successful = True
      else:
        num_errors += 1
    except Exception as err:
      num_errors += 1
      Logger.logger.exception("Could not install packages. Error: {0}".format(str(err)))
Example #28
    def create_ambari_admin_user(self, ambari_admin_username,
                                 ambari_admin_password, usernamepassword):
        """
    :param ambari_admin_username: username of user to be created
    :param ambari_admin_username: user password of user to be created
    :return Returns response code for successful user creation else None
    """
        flag_ambari_admin_present = False
        match = re.match('[a-zA-Z0-9_\S]+$', ambari_admin_password)
        if match is None:
            raise Fail(
                'Invalid password given for Ranger Admin user for Ambari')
        try:
            url = self.urlUsers + '?name=' + str(ambari_admin_username)
            request = urllib2.Request(url)
            base64string = base64.encodestring(usernamepassword).replace(
                '\n', '')
            request.add_header("Content-Type", "application/json")
            request.add_header("Accept", "application/json")
            request.add_header("Authorization",
                               "Basic {0}".format(base64string))
            result = openurl(request, timeout=20)
            response_code = result.getcode()
            response = json.loads(result.read())
            if response_code == 200 and len(response['vXUsers']) >= 0:
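                # note: len(...) >= 0 is always true, so an empty vXUsers
                # list also falls through to the user-creation branch below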
                for vxuser in response['vXUsers']:
                    if vxuser['name'] == ambari_admin_username:
                        flag_ambari_admin_present = True
                        break
                    else:
                        flag_ambari_admin_present = False

                if flag_ambari_admin_present:
                    Logger.info(ambari_admin_username +
                                ' user already exists.')
                    return response_code
                else:
                    Logger.info(
                        ambari_admin_username +
                        ' user is not present, creating user using given configurations'
                    )
                    url = self.urlSecUsers
                    admin_user = dict()
                    admin_user['status'] = 1
                    admin_user['userRoleList'] = ['ROLE_SYS_ADMIN']
                    admin_user['name'] = ambari_admin_username
                    admin_user['password'] = ambari_admin_password
                    admin_user['description'] = ambari_admin_username
                    admin_user['firstName'] = ambari_admin_username
                    data = json.dumps(admin_user)
                    base64string = base64.encodestring(
                        '{0}'.format(usernamepassword)).replace('\n', '')
                    headers = {
                        'Accept': 'application/json',
                        "Content-Type": "application/json"
                    }
                    request = urllib2.Request(url, data, headers)
                    request.add_header("Authorization",
                                       "Basic {0}".format(base64string))
                    result = openurl(request, timeout=20)
                    response_code = result.getcode()
                    response = json.loads(json.JSONEncoder().encode(
                        result.read()))
                    if response_code == 200 and response is not None:
                        Logger.info('Ambari admin user creation successful.')
                        return response_code
                    else:
                        Logger.info('Ambari admin user creation failed.')
                        return None
            else:
                return None
        except urllib2.URLError as e:
            if isinstance(e, urllib2.HTTPError):
                raise Fail(
                    "Error creating ambari admin user. Http status code - {0}. \n {1}"
                    .format(e.code, e.read()))
            else:
                raise Fail(
                    "Error creating ambari admin user. Reason - {0}.".format(
                        e.reason))
Example #29
  def actionexecute(self, env):
    resolve_ambari_config()

    # Parse parameters from command json file.
    config = Script.get_config()

    host_name = socket.gethostname()
    version = default('/roleParams/version', None)

    # These 2 variables are optional
    service_package_folder = default('/commandParams/service_package_folder', None)
    if service_package_folder is None:
      service_package_folder = default('/serviceLevelParams/service_package_folder', None)
    hooks_folder = default('/commandParams/hooks_folder', None)

    tasks = json.loads(config['roleParams']['tasks'])
    if tasks:
      for t in tasks:
        task = ExecuteTask(t)
        Logger.info(str(task))

        # If a (script, function) exists, it overwrites the command.
        if task.script and task.function:
          file_cache = FileCache(agent_config)

          server_url_prefix = default('/ambariLevelParams/jdk_location', "")

          if service_package_folder and hooks_folder:
            command_paths = {
              "commandParams": {
                "service_package_folder": service_package_folder,
              },
              "clusterLevelParams": {
                   "hooks_folder": hooks_folder
              }
            } 

            base_dir = file_cache.get_service_base_dir(command_paths, server_url_prefix)
          else:
            base_dir = file_cache.get_custom_actions_base_dir(server_url_prefix)

          script_path = os.path.join(base_dir, task.script)
          if not os.path.exists(script_path):
            message = "Script %s does not exist" % str(script_path)
            raise Fail(message)

          # Notice that the script_path is now the fully qualified path, and the
          # same command-#.json file is used.
          # Also, the python wrapper is used, since it sets up the correct environment variables
          command_params = ["/usr/bin/ambari-python-wrap",
                            script_path,
                            task.function,
                            self.command_data_file,
                            self.basedir,
                            self.stroutfile,
                            self.logging_level,
                            Script.get_tmp_dir()]

          task.command = "source /var/lib/ambari-agent/ambari-env.sh ; " + " ".join(command_params)
          # Replace redundant whitespace to make the unit tests easier to validate
          task.command = re.sub(r"\s+", " ", task.command).strip()

        if task.command:
          task.command = replace_variables(task.command, host_name, version)
          shell.checked_call(task.command, logoutput=True, quiet=True)
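
replace_variables is defined elsewhere; presumably it substitutes the host name and stack version into placeholder tokens in the task command. A guess at its shape (the token syntax below is an assumption, not taken from the Ambari source):

def replace_variables_sketch(cmd, host_name, version):
    # Assumed token syntax; the real helper may differ.
    if cmd is None:
        return None
    return cmd.replace("{{host_name}}", host_name) \
              .replace("{{version}}", version or "")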
Example #30
    def prepare_libext_directory():
        """
    Performs the following actions on libext:
      - creates <stack-root>/current/oozie/libext and recursively
      - set 777 permissions on it and its parents.
      - downloads JDBC driver JAR if needed
      - copies Falcon JAR for the Oozie WAR if needed
    """
        import params

        # some stack versions don't need the lzo compression libraries
        target_version_needs_compression_libraries = params.version and check_stack_feature(
            StackFeature.LZO, params.version)

        # ensure the directory exists
        Directory(params.oozie_libext_dir, mode=0777)

        # get all hadooplzo* JAR files
        # <stack-selector-tool> set hadoop-client has not run yet, therefore we cannot use
        # <stack-root>/current/hadoop-client ; we must use params.version directly
        # however, this only works when upgrading beyond 2.2.0.0; don't do this
        # for downgrade to 2.2.0.0 since hadoop-lzo will not be present
        # This can also be called during a Downgrade.
        # When a version is Installed, it is responsible for downloading the hadoop-lzo packages
        # if lzo is enabled.
        if params.lzo_enabled and (
                params.upgrade_direction == Direction.UPGRADE
                or target_version_needs_compression_libraries):
            hadoop_lzo_pattern = 'hadoop-lzo*.jar'
            hadoop_client_new_lib_dir = format(
                "{stack_root}/{version}/hadoop/lib")

            # use glob.glob (a list) so the emptiness check below works;
            # glob.iglob returns a generator, which is always truthy
            files = glob.glob(
                os.path.join(hadoop_client_new_lib_dir, hadoop_lzo_pattern))
            if not files:
                raise Fail("There are no files at {0} matching {1}".format(
                    hadoop_client_new_lib_dir, hadoop_lzo_pattern))

            # copy files into libext
            files_copied = False
            for jar_file in files:  # renamed to avoid shadowing the builtin 'file'
                if os.path.isfile(jar_file):
                    Logger.info("Copying {0} to {1}".format(
                        str(jar_file), params.oozie_libext_dir))
                    shutil.copy2(jar_file, params.oozie_libext_dir)
                    files_copied = True

            if not files_copied:
                raise Fail("There are no files at {0} matching {1}".format(
                    hadoop_client_new_lib_dir, hadoop_lzo_pattern))

        # copy ext ZIP to libext dir
        oozie_ext_zip_file = params.ext_js_path

        # something like <stack-root>/current/oozie-server/libext/ext-2.2.zip
        oozie_ext_zip_target_path = os.path.join(params.oozie_libext_dir,
                                                 params.ext_js_file)

        if not os.path.isfile(oozie_ext_zip_file):
            raise Fail("Unable to copy {0} because it does not exist".format(
                oozie_ext_zip_file))

        Logger.info("Copying {0} to {1}".format(oozie_ext_zip_file,
                                                params.oozie_libext_dir))
        Execute(("cp", oozie_ext_zip_file, params.oozie_libext_dir), sudo=True)
        Execute(("chown", format("{oozie_user}:{user_group}"),
                 oozie_ext_zip_target_path),
                sudo=True)
        File(oozie_ext_zip_target_path, mode=0644)

        # Redownload jdbc driver to a new current location
        oozie.download_database_library_if_needed()

        # get the upgrade version in the event that it's needed
        upgrade_stack = stack_select._get_upgrade_stack()
        if upgrade_stack is None or len(
                upgrade_stack) < 2 or upgrade_stack[1] is None:
            raise Fail(
                "Unable to determine the stack that is being upgraded to or downgraded to."
            )

        stack_version = upgrade_stack[1]

        # copy the Falcon JAR if needed; falcon has not upgraded yet, so we must
        # use the versioned falcon directory
        if params.has_falcon_host:
            versioned_falcon_jar_directory = "{0}/{1}/falcon/oozie/ext/falcon-oozie-el-extension-*.jar".format(
                params.stack_root, stack_version)
            Logger.info("Copying {0} to {1}".format(
                versioned_falcon_jar_directory, params.oozie_libext_dir))

            Execute(
                format(
                    '{sudo} cp {versioned_falcon_jar_directory} {oozie_libext_dir}'
                ))
            Execute(
                format(
                    '{sudo} chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'
                ))