Code Example #1
File: stack_select.py Project: maduhu/HDP2.5-ambari
def get_hadoop_dir(target, force_latest_on_upgrade=False):
  """
  Return the hadoop shared directory in the following override order
  1. Use default for 2.1 and lower
  2. If 2.2 and higher, use <stack-root>/current/hadoop-client/{target}
  3. If 2.2 and higher AND for an upgrade, use <stack-root>/<version>/hadoop/{target}.
  However, if the upgrade has not yet invoked <stack-selector-tool>, return the current
  version of the component.
  :target: the target directory
  :force_latest_on_upgrade: if True, then this will return the "current" directory
  without the stack version built into the path, such as <stack-root>/current/hadoop-client
  """
  stack_root = Script.get_stack_root()
  stack_version = Script.get_stack_version()

  if target not in HADOOP_DIR_DEFAULTS:
    raise Fail("Target {0} not defined".format(target))

  hadoop_dir = HADOOP_DIR_DEFAULTS[target]

  formatted_stack_version = format_stack_version(stack_version)
  if formatted_stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_stack_version):
    # home uses a different template
    if target == "home":
      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client")
    else:
      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client", target)

    # if we are not forcing "current" for HDP 2.2, then attempt to determine
    # if the exact version needs to be returned in the directory
    if not force_latest_on_upgrade:
      stack_info = _get_upgrade_stack()

      if stack_info is not None:
        stack_version = stack_info[1]

        # determine if <stack-selector-tool> has been run and if not, then use the current
        # hdp version until this component is upgraded
        current_stack_version = get_role_component_current_stack_version()
        if current_stack_version is not None and stack_version != current_stack_version:
          stack_version = current_stack_version

        if target == "home":
          # home uses a different template
          hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop")
        else:
          hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop", target)

  return hadoop_dir
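
A quick sketch of the three-way resolution described in the docstring (the template string and version values here are hypothetical, patterned on the .format() calls above):

# Sketch only -- HADOOP_DIR_TEMPLATE is assumed to be a positional template of
# the shape implied by the .format() calls in get_hadoop_dir().
HADOOP_DIR_TEMPLATE = "{0}/{1}/{2}/{3}"
stack_root = "/usr/hdp"

# 2.2 and higher, outside an upgrade: the "current" symlink is used
print(HADOOP_DIR_TEMPLATE.format(stack_root, "current", "hadoop-client", "conf"))
# -> /usr/hdp/current/hadoop-client/conf

# mid-upgrade, once <stack-selector-tool> has run for this component
print(HADOOP_DIR_TEMPLATE.format(stack_root, "2.5.0.0-1234", "hadoop", "conf"))
# -> /usr/hdp/2.5.0.0-1234/hadoop/conf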
Code Example #2
def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file, xa_audit_db_password,
                                ssl_truststore_password, ssl_keystore_password, component_user, component_group, java_home):

  stack_root = Script.get_stack_root()
  service_name = str(service_name).lower()
  cred_lib_path = format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/install/lib/*')
  cred_setup_prefix = (format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)

  if service_name == 'nifi':
    cred_lib_path = format('{stack_root}/{stack_version}/{service_name}/ext/ranger/install/lib/*')
    cred_setup_prefix = (format('{stack_root}/{stack_version}/{service_name}/ext/ranger/scripts/ranger_credential_helper.py'), '-l', cred_lib_path)

  if audit_db_is_enabled:
    cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'auditDBCred', '-v', PasswordString(xa_audit_db_password), '-c', '1')
    Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)

  cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'sslKeyStore', '-v', PasswordString(ssl_keystore_password), '-c', '1')
  Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)

  cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'sslTrustStore', '-v', PasswordString(ssl_truststore_password), '-c', '1')
  Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)

  File(credential_file,
    owner = component_user,
    group = component_group,
    mode = 0640
  )
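
For reference, Execute() receives a flat tuple here; a sketch of the command line the sslKeyStore call assembles (paths and the secret are illustrative; PasswordString is presumably what lets the framework mask the secret when the command is logged):

# Illustrative argv produced by the tuple concatenation above.
cred_setup_prefix = ("/usr/hdp/2.5.0.0-1234/ranger-hdfs-plugin/ranger_credential_helper.py",
                     "-l", "/usr/hdp/2.5.0.0-1234/ranger-hdfs-plugin/install/lib/*")
cred_setup = cred_setup_prefix + ("-f", "/etc/ranger/creds.jceks",
                                  "-k", "sslKeyStore", "-v", "secret", "-c", "1")
print(" ".join(cred_setup))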
Code Example #3
File: copy_tarball.py Project: maduhu/HDP2.5-ambari
def get_tarball_paths(name, use_upgrading_version_during_upgrade=True, custom_source_file=None, custom_dest_file=None):
  """
  For a given tarball name, get the source and destination paths to use.
  :param name: Tarball name
  :param use_upgrading_version_during_upgrade:
  :param custom_source_file: If specified, use this source path instead of the default one from the map.
  :param custom_dest_file: If specified, use this destination path instead of the default one from the map.
  :return: A tuple of (success status, source path, destination path)
  """
  stack_name = Script.get_stack_name()

  if not stack_name:
    Logger.error("Cannot copy {0} tarball to HDFS because stack name could not be determined.".format(str(name)))
    return (False, None, None)

  stack_version = get_current_version(use_upgrading_version_during_upgrade)
  if not stack_version:
    Logger.error("Cannot copy {0} tarball to HDFS because stack version could be be determined.".format(str(name)))
    return (False, None, None)

  stack_root = Script.get_stack_root()
  if not stack_root:
    Logger.error("Cannot copy {0} tarball to HDFS because stack root could be be determined.".format(str(name)))
    return (False, None, None)

  if name is None or name.lower() not in TARBALL_MAP:
    Logger.error("Cannot copy tarball to HDFS because {0} is not supported in stack {1} for this operation.".format(str(name), str(stack_name)))
    return (False, None, None)
  (source_file, dest_file) = TARBALL_MAP[name.lower()]

  if custom_source_file is not None:
    source_file = custom_source_file

  if custom_dest_file is not None:
    dest_file = custom_dest_file

  source_file = source_file.replace(STACK_NAME_PATTERN, stack_name.lower())
  dest_file = dest_file.replace(STACK_NAME_PATTERN, stack_name.lower())

  source_file = source_file.replace(STACK_ROOT_PATTERN, stack_root.lower())
  dest_file = dest_file.replace(STACK_ROOT_PATTERN, stack_root.lower())

  source_file = source_file.replace(STACK_VERSION_PATTERN, stack_version)
  dest_file = dest_file.replace(STACK_VERSION_PATTERN, stack_version)

  return (True, source_file, dest_file)
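
A self-contained sketch of the placeholder substitution performed above (the exact pattern strings are assumptions; the real constants are defined alongside TARBALL_MAP):

# Assumed "{{ ... }}"-style pattern constants; values are illustrative.
STACK_NAME_PATTERN = "{{ stack_name }}"
STACK_ROOT_PATTERN = "{{ stack_root }}"
STACK_VERSION_PATTERN = "{{ stack_version }}"

source_file = "{{ stack_root }}/{{ stack_version }}/tez/lib/tez.tar.gz"
source_file = source_file.replace(STACK_ROOT_PATTERN, "/usr/hdp")
source_file = source_file.replace(STACK_VERSION_PATTERN, "2.5.0.0-1234")
print(source_file)  # /usr/hdp/2.5.0.0-1234/tez/lib/tez.tar.gz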
Code Example #4
def setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list):

  stack_root = Script.get_stack_root()
  jar_files = os.listdir(format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/lib'))

  for jar_file in jar_files:
    for component in component_list:
      Execute(('ln', '-sf',
               format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),
               format('{stack_root}/current/{component}/lib/{jar_file}')),
              not_if=format('ls {stack_root}/current/{component}/lib/{jar_file}'),
              only_if=format('ls {stack_root}/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),
              sudo=True)
Code Example #5
File: hive.py Project: OpenPOWER-BigData/HDP-ambari
def hive(name=None):
  import params

  XmlConfig("hive-site.xml",
            conf_dir = params.hive_conf_dir,
            configurations = params.config['configurations']['hive-site'],
            owner=params.hive_user,
            configuration_attributes=params.config['configuration_attributes']['hive-site']
  )

  if name in ["hiveserver2","metastore"]:
    # Manually overriding service logon user & password set by the installation package
    service_name = params.service_map[name]
    ServiceConfig(service_name,
                  action="change_user",
                  username = params.hive_user,
                  password = Script.get_password(params.hive_user))
    Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), logoutput=True, user=params.hadoop_user)

  if name == 'metastore':
    if params.init_metastore_schema:
      check_schema_created_cmd = format('cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
                                        '-dbType {hive_metastore_db_type} '
                                        '-userName {hive_metastore_user_name} '
                                        '-passWord {hive_metastore_user_passwd!p}'
                                        '&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"', #cmd "feature", propagate the process exit code manually
                                        hive_bin=params.hive_bin,
                                        hive_metastore_db_type=params.hive_metastore_db_type,
                                        hive_metastore_user_name=params.hive_metastore_user_name,
                                        hive_metastore_user_passwd=params.hive_metastore_user_passwd)
      try:
        Execute(check_schema_created_cmd)
      except Fail:
        create_schema_cmd = format('cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
                                   '-dbType {hive_metastore_db_type} '
                                   '-userName {hive_metastore_user_name} '
                                   '-passWord {hive_metastore_user_passwd!p}',
                                   hive_bin=params.hive_bin,
                                   hive_metastore_db_type=params.hive_metastore_db_type,
                                   hive_metastore_user_name=params.hive_metastore_user_name,
                                   hive_metastore_user_passwd=params.hive_metastore_user_passwd)
        Execute(create_schema_cmd,
                user = params.hive_user,
                logoutput=True
        )

  if name == "hiveserver2":
    if params.hive_execution_engine == "tez":
      # Init the tez app dir in hadoop
      script_file = __file__.replace('/', os.sep)
      cmd_file = os.path.normpath(os.path.join(os.path.dirname(script_file), "..", "files", "hiveTezSetup.cmd"))

      Execute("cmd /c " + cmd_file, logoutput=True, user=params.hadoop_user)
Code Example #6
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    # this function should not execute if the version can't be determined or
    # is not at least HDP 2.2.0.0
    if Script.is_hdp_stack_less_than("2.2"):
      return

    Logger.info("Executing Accumulo Client Rolling Upgrade pre-restart")
    conf_select.select(params.stack_name, "accumulo", params.version)
    hdp_select.select("accumulo-client", params.version)
Code Example #7
File: conf_select.py Project: maduhu/HDP2.5-ambari
def get_package_dirs():
  """
  Get package dir mappings
  :return:
  """
  stack_root = Script.get_stack_root()
  package_dirs = copy.deepcopy(_PACKAGE_DIRS)
  for package_name, directories in package_dirs.iteritems():
    for dir in directories:
      current_dir = dir['current_dir']
      current_dir = current_dir.replace(STACK_ROOT_PATTERN, stack_root)
      dir['current_dir'] = current_dir
  return package_dirs
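
A hypothetical shape for _PACKAGE_DIRS, consistent with the loop above (key names other than 'current_dir' are assumptions):

# Map of package name -> list of directory descriptors; each descriptor
# carries at least a 'current_dir' that embeds the stack-root placeholder.
STACK_ROOT_PATTERN = "{{ stack_root }}"
_PACKAGE_DIRS = {
  "hadoop": [
    {"conf_dir": "/etc/hadoop/conf",
     "current_dir": "{0}/current/hadoop-client/conf".format(STACK_ROOT_PATTERN)}
  ]
}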
Code Example #8
File: storm.py Project: zouzhberk/ambaridemo
def storm(name=None):
  import params
  yaml_config("storm.yaml",
              conf_dir=params.conf_dir,
              configurations=params.config['configurations']['storm-site'],
              owner=params.storm_user
  )

  if name in params.service_map:
    service_name = params.service_map[name]
    ServiceConfig(service_name,
                  action="change_user",
                  username = params.storm_user,
                  password = Script.get_password(params.storm_user))
Code Example #9
  def _install_lzo_support_if_needed(self, params):
    hadoop_classpath_prefix = self._expand_hadoop_classpath_prefix(params.hadoop_classpath_prefix_template, params.config['configurations']['tez-site'])

    hadoop_lzo_dest_path = extract_path_component(hadoop_classpath_prefix, "hadoop-lzo-")
    if hadoop_lzo_dest_path:
      hadoop_lzo_file = os.path.split(hadoop_lzo_dest_path)[1]

      config = Script.get_config()
      file_url = urlparse.urljoin(config['hostLevelParams']['jdk_location'], hadoop_lzo_file)
      hadoop_lzo_dl_path = os.path.join(config["hostLevelParams"]["agentCacheDir"], hadoop_lzo_file)
      download_file(file_url, hadoop_lzo_dl_path)
      #This is for protection against configuration changes. It will infect every new destination with the lzo jar,
      # but since the classpath points to the jar directly we're getting away with it.
      if not os.path.exists(hadoop_lzo_dest_path):
        copy_file(hadoop_lzo_dl_path, hadoop_lzo_dest_path)
Code Example #10
File: utils.py Project: OpenPOWER-BigData/HDP-ambari
def get_hdfs_binary(distro_component_name):
  """
  Get the hdfs binary to use depending on the stack and version.
  :param distro_component_name: e.g., hadoop-hdfs-namenode, hadoop-hdfs-datanode
  :return: The hdfs binary to use
  """
  import params
  hdfs_binary = "hdfs"
  if params.stack_name == "HDP":
    # This was used in HDP 2.1 and earlier
    hdfs_binary = "hdfs"
    if Script.is_hdp_stack_greater_or_equal("2.2"):
      hdfs_binary = "/usr/hdp/current/{0}/bin/hdfs".format(distro_component_name)

  return hdfs_binary
Code Example #11
def get_hadoop_dir(target, force_latest_on_upgrade=False):
  """
  Return the hadoop shared directory in the following override order
  1. Use default for 2.1 and lower
  2. If 2.2 and higher, use /usr/hdp/current/hadoop-client/{target}
  3. If 2.2 and higher AND for an upgrade, use /usr/hdp/<version>/hadoop/{target}.
  However, if the upgrade has not yet invoked hdp-select, return the current
  version of the component.
  :target: the target directory
  :force_latest_on_upgrade: if True, then this will return the "current" directory
  without the HDP version built into the path, such as /usr/hdp/current/hadoop-client
  """

  if target not in HADOOP_DIR_DEFAULTS:
    raise Fail("Target {0} not defined".format(target))

  hadoop_dir = HADOOP_DIR_DEFAULTS[target]

  if Script.is_hdp_stack_greater_or_equal("2.2"):
    # home uses a different template
    if target == "home":
      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format("current", "hadoop-client")
    else:
      hadoop_dir = HADOOP_DIR_TEMPLATE.format("current", "hadoop-client", target)

    # if we are not forcing "current" for HDP 2.2, then attempt to determine
    # if the exact version needs to be returned in the directory
    if not force_latest_on_upgrade:
      stack_info = _get_upgrade_stack()

      if stack_info is not None:
        stack_version = stack_info[1]

        # determine if hdp-select has been run and if not, then use the current
        # hdp version until this component is upgraded
        current_hdp_version = get_role_component_current_hdp_version()
        if current_hdp_version is not None and stack_version != current_hdp_version:
          stack_version = current_hdp_version

        if target == "home":
          # home uses a different template
          hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
        else:
          hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)

  return hadoop_dir
Code Example #12
File: oozie.py Project: OpenPOWER-BigData/HDP-ambari
def oozie(is_server=False):
  import params

  from status_params import oozie_server_win_service_name

  XmlConfig("oozie-site.xml",
            conf_dir=params.oozie_conf_dir,
            configurations=params.config['configurations']['oozie-site'],
            owner=params.oozie_user,
            mode='f',
            configuration_attributes=params.config['configuration_attributes']['oozie-site']
  )

  File(os.path.join(params.oozie_conf_dir, "oozie-env.cmd"),
       owner=params.oozie_user,
       content=InlineTemplate(params.oozie_env_cmd_template)
  )

  Directory(params.oozie_tmp_dir,
            owner=params.oozie_user,
            recursive = True,
  )

  if is_server:
    # Manually overriding service logon user & password set by the installation package
    ServiceConfig(oozie_server_win_service_name,
                  action="change_user",
                  username = params.oozie_user,
                  password = Script.get_password(params.oozie_user))

  download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
                      os.path.join(params.oozie_root, "extra_libs", "sqljdbc4.jar")
  )
  webapps_sqljdbc_path = os.path.join(params.oozie_home, "oozie-server", "webapps", "oozie", "WEB-INF", "lib", "sqljdbc4.jar")
  if os.path.isfile(webapps_sqljdbc_path):
    download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
                        webapps_sqljdbc_path
    )
  download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
                      os.path.join(params.oozie_home, "share", "lib", "oozie", "sqljdbc4.jar")
  )
  download_file(os.path.join(params.config['hostLevelParams']['jdk_location'], "sqljdbc4.jar"),
                      os.path.join(params.oozie_home, "temp", "WEB-INF", "lib", "sqljdbc4.jar")
  )
Code Example #13
File: stack_select.py Project: maduhu/HDP2.5-ambari
def get_stack_version_before_install(component_name):
  """
  Works in a similar way to '<stack-selector-tool> status component',
  but also works for not yet installed packages.
  
  Note: won't work if doing initial install.
  """
  stack_root = Script.get_stack_root()
  component_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, "current", component_name)
  stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)
  if os.path.islink(component_dir):
    stack_version = os.path.basename(os.path.dirname(os.readlink(component_dir)))
    match = re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+-[0-9]+', stack_version)  # dots escaped to match literally
    if match is None:
      Logger.info('Failed to get extracted version with {0} in method get_stack_version_before_install'.format(stack_selector_name))
      return None # lazy fail
    return stack_version
  else:
    return None
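
The version extraction relies on the stack layout in which <stack-root>/current/<component> is a symlink into a versioned directory; a minimal sketch with a hypothetical path:

import os.path

# /usr/hdp/current/hadoop-client -> /usr/hdp/2.5.0.0-1234/hadoop
link_target = "/usr/hdp/2.5.0.0-1234/hadoop"   # what os.readlink() would return
print(os.path.basename(os.path.dirname(link_target)))  # 2.5.0.0-1234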
Code Example #14
def _get_single_version_from_hdp_select():
  """
  Call "hdp-select versions" and return the version string if only one version is available.
  :return: Returns a version string if successful, and None otherwise.
  """
  # Ubuntu returns: "stdin: is not a tty", as subprocess output, so must use a temporary file to store the output.
  tmp_dir = Script.get_tmp_dir()
  tmp_file = os.path.join(tmp_dir, "copy_tarball_out.txt")
  hdp_version = None

  out = None
  get_hdp_versions_cmd = "/usr/bin/hdp-select versions > {0}".format(tmp_file)
  try:
    code, stdoutdata = shell.call(get_hdp_versions_cmd, logoutput=True)
    with open(tmp_file, 'r') as f:
      out = f.read()
  except Exception, e:
    Logger.logger.exception("Could not parse output of {0}. Error: {1}".format(str(tmp_file), str(e)))
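  # Hedged reconstruction: the scraped snippet is truncated above. Per the
  # docstring, a version is returned only when "hdp-select versions" listed
  # exactly one.
  if out:
    versions = [line.strip() for line in out.splitlines() if line.strip()]
    if len(versions) == 1:
      hdp_version = versions[0]

  return hdp_version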
Code Example #15
File: stack_select.py Project: maduhu/HDP2.5-ambari
def select_all(version_to_select):
  """
  Executes <stack-selector-tool> on every component for the specified version. If the value passed in is a
  stack version such as "2.3", then this will find the latest installed version which
  could be "2.3.0.0-9999". If a version is specified instead, such as 2.3.0.0-1234, it will use
  that exact version.
  :param version_to_select: the version to <stack-selector-tool> on, such as "2.3" or "2.3.0.0-1234"
  """
  stack_root = Script.get_stack_root()
  (stack_selector_name, stack_selector_path, stack_selector_package) = stack_tools.get_stack_tool(stack_tools.STACK_SELECTOR_NAME)
  # it's an error, but it shouldn't really stop anything from working
  if version_to_select is None:
    Logger.error(format("Unable to execute {stack_selector_name} after installing because there was no version specified"))
    return

  Logger.info("Executing {0} set all on {1}".format(stack_selector_name, version_to_select))

  command = format('{sudo} {stack_selector_path} set all `ambari-python-wrap {stack_selector_path} versions | grep ^{version_to_select} | tail -1`')
  only_if_command = format('ls -d {stack_root}/{version_to_select}*')
  Execute(command, only_if = only_if_command)
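
The backtick subshell in the command resolves a short version such as "2.3" to the newest fully qualified version installed; a rough Python equivalent (assuming the selector tool prints one version per line in ascending order):

# Mirrors `... versions | grep ^{version_to_select} | tail -1`.
versions_output = "2.3.0.0-1234\n2.3.2.0-2950\n2.4.0.0-169"
version_to_select = "2.3"
matching = [v for v in versions_output.splitlines() if v.startswith(version_to_select)]
print(matching[-1] if matching else None)  # 2.3.2.0-2950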
Code Example #16
  def pre_rolling_restart(self, env):
    import params
    env.set_params(params)

    # this function should not execute if the version can't be determined or
    # is not at least HDP 2.2.0.0
    if Script.is_hdp_stack_less_than("2.2"):
      return

    if self.component not in self.COMPONENT_TO_HDP_SELECT_MAPPING:
      Logger.info("Unable to execute an upgrade for unknown component {0}".format(self.component))
      raise Fail("Unable to execute an upgrade for unknown component {0}".format(self.component))

    hdp_component = self.COMPONENT_TO_HDP_SELECT_MAPPING[self.component]

    Logger.info("Executing Accumulo Rolling Upgrade pre-restart for {0}".format(hdp_component))
    conf_select.select(params.stack_name, "accumulo", params.version)
    hdp_select.select(hdp_component, params.version)

    # some accumulo components depend on the client, so update that too
    hdp_select.select("accumulo-client", params.version)
Code Example #17
def get_lzo_packages(stack_version_unformatted):
  lzo_packages = []
  script_instance = Script.get_instance()
  if OSCheck.is_suse_family() and int(OSCheck.get_os_major_version()) >= 12:
    lzo_packages += ["liblzo2-2", "hadoop-lzo-native"]
  elif OSCheck.is_redhat_family() or OSCheck.is_suse_family():
    lzo_packages += ["lzo", "hadoop-lzo-native"]
  elif OSCheck.is_ubuntu_family():
    lzo_packages += ["liblzo2-2"]

  if stack_version_unformatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_unformatted):
    if OSCheck.is_ubuntu_family():
      lzo_packages += [script_instance.format_package_name("hadooplzo-${stack_version}"),
                       script_instance.format_package_name("hadooplzo-${stack_version}-native")]
    else:
      lzo_packages += [script_instance.format_package_name("hadooplzo_${stack_version}"),
                       script_instance.format_package_name("hadooplzo_${stack_version}-native")]
  else:
    lzo_packages += ["hadoop-lzo"]

  return lzo_packages
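
A sketch of what format_package_name() likely does with the ${stack_version} placeholder (the underscore convention is an assumption, matching package names such as hadooplzo_2_5_0_0_1234):

# Illustrative only; the real substitution lives in Script.format_package_name().
package_template = "hadooplzo_${stack_version}"
stack_version = "2.5.0.0-1234"
safe_version = stack_version.replace(".", "_").replace("-", "_")
print(package_template.replace("${stack_version}", safe_version))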
Code Example #18
def get_hadoop_dir_for_stack_version(target, stack_version):
  """
  Return the hadoop shared directory for the provided stack version. This is necessary
  when folder paths of downgrade-source stack-version are needed after hdp-select. 
  :target: the target directory
  :stack_version: stack version to get hadoop dir for
  """

  if target not in HADOOP_DIR_DEFAULTS:
    raise Fail("Target {0} not defined".format(target))

  hadoop_dir = HADOOP_DIR_DEFAULTS[target]

  formatted_stack_version = format_hdp_stack_version(stack_version)
  if Script.is_hdp_stack_greater_or_equal_to(formatted_stack_version, "2.2"):
    # home uses a different template
    if target == "home":
      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
    else:
      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)

  return hadoop_dir
Code Example #19
File: knox.py Project: andreysabitov/ambari-mantl
def knox():
  import params

  XmlConfig("gateway-site.xml",
            conf_dir=params.knox_conf_dir,
            configurations=params.config['configurations']['gateway-site'],
            configuration_attributes=params.config['configuration_attributes']['gateway-site'],
            owner=params.knox_user
  )

  # Manually overriding service logon user & password set by the installation package
  ServiceConfig(params.knox_gateway_win_service_name,
                action="change_user",
                username = params.knox_user,
                password = Script.get_password(params.knox_user))

  File(os.path.join(params.knox_conf_dir, "gateway-log4j.properties"),
       owner=params.knox_user,
       content=params.gateway_log4j
  )

  File(os.path.join(params.knox_conf_dir, "topologies", "default.xml"),
       group=params.knox_group,
       owner=params.knox_user,
       content=InlineTemplate(params.topology_template)
  )

  if params.security_enabled:
    TemplateConfig( os.path.join(params.knox_conf_dir, "krb5JAASLogin.conf"),
        owner = params.knox_user,
        template_tag = None
    )

  if not os.path.isfile(params.knox_master_secret_path):
    cmd = format('cmd /C {knox_client_bin} create-master --master {knox_master_secret!p}')
    Execute(cmd)
    cmd = format('cmd /C {knox_client_bin} create-cert --hostname {knox_host_name_in_cluster}')
    Execute(cmd)
Code Example #20
File: stack_select.py Project: maduhu/HDP2.5-ambari
def get_hadoop_dir_for_stack_version(target, stack_version):
  """
  Return the hadoop shared directory for the provided stack version. This is necessary
  when folder paths of downgrade-source stack-version are needed after <stack-selector-tool>.
  :target: the target directory
  :stack_version: stack version to get hadoop dir for
  """

  stack_root = Script.get_stack_root()
  if target not in HADOOP_DIR_DEFAULTS:
    raise Fail("Target {0} not defined".format(target))

  hadoop_dir = HADOOP_DIR_DEFAULTS[target]

  formatted_stack_version = format_stack_version(stack_version)
  if formatted_stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, formatted_stack_version):
    # home uses a different template
    if target == "home":
      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop")
    else:
      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_root, stack_version, "hadoop", target)

  return hadoop_dir
Code Example #21
File: params_linux.py Project: gauravn7/ambari
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.str_utils import cbool, cint
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import get_port_from_url
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script

from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages

from urlparse import urlparse

import status_params
import os

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

hostname = config["hostname"]

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)
agent_stack_retry_on_unavailability = cbool(
    default("/hostLevelParams/agent_stack_retry_on_unavailability", None))
agent_stack_retry_count = cint(
    default("/hostLevelParams/agent_stack_retry_count", None))

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
Code Example #22
def _prepare_mapreduce_tarball():
    """
  Prepares the mapreduce tarball by including the native LZO libraries if necessary. If LZO is
  not enabled or has not been opted-in, then this will do nothing and return the original
  tarball to upload to HDFS.
  :return:  the full path of the newly created mapreduce tarball to use or the original path
  if no changes were made
  """
    # get the mapreduce tarball to crack open and add LZO libraries to
    _, mapreduce_source_file, _, _ = get_tarball_paths("mapreduce")

    if not lzo_utils.should_install_lzo():
        return mapreduce_source_file

    Logger.info("Preparing the mapreduce tarball with native LZO libraries...")

    temp_dir = Script.get_tmp_dir()

    # create the temp staging directories ensuring that non-root agents using tarfile can work with them
    mapreduce_temp_dir = tempfile.mkdtemp(prefix="mapreduce-tarball-",
                                          dir=temp_dir)
    sudo.chmod(mapreduce_temp_dir, 0777)

    # calculate the source directory for LZO
    hadoop_lib_native_source_dir = os.path.join(
        os.path.dirname(mapreduce_source_file), "lib", "native")
    if not sudo.path_exists(hadoop_lib_native_source_dir):
        raise Fail(
            "Unable to seed the mapreduce tarball with native LZO libraries since the source Hadoop native lib directory {0} does not exist"
            .format(hadoop_lib_native_source_dir))

    Logger.info("Extracting {0} to {1}".format(mapreduce_source_file,
                                               mapreduce_temp_dir))
    tar_archive.untar_archive(mapreduce_source_file, mapreduce_temp_dir)

    mapreduce_lib_dir = os.path.join(mapreduce_temp_dir, "hadoop", "lib")

    # copy native libraries from source hadoop to target
    Execute(("cp", "-af", hadoop_lib_native_source_dir, mapreduce_lib_dir),
            sudo=True)

    # ensure that the hadoop/lib/native directory is readable by non-root (which it typically is not)
    Directory(mapreduce_lib_dir,
              mode=0755,
              cd_access='a',
              recursive_ownership=True)

    # create the staging directory so that non-root agents can write to it
    mapreduce_native_tarball_staging_dir = os.path.join(
        temp_dir, "mapreduce-native-tarball-staging")
    if not os.path.exists(mapreduce_native_tarball_staging_dir):
        Directory(mapreduce_native_tarball_staging_dir,
                  mode=0777,
                  cd_access='a',
                  create_parents=True,
                  recursive_ownership=True)

    mapreduce_tarball_with_native_lib = os.path.join(
        mapreduce_native_tarball_staging_dir, "mapreduce-native.tar.gz")
    Logger.info("Creating a new mapreduce tarball at {0}".format(
        mapreduce_tarball_with_native_lib))
    tar_archive.archive_dir_via_temp_file(mapreduce_tarball_with_native_lib,
                                          mapreduce_temp_dir)

    # ensure that the tarball can be read and uploaded
    sudo.chmod(mapreduce_tarball_with_native_lib, 0744)

    # cleanup
    sudo.rmtree(mapreduce_temp_dir)

    return mapreduce_tarball_with_native_lib
Code Example #23
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources

from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import conf_select

import status_params

# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
  'SPARK2_JOBHISTORYSERVER' : 'spark2-historyserver',
  'SPARK2_CLIENT' : 'spark2-client',
  'SPARK2_THRIFTSERVER' : 'spark2-thriftserver'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SPARK2_CLIENT")

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)

spark_conf = '/etc/spark2/conf'
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
Code Example #24
def get_packages(scope, service_name=None, component_name=None):
    """
  Gets the packages which should be used with the stack's stack-select tool for the
  specified service/component. Not all services/components are used with the stack-select tools,
  so those will return no packages.

  :param scope: the scope of the command
  :param service_name:  the service name, such as ZOOKEEPER
  :param component_name: the component name, such as ZOOKEEPER_SERVER
  :return:  the packages to use with stack-select or None
  """
    from resource_management.libraries.functions.default import default

    if scope not in _PACKAGE_SCOPES:
        raise Fail("The specified scope of {0} is not valid".format(scope))

    config = Script.get_config()

    if service_name is None or component_name is None:
        if 'role' not in config or 'serviceName' not in config:
            raise Fail(
                "Both the role and the service name must be included in the command in order to determine which packages to use with the stack-select tool"
            )

        service_name = config['serviceName']
        component_name = config['role']

    stack_name = default("/clusterLevelParams/stack_name", None)
    if stack_name is None:
        raise Fail(
            "The stack name is not present in the command. Packages for stack-select tool cannot be loaded."
        )

    stack_packages_config = default(
        "/configurations/cluster-env/stack_packages", None)
    if stack_packages_config is None:
        raise Fail(
            "The stack packages are not defined on the command. Unable to load packages for the stack-select tool"
        )

    data = json.loads(stack_packages_config)

    if stack_name not in data:
        raise Fail(
            "Cannot find stack-select packages for the {0} stack".format(
                stack_name))

    stack_select_key = "stack-select"
    data = data[stack_name]
    if stack_select_key not in data:
        raise Fail(
            "There are no stack-select packages defined for this command for the {0} stack"
            .format(stack_name))

    # this should now be the dictionary of role name to package name
    data = data[stack_select_key]
    service_name = service_name.upper()
    component_name = component_name.upper()

    if service_name not in data:
        Logger.info(
            "Skipping stack-select on {0} because it does not exist in the stack-select package structure."
            .format(service_name))
        return None

    data = data[service_name]

    if component_name not in data:
        Logger.info(
            "Skipping stack-select on {0} because it does not exist in the stack-select package structure."
            .format(component_name))
        return None

    # this one scope is not an array, so transform it into one for now so we can
    # use the same code below
    packages = data[component_name][scope]
    if scope == PACKAGE_SCOPE_STACK_SELECT:
        packages = [packages]

    # grab the package name from the JSON and validate it against the packages
    # that the stack-select tool supports - if it doesn't support it, then try to find the legacy
    # package name if it exists
    supported_packages = get_supported_packages()
    for index, package in enumerate(packages):
        if not is_package_supported(package,
                                    supported_packages=supported_packages):
            if _PACKAGE_SCOPE_LEGACY in data[component_name]:
                legacy_package = data[component_name][_PACKAGE_SCOPE_LEGACY]
                Logger.info(
                    "The package {0} is not supported by this version of the stack-select tool, defaulting to the legacy package of {1}"
                    .format(package, legacy_package))

                # use the legacy package
                packages[index] = legacy_package
            else:
                raise Fail(
                    "The package {0} is not supported by this version of the stack-select tool."
                    .format(package))

    # transform the array back to a single element
    if scope == PACKAGE_SCOPE_STACK_SELECT:
        packages = packages[0]

    return packages
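
A hypothetical fragment of the cluster-env stack_packages JSON, shaped to match the successive lookups above (stack, then "stack-select", then service, component, and scope; the scope key names are assumptions):

stack_packages = {
  "HDP": {
    "stack-select": {
      "ZOOKEEPER": {
        "ZOOKEEPER_SERVER": {
          "STACK-SELECT-PACKAGE": "zookeeper-server",
          "INSTALL": ["zookeeper-server"],
          "PATCH": ["zookeeper-server"],
          "STANDARD": ["zookeeper-server"]
        }
      }
    }
  }
}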
Code Example #25
File: params.py Project: theking961/dfhz_hue_mpack
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.default import default
import status_params
import functools
import os
# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
  'HUE_SERVER' : 'hue-server',
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HUE_SERVER")
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()
# Hue 4.x download url
hue_version = '4.6.0'
download_url = 'echo https://cdn.gethue.com/downloads/hue-4.6.0.tgz'
# can clean this one up using hue_version where this var is used
hue_version_dir = 'hue-4.6.0'
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = default("/hostLevelParams/stack_name", None)
#e.g. /var/lib/ambari-agent/cache/stacks/HDP/$VERSION/services/HUE/package
service_packagedir = os.path.realpath(__file__).split('/scripts')[0]
cluster_name = str(config['clusterName'])
ambari_server_hostname = config['ambariLevelParams']['ambari_server_host']
Code Example #26
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from ambari_commons.os_check import OSCheck

# a map of the Ambari role to the component name
# for use with /usr/hdp/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
    'HIVE_METASTORE': 'hive-metastore',
    'HIVE_SERVER': 'hive-server2',
    'WEBHCAT_SERVER': 'hive-webhcat',
    'HIVE_CLIENT': 'hive-client',
    'HCAT': 'hive-client'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP,
                                                     "HIVE_CLIENT")

config = Script.get_config()

if OSCheck.is_windows_family():
    hive_metastore_win_service_name = "metastore"
    hive_client_win_service_name = "hwi"
    hive_server_win_service_name = "hiveserver2"
    webhcat_server_win_service_name = "templeton"
else:
    hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
    hive_pid = 'hive-server.pid'

    hive_metastore_pid = 'hive.pid'

    hcat_pid_dir = config['configurations']['hive-env'][
Code Example #27
File: params.py Project: elal/ambari
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_directory import HdfsDirectory

# a map of the Ambari role to the component name
# for use with /usr/hdp/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
    'SPARK_JOBHISTORYSERVER': 'spark-historyserver',
    'SPARK_CLIENT': 'spark-client'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP,
                                                     "SPARK_CLIENT")

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_name = default("/hostLevelParams/stack_name", None)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)

# TODO! FIXME! Version check is not working as of today :
#   $ yum list installed | grep hdp-select
#   hdp-select.noarch                            2.2.1.0-2340.el6           @HDP-2.2
Code Example #28
File: params.py Project: nishantmonu51/ambari-1
import os

from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_stack_version, compare_versions
from ambari_commons.os_check import OSCheck
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.resources.hdfs_resource import HdfsResource

config = Script.get_config()

host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)

dfs_type = default("/commandParams/dfs_type", "")
hadoop_conf_dir = "/etc/hadoop/conf"

component_list = default("/localComponents", [])

hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']

# hadoop default params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
Code Example #29
File: params.py Project: andreysabitov/ambari-mantl

from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
from resource_management.core.system import System
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import default, format

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

#users and groups
hbase_user = config['configurations']['hbase-env']['hbase_user']
smoke_user =  config['configurations']['cluster-env']['smokeuser']
gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
gmond_user = config['configurations']['ganglia-env']["gmond_user"]
tez_user = config['configurations']['tez-env']["tez_user"]

user_group = config['configurations']['cluster-env']['user_group']
proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
Code Example #30
import os
from resource_management.libraries.functions.constants import Direction
from resource_management.libraries.functions import default
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions import format
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature

# server configurations
config = Script.get_config()
stack_root = Script.get_stack_root()

# upgrade params
stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", Direction.UPGRADE)
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)

flume_conf_dir = '/etc/flume/conf'
if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
  flume_conf_dir = format('{stack_root}/current/flume-server/conf')

flume_user = '******'
flume_group = 'flume'
if 'flume-env' in config['configurations'] and 'flume_user' in config['configurations']['flume-env']:
Code Example #31
File: status_params.py Project: csivaguru/ambari

from resource_management.libraries.functions import format
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.default import default

config = Script.get_config()

spark_user = config['configurations']['spark-env']['spark_user']
spark_group = config['configurations']['spark-env']['spark_group']
user_group = config['configurations']['cluster-env']['user_group']

if 'hive-env' in config['configurations']:
    hive_user = config['configurations']['hive-env']['hive_user']
else:
    hive_user = "******"

spark_pid_dir = config['configurations']['spark-env']['spark_pid_dir']
spark_history_server_pid_file = format(
    "{spark_pid_dir}/spark-{spark_user}-org.apache.spark.deploy.history.HistoryServer-1.pid"
)
spark_thrift_server_pid_file = format(
Code Example #32
    def actionexecute(self, env):
        num_errors = 0

        # Parse parameters
        config = Script.get_config()

        try:
            command_repository = CommandRepository(config['repositoryFile'])
        except KeyError:
            raise Fail(
                "The command repository indicated by 'repositoryFile' was not found"
            )

        repo_rhel_suse = config['configurations']['cluster-env'][
            'repo_suse_rhel_template']
        repo_ubuntu = config['configurations']['cluster-env'][
            'repo_ubuntu_template']
        template = repo_rhel_suse if OSCheck.is_redhat_family() or OSCheck.is_suse_family() else repo_ubuntu

        # Handle a SIGTERM and SIGINT gracefully
        signal.signal(signal.SIGTERM, self.abort_handler)
        signal.signal(signal.SIGINT, self.abort_handler)

        self.repository_version = command_repository.version_string

        # Select dict that contains parameters
        try:
            package_list = json.loads(config['roleParams']['package_list'])
            stack_id = config['roleParams']['stack_id']
        except KeyError:
            pass
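        # Note: if 'roleParams' is absent, package_list and stack_id remain
        # unbound here, and the later install_packages(package_list) call would
        # raise NameError; the command evidently always carries these params.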

        self.stack_name = Script.get_stack_name()
        if self.stack_name is None:
            raise Fail("Cannot determine the stack name")

        self.stack_root_folder = Script.get_stack_root()
        if self.stack_root_folder is None:
            raise Fail("Cannot determine the stack's root directory")

        if self.repository_version is None:
            raise Fail("Cannot determine the repository version to install")

        self.repository_version = self.repository_version.strip()

        try:
            if not command_repository.items:
                Logger.warning(
                    "Repository list is empty. Ambari may not be managing the repositories for {0}."
                    .format(self.repository_version))
            else:
                Logger.info(
                    "Will install packages for repository version {0}".format(
                        self.repository_version))
                new_repo_files = create_repo_files(template,
                                                   command_repository)
                self.repo_files.update(new_repo_files)
        except Exception as err:
            Logger.logger.exception(
                "Cannot install repository files. Error: {0}".format(str(err)))
            num_errors += 1

        # Build structured output with initial values
        self.structured_output = {
            'package_installation_result': 'FAIL',
            'repository_version_id': command_repository.version_id
        }

        self.put_structured_out(self.structured_output)

        try:
            # check package manager non-completed transactions
            if self.pkg_provider.check_uncompleted_transactions():
                self.pkg_provider.print_uncompleted_transaction_hint()
                num_errors += 1
        except Exception as e:  # we need to ignore any exception
            Logger.warning(
                "Failed to check for uncompleted package manager transactions: "
                + str(e))

        if num_errors > 0:
            raise Fail("Failed to distribute repositories/install packages")

        # Initial list of versions, used to compute the new version installed
        self.old_versions = get_stack_versions(self.stack_root_folder)

        try:
            is_package_install_successful = False
            ret_code = self.install_packages(package_list)
            if ret_code == 0:
                self.structured_output[
                    'package_installation_result'] = 'SUCCESS'
                self.put_structured_out(self.structured_output)
                is_package_install_successful = True
            else:
                num_errors += 1
        except Exception as err:
            num_errors += 1
            Logger.logger.exception(
                "Could not install packages. Error: {0}".format(str(err)))

        # Provide correct exit code
        if num_errors > 0:
            raise Fail("Failed to distribute repositories/install packages")

        # if installing a version of HDP that needs some symlink love, then create them
        if is_package_install_successful and 'actual_version' in self.structured_output:
            self._relink_configurations_with_conf_select(
                stack_id, self.structured_output['actual_version'])
Code Example #33
  def rebalancehdfs(self, env):
    from ambari_commons.os_windows import UserHelper, run_os_command_impersonated
    import params
    env.set_params(params)

    hdfs_username, hdfs_domain = UserHelper.parse_user_name(params.hdfs_user, ".")

    name_node_parameters = json.loads( params.name_node_params )
    threshold = name_node_parameters['threshold']
    _print("Starting balancer with threshold = %s\n" % threshold)

    def calculateCompletePercent(first, current):
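      # Assumes bytesLeftToMove holds floats; with Python 2 integers the
      # division below would truncate and skew the computed percentage.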
      return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove

    def startRebalancingProcess(threshold):
      rebalanceCommand = 'hdfs balancer -threshold %s' % threshold
      return ['cmd', '/C', rebalanceCommand]

    command = startRebalancingProcess(threshold)
    basedir = os.path.join(env.config.basedir, 'scripts')

    _print("Executing command %s\n" % command)

    parser = hdfs_rebalance.HdfsParser()
    returncode, stdout, err = run_os_command_impersonated(' '.join(command), hdfs_username, Script.get_password(params.hdfs_user), hdfs_domain)

    for line in stdout.split('\n'):
      _print('[balancer] %s %s' % (str(datetime.now()), line ))
      pl = parser.parseLine(line)
      if pl:
        res = pl.toJson()
        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl)

        self.put_structured_out(res)
      elif parser.state == 'PROCESS_FINISED':  # state name as spelled by HdfsParser
        _print('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' ))
        self.put_structured_out({'completePercent' : 1})
        break

    if returncode is not None and returncode != 0:
      raise Fail('Hdfs rebalance process exited with error. See the log output')
Code Example #34
    def _llap_start(self, env, cleanup=False):
      import params
      env.set_params(params)

      if params.hive_server_interactive_ha:
        """
        Check llap app state
        """
        Logger.info("HSI HA is enabled. Checking if LLAP is already running ...")
        if params.stack_supports_hive_interactive_ga:
          status = self.check_llap_app_status_in_llap_ga(params.llap_app_name, 2, params.hive_server_interactive_ha)
        else:
          status = self.check_llap_app_status_in_llap_tp(params.llap_app_name, 2, params.hive_server_interactive_ha)

        if status:
          Logger.info("LLAP app '{0}' is already running.".format(params.llap_app_name))
          return True
        else:
          Logger.info("LLAP app '{0}' is not running. llap will be started.".format(params.llap_app_name))
        pass

      # Call for cleaning up the earlier run(s) LLAP package folders.
      self._cleanup_past_llap_package_dirs()

      Logger.info("Starting LLAP")
      LLAP_PACKAGE_CREATION_PATH = Script.get_tmp_dir()

      unique_name = "llap-slider%s" % datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')

      cmd = format("{stack_root}/current/hive-server2-hive2/bin/hive --service llap --slider-am-container-mb {params.slider_am_container_mb} "
                   "--size {params.llap_daemon_container_size}m --cache {params.hive_llap_io_mem_size}m --xmx {params.llap_heap_size}m "
                   "--loglevel {params.llap_log_level} {params.llap_extra_slider_opts} --output {LLAP_PACKAGE_CREATION_PATH}/{unique_name}")

      # Append params that are supported from Hive llap GA version.
      if params.stack_supports_hive_interactive_ga:
        # Figure out the Slider Anti-affinity to be used.
        # YARN does not support anti-affinity, and therefore Slider implements AA by means of exclusion lists, i.e., it
        # starts containers one by one and excludes the nodes it gets (adding a delay of ~2sec./machine). When the LLAP
        # container memory size configuration is more than half of YARN node memory, AA is implicit and should be avoided.
        slider_placement = 4
        if long(params.llap_daemon_container_size) > (0.5 * long(params.yarn_nm_mem)):
          slider_placement = 0
          Logger.info("Setting slider_placement : 0, as llap_daemon_container_size : {0} > 0.5 * "
                      "YARN NodeManager Memory({1})".format(params.llap_daemon_container_size, params.yarn_nm_mem))
        else:
          Logger.info("Setting slider_placement: 4, as llap_daemon_container_size : {0} <= 0.5 * "
                     "YARN NodeManager Memory({1})".format(params.llap_daemon_container_size, params.yarn_nm_mem))
        cmd += format(" --slider-placement {slider_placement} --skiphadoopversion --skiphbasecp --instances {params.num_llap_daemon_running_nodes}")

        # Setup the logger for the ga version only
        cmd += format(" --logger {params.llap_logger}")
      else:
        cmd += format(" --instances {params.num_llap_nodes}")
      if params.security_enabled:
        llap_keytab_splits = params.hive_llap_keytab_file.split("/")
        Logger.debug("llap_keytab_splits : {0}".format(llap_keytab_splits))
        cmd += format(" --slider-keytab-dir .slider/keytabs/{params.hive_user}/ --slider-keytab "
                      "{llap_keytab_splits[4]} --slider-principal {params.hive_llap_principal}")

      # Add the aux jars if they are specified. If empty, dont need to add this param.
      if params.hive_aux_jars:
        cmd+= format(" --auxjars {params.hive_aux_jars}")

      # Append args.
      llap_java_args = InlineTemplate(params.llap_app_java_opts).get_content()
      cmd += format(" --args \" {llap_java_args}\"")
      # Append metaspace size to args.
      if params.java_version > 7 and params.llap_daemon_container_size > 4096:
        if params.llap_daemon_container_size <= 32768:
          metaspaceSize = "256m"
        else:
          metaspaceSize = "1024m"
        cmd = cmd[:-1] + " -XX:MetaspaceSize=" + metaspaceSize + "\""

      run_file_path = None
      try:
        Logger.info(format("LLAP start command: {cmd}"))
        code, output, error = shell.checked_call(cmd, user=params.hive_user, quiet = True, stderr=subprocess.PIPE, logoutput=True)

        if code != 0 or output is None:
          raise Fail("Command failed with either non-zero return code or no output.")

        # E.g., output:
        # Prepared llap-slider-05Apr2016/run.sh for running LLAP on Slider
        exp = r"Prepared (.*?run.sh) for running LLAP"
        run_file_path = None
        out_splits = output.split("\n")
        for line in out_splits:
          line = line.strip()
          m = re.match(exp, line, re.I)
          if m and len(m.groups()) == 1:
            run_file_name = m.group(1)
            run_file_path = os.path.join(params.hive_user_home_dir, run_file_name)
            break
        if not run_file_path:
          raise Fail("Did not find run.sh file in output: " + str(output))

        Logger.info(format("Run file path: {run_file_path}"))
        Execute(run_file_path, user=params.hive_user, logoutput=True)
        Logger.info("Submitted LLAP app name : {0}".format(params.llap_app_name))

        # We need to check the status of LLAP app to figure out it got
        # launched properly and is in running state. Then go ahead with Hive Interactive Server start.
        if params.stack_supports_hive_interactive_ga:
          status = self.check_llap_app_status_in_llap_ga(params.llap_app_name, params.num_retries_for_checking_llap_status)
        else:
          status = self.check_llap_app_status_in_llap_tp(params.llap_app_name, params.num_retries_for_checking_llap_status)
        if status:
          Logger.info("LLAP app '{0}' deployed successfully.".format(params.llap_app_name))
          return True
        else:
          Logger.error("LLAP app '{0}' deployment unsuccessful.".format(params.llap_app_name))
          return False
      except:
        # Attempt to clean up the packaged application, or potentially rename it with a .bak
        if run_file_path is not None and cleanup:
          parent_dir = os.path.dirname(run_file_path)
          Directory(parent_dir,
                    action = "delete",
                    ignore_failures = True,
          )

        # re-raise the original exception
        raise
コード例 #35
0
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.core import shell
from resource_management.core.shell import as_user, as_sudo
from resource_management.core.source import Template
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.core.logger import Logger
from resource_management.libraries.functions.curl_krb_request import curl_krb_request
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.namenode_ha_utils import get_namenode_states
from resource_management.libraries.functions.show_logs import show_logs
from ambari_commons.inet_utils import ensure_ssl_using_protocol
from zkfc_slave import ZkfcSlaveDefault

ensure_ssl_using_protocol(
  Script.get_force_https_protocol_name(),
  Script.get_ca_cert_file_path()
)

def safe_zkfc_op(action, env):
  """
  Idempotent operation on the zkfc process to either start or stop it.
  :param action: start or stop
  :param env: environment
  """
  Logger.info("Performing action {0} on zkfc.".format(action))
  zkfc = None
  if action == "start":
    try:
      ZkfcSlaveDefault.status_static(env)
    except ComponentIsNotRunning:
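      # Hedged sketch, not part of the original snippet: one plausible completion,
      # assuming ZkfcSlaveDefault also exposes start_static()/stop_static()
      # counterparts to status_static():
      ZkfcSlaveDefault.start_static(env)
  elif action == "stop":
    try:
      ZkfcSlaveDefault.status_static(env)
      ZkfcSlaveDefault.stop_static(env)
    except ComponentIsNotRunning:
      pass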
コード例 #36
0
#!/usr/bin/env python
from resource_management import *
from resource_management.libraries.script.script import Script
import sys, os, glob
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default



# server configurations
config = Script.get_config()



# # params from flink-ambari-config
# flink_install_dir = config['configurations']['flink-ambari-config']['flink_install_dir']
# flink_numcontainers = config['configurations']['flink-ambari-config']['flink_numcontainers']
# flink_jobmanager_memory = config['configurations']['flink-ambari-config']['flink_jobmanager_memory']
# flink_container_memory = config['configurations']['flink-ambari-config']['flink_container_memory']
# setup_prebuilt = config['configurations']['flink-ambari-config']['setup_prebuilt']
# flink_appname = config['configurations']['flink-ambari-config']['flink_appname']
# flink_queue = config['configurations']['flink-ambari-config']['flink_queue']
# flink_streaming = config['configurations']['flink-ambari-config']['flink_streaming']
#
# hadoop_conf_dir = config['configurations']['flink-ambari-config']['hadoop_conf_dir']
# flink_download_url = config['configurations']['flink-ambari-config']['flink_download_url']
#
#
# conf_dir=''
# bin_dir=''
コード例 #37
0
ファイル: status_params.py プロジェクト: rajkrrsingh/ambari
# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
  'HIVE_METASTORE' : 'hive-metastore',
  'HIVE_SERVER' : 'hive-server2',
  'WEBHCAT_SERVER' : 'hive-webhcat',
  'HIVE_CLIENT' : 'hive-client',
  'HCAT' : 'hive-client',
  'HIVE_SERVER_INTERACTIVE' : 'hive-server2-hive2'
}


# Either HIVE_METASTORE, HIVE_SERVER, WEBHCAT_SERVER, HIVE_CLIENT, HCAT, HIVE_SERVER_INTERACTIVE
role = default("/role", None)
component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HIVE_CLIENT")
component_directory_interactive = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HIVE_SERVER_INTERACTIVE")
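# Illustration (assumed behavior of Script.get_component_from_role): for
# role = 'HIVE_SERVER' the map above yields 'hive-server2', i.e. the component
# lives under <stack-root>/current/hive-server2; a role missing from the map
# falls back to the default passed as the second argument.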

config = Script.get_config()

stack_root = Script.get_stack_root()
stack_version_unformatted = config['clusterLevelParams']['stack_version']
stack_version_formatted_major = format_stack_version(stack_version_unformatted)

if OSCheck.is_windows_family():
  hive_metastore_win_service_name = "metastore"
  hive_client_win_service_name = "hwi"
  hive_server_win_service_name = "hiveserver2"
  webhcat_server_win_service_name = "templeton"
else:
  hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
コード例 #38
0

import os
import socket
import time
import logging
import traceback
from resource_management.libraries.functions import format
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import get_kinit_path
from resource_management.core.resources import Execute
from resource_management.core import global_lock

stack_root = Script.get_stack_root()

OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"

HIVE_SERVER_THRIFT_PORT_KEY = '{{spark2-hive-site-override/hive.server2.thrift.port}}'
HIVE_SERVER_THRIFT_HTTP_PORT_KEY = '{{spark2-hive-site-override/hive.server2.thrift.http.port}}'
HIVE_SERVER_TRANSPORT_MODE_KEY = '{{spark2-hive-site-override/hive.server2.transport.mode}}'
SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'

HIVE_SERVER2_AUTHENTICATION_KEY = '{{hive-site/hive.server2.authentication}}'
HIVE_SERVER2_KERBEROS_KEYTAB = '{{spark2-hive-site-override/hive.server2.authentication.kerberos.keytab}}'
HIVE_SERVER2_PRINCIPAL_KEY = '{{spark2-hive-site-override/hive.server2.authentication.kerberos.principal}}'

# The configured Kerberos executable search paths, if any
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
コード例 #39
0
class TestMetadataServer(RMFTestCase):
  COMMON_SERVICES_PACKAGE_DIR = "ATLAS/0.1.0.2.3/package"
  STACK_VERSION = "2.3"
  stack_root = Script.get_stack_root()
  conf_dir = stack_root + "/current/atlas-server/conf"
  def configureResourcesCalled(self):
      # Both server and client
      self.assertResourceCalled('Directory', self.conf_dir,
                                owner='atlas',
                                group='hadoop',
                                create_parents = True,
                                cd_access='a',
                                mode=0755)

      # Pid dir
      self.assertResourceCalled('Directory', '/var/run/atlas',
                                owner='atlas',
                                group='hadoop',
                                create_parents = True,
                                cd_access='a',
                                mode=0755
      )
      self.assertResourceCalled('Directory', self.conf_dir + "/solr",
                                owner='atlas',
                                group='hadoop',
                                create_parents = True,
                                cd_access='a',
                                mode=0755,
                                recursive_ownership = True
      )
      # Log dir
      self.assertResourceCalled('Directory', '/var/log/atlas',
                                owner='atlas',
                                group='hadoop',
                                create_parents = True,
                                cd_access='a',
                                mode=0755
      )
      # Data dir
      self.assertResourceCalled('Directory', self.stack_root+"/current/atlas-server/data",
                                owner='atlas',
                                group='hadoop',
                                create_parents = True,
                                cd_access='a',
                                mode=0644
      )
      # Expanded war dir
      self.assertResourceCalled('Directory', self.stack_root+'/current/atlas-server/server/webapp',
                                owner='atlas',
                                group='hadoop',
                                create_parents = True,
                                cd_access='a',
                                mode=0644
      )
      self.assertResourceCalled('Execute', ('cp', '/usr/hdp/current/atlas-server/server/webapp/atlas.war', '/usr/hdp/current/atlas-server/server/webapp/atlas.war'),
                                sudo = True,
                                not_if = True,
      )
      host_name = u"c6401.ambari.apache.org"
      app_props =  dict(self.getConfig()['configurations']['application-properties'])
      app_props['atlas.server.bind.address'] = host_name

      metadata_protocol = "https" if app_props["atlas.enableTLS"] is True else "http"
      metadata_port = app_props["atlas.server.https.port"] if metadata_protocol == "https" else app_props["atlas.server.http.port"]
      app_props["atlas.rest.address"] = u'%s://%s:%s' % (metadata_protocol, host_name, metadata_port)
      app_props["atlas.server.ids"] = "id1"
      app_props["atlas.server.address.id1"] = u"%s:%s" % (host_name, metadata_port)
      app_props["atlas.server.ha.enabled"] = "false"

      self.assertResourceCalled('File', str(self.conf_dir + "/atlas-log4j.xml"),
                          content=InlineTemplate(
                            self.getConfig()['configurations'][
                              'atlas-log4j']['content']),
                          owner='atlas',
                          group='hadoop',
                          mode=0644,
      )
      self.assertResourceCalled('File', str(self.conf_dir + "/atlas-env.sh"),
                                content=InlineTemplate(
                                    self.getConfig()['configurations'][
                                        'atlas-env']['content']),
                                owner='atlas',
                                group='hadoop',
                                mode=0755,
      )
      self.assertResourceCalled('File', str(self.conf_dir + "/solr/solrconfig.xml"),
                                content=InlineTemplate(
                                    self.getConfig()['configurations'][
                                      'atlas-solrconfig']['content']),
                                owner='atlas',
                                group='hadoop',
                                mode=0644,
      )
      # application.properties file
      self.assertResourceCalled('PropertiesFile', str(self.conf_dir + "/application.properties"),
                                properties=app_props,
                                owner=u'atlas',
                                group=u'hadoop',
                                mode=0600,
      )
      self.assertResourceCalled('Directory', '/var/log/ambari-infra-solr-client',
                                create_parents = True,
                                cd_access='a',
                                mode=0755
      )
      self.assertResourceCalled('Directory', '/usr/lib/ambari-infra-solr-client',
                                create_parents = True,
                                recursive_ownership = True,
                                cd_access='a',
                                mode=0755
      )
      self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/solrCloudCli.sh',
                                content=StaticFile('/usr/lib/ambari-infra-solr-client/solrCloudCli.sh'),
                                mode=0755,
                                )
      self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/log4j.properties',
                                content=self.getConfig()['configurations']['infra-solr-client-log4j']['content'],
                                mode=0644,
      )
      self.assertResourceCalled('File', '/var/log/ambari-infra-solr-client/solr-client.log',
                                mode=0664,
                                content=''
      )
      self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --check-znode --retry 5 --interval 10')
      self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --download-config --config-dir /tmp/solr_config_atlas_configs_0.[0-9]* --config-set atlas_configs --retry 30 --interval 5')
      self.assertResourceCalledRegexp('^File$', '^/tmp/solr_config_atlas_configs_0.[0-9]*',
                                      content=InlineTemplate(self.getConfig()['configurations']['atlas-solrconfig']['content']),
                                      only_if='test -d /tmp/solr_config_atlas_configs_0.[0-9]*')
      self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --upload-config --config-dir /tmp/solr_config_atlas_configs_0.[0-9]* --config-set atlas_configs --retry 30 --interval 5',
                                      only_if='test -d /tmp/solr_config_atlas_configs_0.[0-9]*')
      self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --upload-config --config-dir {0}/solr --config-set atlas_configs --retry 30 --interval 5'.format(self.conf_dir),
                                      not_if='test -d /tmp/solr_config_atlas_configs_0.[0-9]*')
      self.assertResourceCalledRegexp('^Directory$', '^/tmp/solr_config_atlas_configs_0.[0-9]*',
                                      action=['delete'],
                                      create_parents=True)

      self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --create-collection --collection vertex_index --config-set atlas_configs --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10')
      self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --create-collection --collection edge_index --config-set atlas_configs --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10')
      self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --create-collection --collection fulltext_index --config-set atlas_configs --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10')

  def configureResourcesCalledSecure(self):
    # Both server and client
    self.assertResourceCalled('Directory', self.conf_dir,
                              owner='atlas',
                              group='hadoop',
                              create_parents = True,
                              cd_access='a',
                              mode=0755
    )

    # Pid dir
    self.assertResourceCalled('Directory', '/var/run/atlas',
                              owner='atlas',
                              group='hadoop',
                              create_parents = True,
                              cd_access='a',
                              mode=0755
    )
    self.assertResourceCalled('Directory', self.conf_dir + "/solr",
                              owner='atlas',
                              group='hadoop',
                              create_parents = True,
                              cd_access='a',
                              mode=0755,
                              recursive_ownership = True
    )
    # Log dir
    self.assertResourceCalled('Directory', '/var/log/atlas',
                              owner='atlas',
                              group='hadoop',
                              create_parents = True,
                              cd_access='a',
                              mode=0755
    )
    # Data dir
    self.assertResourceCalled('Directory', self.stack_root+'/current/atlas-server/data',
                              owner='atlas',
                              group='hadoop',
                              create_parents = True,
                              cd_access='a',
                              mode=0644
    )
    # Expanded war dir
    self.assertResourceCalled('Directory', self.stack_root+'/current/atlas-server/server/webapp',
                              owner='atlas',
                              group='hadoop',
                              create_parents = True,
                              cd_access='a',
                              mode=0644
    )
    self.assertResourceCalled('Execute', ('cp', self.stack_root+'/current/atlas-server/server/webapp/atlas.war', self.stack_root+'/current/atlas-server/server/webapp/atlas.war'),
                              sudo = True,
                              not_if = True,
    )
    host_name = u"c6401.ambari.apache.org"
    app_props =  dict(self.getConfig()['configurations']['application-properties'])
    app_props['atlas.server.bind.address'] = host_name

    metadata_protocol = "https" if app_props["atlas.enableTLS"] is True else "http"
    metadata_port = app_props["atlas.server.https.port"] if metadata_protocol == "https" else app_props["atlas.server.http.port"]
    app_props["atlas.rest.address"] = u'%s://%s:%s' % (metadata_protocol, host_name, metadata_port)
    app_props["atlas.server.ids"] = "id1"
    app_props["atlas.server.address.id1"] = u"%s:%s" % (host_name, metadata_port)
    app_props["atlas.server.ha.enabled"] = "false"

    self.assertResourceCalled('File', self.conf_dir + "/atlas-log4j.xml",
                              content=InlineTemplate(
                                self.getConfig()['configurations'][
                                  'atlas-log4j']['content']),
                              owner='atlas',
                              group='hadoop',
                              mode=0644,
                              )
    self.assertResourceCalled('File', self.conf_dir + "/atlas-env.sh",
                              content=InlineTemplate(
                                self.getConfig()['configurations'][
                                  'atlas-env']['content']),
                              owner='atlas',
                              group='hadoop',
                              mode=0755,
                              )
    self.assertResourceCalled('File', self.conf_dir+"/solr/solrconfig.xml",
                              content=InlineTemplate(
                                self.getConfig()['configurations'][
                                  'atlas-solrconfig']['content']),
                              owner='atlas',
                              group='hadoop',
                              mode=0644,
                              )
    # application.properties file
    self.assertResourceCalled('PropertiesFile', self.conf_dir + "/application.properties",
                              properties=app_props,
                              owner=u'atlas',
                              group=u'hadoop',
                              mode=0600,
                              )

    self.assertResourceCalled('TemplateConfig', self.conf_dir+"/atlas_jaas.conf",
                              owner = 'atlas',
                              )

    self.assertResourceCalled('Directory', '/var/log/ambari-infra-solr-client',
                              create_parents = True,
                              cd_access='a',
                              mode=0755
    )

    self.assertResourceCalled('Directory', '/usr/lib/ambari-infra-solr-client',
                              create_parents = True,
                              recursive_ownership = True,
                              cd_access='a',
                              mode=0755
    )
    self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/solrCloudCli.sh',
                              content=StaticFile('/usr/lib/ambari-infra-solr-client/solrCloudCli.sh'),
                              mode=0755,
                              )
    self.assertResourceCalled('File', '/usr/lib/ambari-infra-solr-client/log4j.properties',
                              content=self.getConfig()['configurations']['infra-solr-client-log4j']['content'],
                              mode=0644,
                              )
    self.assertResourceCalled('File', '/var/log/ambari-infra-solr-client/solr-client.log',
                              mode=0664,
                              content=''
    )
    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr --check-znode --retry 5 --interval 10')
    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --download-config --config-dir /tmp/solr_config_atlas_configs_0.[0-9]* --config-set atlas_configs --retry 30 --interval 5')
    self.assertResourceCalledRegexp('^File$', '^/tmp/solr_config_atlas_configs_0.[0-9]*',
                                    content=InlineTemplate(self.getConfig()['configurations']['atlas-solrconfig']['content']),
                                    only_if='test -d /tmp/solr_config_atlas_configs_0.[0-9]*')
    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --upload-config --config-dir /tmp/solr_config_atlas_configs_0.[0-9]* --config-set atlas_configs --retry 30 --interval 5',
                                    only_if='test -d /tmp/solr_config_atlas_configs_0.[0-9]*')
    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --upload-config --config-dir {0}/solr --config-set atlas_configs --retry 30 --interval 5'.format(self.conf_dir),
                                    not_if='test -d /tmp/solr_config_atlas_configs_0.[0-9]*')
    self.assertResourceCalledRegexp('^Directory$', '^/tmp/solr_config_atlas_configs_0.[0-9]*',
                                    action=['delete'],
                                    create_parents=True)
    kinit_path_local = get_kinit_path()
    self.assertResourceCalled('Execute', kinit_path_local + " -kt /etc/security/keytabs/ambari-infra-solr.keytab infra-solr/[email protected]; curl -k -s --negotiate -u : http://c6401.ambari.apache.org:8886/solr/admin/authorization | grep authorization.enabled && "
                              + kinit_path_local +" -kt /etc/security/keytabs/ambari-infra-solr.keytab infra-solr/[email protected]; curl -H 'Content-type:application/json' -d '{\"set-user-role\": {\"[email protected]\": [\"atlas_user\", \"ranger_audit_user\", \"dev\"]}}' -s -o /dev/null -w'%{http_code}' --negotiate -u: -k http://c6401.ambari.apache.org:8886/solr/admin/authorization | grep 200",
                              logoutput = True, tries = 30, try_sleep = 10, user='******')

    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --create-collection --collection vertex_index --config-set atlas_configs --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10')
    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --create-collection --collection edge_index --config-set atlas_configs --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10')
    self.assertResourceCalledRegexp('^Execute$', '^ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181/infra-solr --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --create-collection --collection fulltext_index --config-set atlas_configs --shards 1 --replication 1 --max-shards 1 --retry 5 --interval 10')

    self.assertResourceCalled('Execute', "ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr/configs/atlas_configs --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --secure-znode --sasl-users atlas,infra-solr --retry 5 --interval 10")
    self.assertResourceCalled('Execute', "ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr/collections/vertex_index --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --secure-znode --sasl-users atlas,infra-solr --retry 5 --interval 10")
    self.assertResourceCalled('Execute', "ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr/collections/edge_index --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --secure-znode --sasl-users atlas,infra-solr --retry 5 --interval 10")
    self.assertResourceCalled('Execute', "ambari-sudo.sh JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/lib/ambari-infra-solr-client/solrCloudCli.sh --zookeeper-connect-string c6401.ambari.apache.org:2181 --znode /infra-solr/collections/fulltext_index --jaas-file /usr/hdp/current/atlas-server/conf/atlas_jaas.conf --secure-znode --sasl-users atlas,infra-solr --retry 5 --interval 10")

  def test_configure_default(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
                       classname = "MetadataServer",
                       command = "configure",
                       config_file="default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )

    self.configureResourcesCalled()

    self.assertResourceCalled('File', '/tmp/atlas_hbase_setup.rb',
                              owner = "hbase",
                              group = "hadoop",
                              content=Template("atlas_hbase_setup.rb.j2"))

    self.assertResourceCalled('File', str(self.conf_dir + "/hdfs-site.xml"), action = ['delete'])
    self.assertResourceCalled('Directory', self.stack_root + '/current/atlas-server/', owner = 'atlas', group = 'hadoop', recursive_ownership = True)
    self.assertNoMoreResources()

  def test_configure_secure(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
                       classname = "MetadataServer",
                       command = "configure",
                       config_file="secure.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
                       )

    self.configureResourcesCalledSecure()

    self.assertResourceCalled('File', '/tmp/atlas_hbase_setup.rb',
                              owner = "hbase",
                              group = "hadoop",
                              content=Template("atlas_hbase_setup.rb.j2"))

    self.assertResourceCalled('File', str(self.conf_dir + "/hdfs-site.xml"), action = ['delete'])

    self.assertResourceCalled('Directory', self.stack_root + '/current/atlas-server/', owner = 'atlas', group = 'hadoop', recursive_ownership = True)
    self.assertNoMoreResources()

  def test_start_default(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
                       classname = "MetadataServer",
                       command = "start",
                       config_file="default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.configureResourcesCalled()

    self.assertResourceCalled('File', '/tmp/atlas_hbase_setup.rb',
                              owner = "hbase",
                              group = "hadoop",
                              content=Template("atlas_hbase_setup.rb.j2"))

    self.assertResourceCalled('File', str(self.conf_dir + "/hdfs-site.xml"), action = ['delete'])
    self.assertResourceCalled('Directory', self.stack_root + '/current/atlas-server/', owner = 'atlas', group = 'hadoop', recursive_ownership = True)

    self.assertResourceCalled('Execute', 'source {0}/atlas-env.sh ; {1}/current/atlas-server/bin/atlas_start.py'.format(self.conf_dir,self.stack_root),
                              not_if = 'ls /var/run/atlas/atlas.pid >/dev/null 2>&1 && ps -p `cat /var/run/atlas/atlas.pid` >/dev/null 2>&1',
                              user = '******',
    )

  @patch('os.path.isdir')
  def test_stop_default(self, is_dir_mock):
    is_dir_mock.return_value = True

    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/metadata_server.py",
                       classname = "MetadataServer",
                       command = "stop",
                       config_file="default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', 'source {0}/atlas-env.sh; {1}/current/atlas-server/bin/atlas_stop.py'.format(self.conf_dir,self.stack_root),
                              user = '******',
    )
    self.assertResourceCalled('File', '/var/run/atlas/atlas.pid',
        action = ['delete'],
    )
コード例 #40
0
def get_tarball_paths(name,
                      use_upgrading_version_during_upgrade=True,
                      custom_source_file=None,
                      custom_dest_file=None):
    """
  For a given tarball name, get the source and destination paths to use.
  :param name: Tarball name
  :param use_upgrading_version_during_upgrade:
  :param custom_source_file: If specified, use this source path instead of the default one from the map.
  :param custom_dest_file: If specified, use this destination path instead of the default one from the map.
  :return: A tuple of (success status, source path, destination path, optional preparation function which is invoked to setup the tarball)
  """
    stack_name = Script.get_stack_name()

    if not stack_name:
        Logger.error(
            "Cannot copy {0} tarball to HDFS because stack name could not be determined."
            .format(str(name)))
        return False, None, None, None  # keep the 4-tuple contract documented above

    if name is None or name.lower() not in TARBALL_MAP:
        Logger.error(
            "Cannot copy tarball to HDFS because {0} is not supported in stack {1} for this operation."
            .format(str(name), str(stack_name)))
        return False, None, None, None

    service = TARBALL_MAP[name.lower()]['service']

    stack_version = get_current_version(service=service,
                                        use_upgrading_version_during_upgrade=
                                        use_upgrading_version_during_upgrade)
    if not stack_version:
        Logger.error(
            "Cannot copy {0} tarball to HDFS because the stack version could not be determined."
            .format(str(name)))
        return False, None, None, None

    stack_root = Script.get_stack_root()
    if not stack_root:
        Logger.error(
            "Cannot copy {0} tarball to HDFS because the stack root could not be determined."
            .format(str(name)))
        return False, None, None, None

    (source_file, dest_file) = TARBALL_MAP[name.lower()]['dirs']

    if custom_source_file is not None:
        source_file = custom_source_file

    if custom_dest_file is not None:
        dest_file = custom_dest_file

    source_file = source_file.replace(STACK_NAME_PATTERN, stack_name.lower())
    dest_file = dest_file.replace(STACK_NAME_PATTERN, stack_name.lower())

    source_file = source_file.replace(STACK_ROOT_PATTERN, stack_root.lower())
    dest_file = dest_file.replace(STACK_ROOT_PATTERN, stack_root.lower())

    source_file = source_file.replace(STACK_VERSION_PATTERN, stack_version)
    dest_file = dest_file.replace(STACK_VERSION_PATTERN, stack_version)

    prepare_function = None
    if "prepare_function" in TARBALL_MAP[name.lower()]:
        prepare_function = TARBALL_MAP[name.lower()]['prepare_function']

    return True, source_file, dest_file, prepare_function
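# Hedged illustration of the replace() chain above. The placeholder values are
# assumptions, not copied from the real constants:
#   STACK_ROOT_PATTERN    -> "{{ stack_root }}"
#   STACK_VERSION_PATTERN -> "{{ stack_version }}"
# so a mapped entry such as
#   "{{ stack_root }}/{{ stack_version }}/tez/lib/tez.tar.gz"
# resolves, for stack_root "/usr/hdp" and version "2.5.0.0-1245", to
#   "/usr/hdp/2.5.0.0-1245/tez/lib/tez.tar.gz"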
コード例 #41
0
ファイル: params.py プロジェクト: fkpp/ambari
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import Direction

# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
    'SPARK_JOBHISTORYSERVER': 'spark-historyserver',
    'SPARK_CLIENT': 'spark-client',
    'SPARK_THRIFTSERVER': 'spark-thriftserver',
    'LIVY_SERVER': 'livy-server',
    'LIVY_CLIENT': 'livy-client'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP,
                                                     "SPARK_CLIENT")

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

upgrade_direction = default("/commandParams/upgrade_direction", None)

stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
stack_version_unformatted = config['hostLevelParams']['stack_version']
if upgrade_direction == Direction.DOWNGRADE:
    stack_version_unformatted = config['commandParams']['original_stack']
stack_version_formatted = format_stack_version(stack_version_unformatted)
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
コード例 #42
0
#!/usr/bin/env python
from resource_management import *
from resource_management.libraries.script.script import Script
import os, socket

script_dir = os.path.dirname(os.path.realpath(__file__))
files_dir = os.path.join(os.path.dirname(script_dir), 'files')

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()
stack_name = default("/hostLevelParams/stack_name", None)
stack_version_buildnum = default("/commandParams/version", None)

impala_user = config['configurations']['impala-env']['impala_user']
impala_group = config['configurations']['impala-env']['impala_group']
impala_log_dir = config['configurations']['impala-env']['impala_log_dir']
impala_scratch_dir = config['configurations']['impala-env']['impala_scratch_dir']
mem_limit = config['configurations']['impala-env']['mem_limit']
impala_log_file = os.path.join(impala_log_dir,'impala-setup.log')
impala_catalog_host = config['clusterHostInfo']['impala_catalog_service_hosts'][0]
impala_state_store_host = config['clusterHostInfo']['impala_state_store_hosts'][0]

current_host_name = socket.getfqdn()

security_enabled = config['configurations']['cluster-env']['security_enabled']

hdfs_host = default("/clusterHostInfo/namenode_hosts", [''])[0]
if not hdfs_host:
    hdfs_host = default("/clusterHostInfo/namenode_host", [''])[0]
コード例 #43
0
ファイル: params_linux.py プロジェクト: maduhu/HDP-ambari
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.os_check import OSCheck

from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.copy_tarball import STACK_VERSION_PATTERN
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from resource_management.libraries import functions

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_name = default("/hostLevelParams/stack_name", None)

# node hostname
hostname = config["hostname"]

# This is expected to be of the form #.#.#.#
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version_major = format_hdp_stack_version(stack_version_unformatted)
stack_is_hdp21 = Script.is_hdp_stack_less_than("2.2")

# this is not available on INSTALL action because hdp-select is not available
hdp_stack_version = functions.get_hdp_version('hive-server2')
コード例 #44
0
ファイル: install_packages.py プロジェクト: krigu543/ambari
  def actionexecute(self, env):
    num_errors = 0

    # Parse parameters
    config = Script.get_config()

    repo_rhel_suse = config['configurations']['cluster-env']['repo_suse_rhel_template']
    repo_ubuntu = config['configurations']['cluster-env']['repo_ubuntu_template']
    template = repo_rhel_suse if OSCheck.is_redhat_family() or OSCheck.is_suse_family() else repo_ubuntu

    # Handle a SIGTERM and SIGINT gracefully
    signal.signal(signal.SIGTERM, self.abort_handler)
    signal.signal(signal.SIGINT, self.abort_handler)

    self.repository_version_id = None

    base_urls = []
    # Select dict that contains parameters
    try:
      if 'base_urls' in config['roleParams']:
        base_urls = json.loads(config['roleParams']['base_urls'])

      self.repository_version = config['roleParams']['repository_version']
      package_list = json.loads(config['roleParams']['package_list'])
      stack_id = config['roleParams']['stack_id']

      if 'repository_version_id' in config['roleParams']:
        self.repository_version_id = config['roleParams']['repository_version_id']
    except KeyError:
      pass

    # current stack information
    self.current_stack_version_formatted = None
    if 'stack_version' in config['hostLevelParams']:
      current_stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
      self.current_stack_version_formatted = format_stack_version(current_stack_version_unformatted)


    self.stack_name = Script.get_stack_name()
    if self.stack_name is None:
      raise Fail("Cannot determine the stack name")

    self.stack_root_folder = Script.get_stack_root()
    if self.stack_root_folder is None:
      raise Fail("Cannot determine the stack's root directory")

    if self.repository_version is None:
      raise Fail("Cannot determine the repository version to install")

    self.repository_version = self.repository_version.strip()

    # Install/update repositories
    self.current_repositories = []
    self.current_repo_files = set()

    # Enable base system repositories
    # We don't need that for RHEL family, because we leave all repos enabled
    # except disabled HDP* ones
    if OSCheck.is_suse_family():
      self.current_repositories.append('base')
    elif OSCheck.is_ubuntu_family():
      self.current_repo_files.add('base')

    Logger.info("Will install packages for repository version {0}".format(self.repository_version))

    if 0 == len(base_urls):
      Logger.warning("Repository list is empty. Ambari may not be managing the repositories for {0}.".format(self.repository_version))

    try:
      if 'repositoryFile' in config:
        create_repo_files(template, CommandRepository(config['repositoryFile']))
      else:
        append_to_file = False
        for url_info in base_urls:
          repo_name, repo_file = self.install_repository(url_info, append_to_file, template)
          self.current_repositories.append(repo_name)
          self.current_repo_files.add(repo_file)
          append_to_file = True

    except Exception, err:
      Logger.logger.exception("Cannot install repository files. Error: {0}".format(str(err)))
      num_errors += 1
コード例 #45
0
def _prepare_tez_tarball():
    """
  Prepares the Tez tarball by adding the Hadoop native libraries found in the mapreduce tarball.
  It's very important to use the version of mapreduce which matches tez here.
  Additionally, this will also copy native LZO to the tez tarball if LZO is enabled and the
  GPL license has been accepted.
  :return:  the full path of the newly created tez tarball to use
  """
    import tempfile

    Logger.info("Preparing the Tez tarball...")

    # get the mapreduce tarball which matches the version of tez
    # tez installs the mapreduce tar, so it should always be present
    _, mapreduce_source_file, _, _ = get_tarball_paths("mapreduce")
    _, tez_source_file, _, _ = get_tarball_paths("tez")

    temp_dir = Script.get_tmp_dir()

    # create the temp staging directories ensuring that non-root agents using tarfile can work with them
    mapreduce_temp_dir = tempfile.mkdtemp(prefix="mapreduce-tarball-",
                                          dir=temp_dir)
    tez_temp_dir = tempfile.mkdtemp(prefix="tez-tarball-", dir=temp_dir)
    sudo.chmod(mapreduce_temp_dir, 0777)
    sudo.chmod(tez_temp_dir, 0777)

    Logger.info("Extracting {0} to {1}".format(mapreduce_source_file,
                                               mapreduce_temp_dir))
    tar_archive.untar_archive(mapreduce_source_file, mapreduce_temp_dir)

    Logger.info("Extracting {0} to {1}".format(tez_source_file, tez_temp_dir))
    tar_archive.untar_archive(tez_source_file, tez_temp_dir)

    hadoop_lib_native_dir = os.path.join(mapreduce_temp_dir, "hadoop", "lib",
                                         "native")
    tez_lib_dir = os.path.join(tez_temp_dir, "lib")

    if not os.path.exists(hadoop_lib_native_dir):
        raise Fail(
            "Unable to seed the Tez tarball with native libraries since the source Hadoop native lib directory {0} does not exist"
            .format(hadoop_lib_native_dir))

    if not os.path.exists(tez_lib_dir):
        raise Fail(
            "Unable to seed the Tez tarball with native libraries since the target Tez lib directory {0} does not exist"
            .format(tez_lib_dir))

    # copy native libraries from hadoop to tez
    Execute(("cp", "-a", hadoop_lib_native_dir, tez_lib_dir), sudo=True)

    # if enabled, LZO GPL libraries must be copied as well
    if lzo_utils.should_install_lzo():
        stack_root = Script.get_stack_root()
        service_version = component_version.get_component_repository_version(
            service_name="TEZ")

        # some installations might not have Tez; fall back to MapReduce2 to get the LZO libraries
        if service_version is None:
            Logger.warning(
                "Tez does not appear to be installed, using the MapReduce version to get the LZO libraries"
            )
            service_version = component_version.get_component_repository_version(
                service_name="MAPREDUCE2")

        hadoop_lib_native_lzo_dir = os.path.join(stack_root, service_version,
                                                 "hadoop", "lib", "native")

        if not sudo.path_isdir(hadoop_lib_native_lzo_dir):
            Logger.warning(
                "Unable to locate native LZO libraries at {0}, falling back to the Hadoop home"
                .format(hadoop_lib_native_lzo_dir))
            hadoop_lib_native_lzo_dir = os.path.join(stack_root, "current",
                                                     "hadoop-client", "lib",
                                                     "native")

        if not sudo.path_isdir(hadoop_lib_native_lzo_dir):
            raise Fail(
                "Unable to seed the Tez tarball with native libraries since LZO is enabled but the native LZO libraries could not be found at {0}"
                .format(hadoop_lib_native_lzo_dir))

        Execute(("cp", "-a", hadoop_lib_native_lzo_dir, tez_lib_dir),
                sudo=True)

    # ensure that the tez/lib directory is readable by non-root (which it typically is not)
    Directory(tez_lib_dir, mode=0755, cd_access='a', recursive_ownership=True)

    # create the staging directory so that non-root agents can write to it
    tez_native_tarball_staging_dir = os.path.join(
        temp_dir, "tez-native-tarball-staging")
    if not os.path.exists(tez_native_tarball_staging_dir):
        Directory(tez_native_tarball_staging_dir,
                  mode=0777,
                  cd_access='a',
                  create_parents=True,
                  recursive_ownership=True)

    tez_tarball_with_native_lib = os.path.join(tez_native_tarball_staging_dir,
                                               "tez-native.tar.gz")
    Logger.info("Creating a new Tez tarball at {0}".format(
        tez_tarball_with_native_lib))
    tar_archive.archive_dir_via_temp_file(tez_tarball_with_native_lib,
                                          tez_temp_dir)

    # ensure that the tarball can be read and uploaded
    sudo.chmod(tez_tarball_with_native_lib, 0744)

    # cleanup
    sudo.rmtree(mapreduce_temp_dir)
    sudo.rmtree(tez_temp_dir)

    return tez_tarball_with_native_lib
コード例 #46
0
# a map of the Ambari role to the component name
# for use with /usr/hdp/current/<component>
MAPR_SERVER_ROLE_DIRECTORY_MAP = {
    'HISTORYSERVER': 'hadoop-mapreduce-historyserver',
    'MAPREDUCE2_CLIENT': 'hadoop-mapreduce-client',
}

YARN_SERVER_ROLE_DIRECTORY_MAP = {
    'APP_TIMELINE_SERVER': 'hadoop-yarn-timelineserver',
    'NODEMANAGER': 'hadoop-yarn-nodemanager',
    'RESOURCEMANAGER': 'hadoop-yarn-resourcemanager',
    'YARN_CLIENT': 'hadoop-yarn-client'
}

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_name = default("/hostLevelParams/stack_name", None)

# This is expected to be of the form #.#.#.#
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version_major = format_hdp_stack_version(stack_version_unformatted)
hdp_stack_version = functions.get_hdp_version('hadoop-yarn-resourcemanager')

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
# It cannot be used during the initial Cluster Install because the version is not yet known.
version = default("/commandParams/version", None)

hostname = config['hostname']
コード例 #47
0
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_klist_path
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource

from resource_management.libraries.functions.format_jvm_option import format_jvm_option
from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.is_empty import is_empty


config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)

# The desired role is only available during a Non-Rolling Upgrade in HA.
# The server calculates which of the two NameNodes will be the active, and the other the standby since they
# are started using different commands.
desired_namenode_role = default("/commandParams/desired_namenode_role", None)
コード例 #48
0
ファイル: params_linux.py プロジェクト: csivaguru/ambari
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions import get_klist_path
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource

from resource_management.libraries.functions.format_jvm_option import format_jvm_option
from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.expect import expect

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
upgrade_direction = default("/commandParams/upgrade_direction", None)
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
agent_stack_retry_on_unavailability = config['hostLevelParams'][
    'agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count",
                                 int)
stack_supports_ranger_kerberos = stack_version_formatted and check_stack_feature(
    StackFeature.RANGER_KERBEROS_SUPPORT, stack_version_formatted)

# there is a stack upgrade which has not yet been finalized; it's currently suspended
コード例 #49
0
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.os_check import OSCheck

from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.copy_tarball import STACK_VERSION_PATTERN
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from resource_management.libraries import functions

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_name = default("/hostLevelParams/stack_name", None)

# node hostname
hostname = config["hostname"]

# This is expected to be of the form #.#.#.#
stack_version_unformatted = str(config["hostLevelParams"]["stack_version"])
hdp_stack_version_major = format_hdp_stack_version(stack_version_unformatted)
stack_is_hdp21 = Script.is_hdp_stack_greater_or_equal("2.0") and Script.is_hdp_stack_less_than("2.2")

# this is not available on INSTALL action because hdp-select is not available
hdp_stack_version = functions.get_hdp_version("hive-server2")
コード例 #50
0

# Ambari Commons & Resource Management Imports
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.default import default
from resource_management.libraries.script.script import Script

# Either HIVE_METASTORE, HIVE_SERVER, HIVE_CLIENT
role = default("/role", None)

config = Script.get_config()

stack_root = Script.get_stack_root()

hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
hive_pid = format("{hive_pid_dir}/hive-server.pid")
hive_interactive_pid = format("{hive_pid_dir}/hive-interactive.pid")
hive_metastore_pid = format("{hive_pid_dir}/hive.pid")

process_name = 'mysqld'

SERVICE_FILE_TEMPLATES = [
    '/etc/init.d/{0}', '/usr/lib/systemd/system/{0}.service'
]
POSSIBLE_DAEMON_NAMES = ['mysql', 'mysqld', 'mariadb']
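# Illustration (assumed usage): candidate service files are produced by
# formatting each template with each candidate daemon name, e.g.
#   SERVICE_FILE_TEMPLATES[0].format('mysqld')  -> '/etc/init.d/mysqld'
#   SERVICE_FILE_TEMPLATES[1].format('mariadb') -> '/usr/lib/systemd/system/mariadb.service'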
コード例 #51
0
import ambari_simplejson as json  # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from resource_management.libraries.functions.get_hdp_version import get_hdp_version
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from status_params import *
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import conf_select

# server configurations
config = Script.get_config()

tmp_dir = Script.get_tmp_dir()
stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)
version = default("/commandParams/version", None)
if version is None:
  version = get_hdp_version('knox-server')
# E.g., 2.3.2.0
version_formatted = format_hdp_stack_version(version)

# E.g., 2.3
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

# This is the version whose state is CURRENT. During an RU, this is the source version.
コード例 #52
0
    def actionexecute(self, env):
        num_errors = 0

        # Parse parameters
        config = Script.get_config()

        try:
            command_repository = CommandRepository(config['repositoryFile'])
        except KeyError:
            raise Fail(
                "The command repository indicated by 'repositoryFile' was not found"
            )

        repo_rhel_suse = config['configurations']['cluster-env'][
            'repo_suse_rhel_template']
        repo_ubuntu = config['configurations']['cluster-env'][
            'repo_ubuntu_template']
        template = repo_rhel_suse if OSCheck.is_redhat_family(
        ) or OSCheck.is_suse_family() else repo_ubuntu

        # Handle a SIGTERM and SIGINT gracefully
        signal.signal(signal.SIGTERM, self.abort_handler)
        signal.signal(signal.SIGINT, self.abort_handler)

        self.repository_version = command_repository.version_string

        # Select dict that contains parameters
        try:
            package_list = json.loads(config['roleParams']['package_list'])
            stack_id = config['roleParams']['stack_id']
        except KeyError:
            pass

        self.stack_name = Script.get_stack_name()
        if self.stack_name is None:
            raise Fail("Cannot determine the stack name")

        self.stack_root_folder = Script.get_stack_root()
        if self.stack_root_folder is None:
            raise Fail("Cannot determine the stack's root directory")

        if self.repository_version is None:
            raise Fail("Cannot determine the repository version to install")

        self.repository_version = self.repository_version.strip()

        try:
            if not command_repository.items:
                Logger.warning(
                    "Repository list is empty. Ambari may not be managing the repositories for {0}."
                    .format(self.repository_version))
            else:
                Logger.info(
                    "Will install packages for repository version {0}".format(
                        self.repository_version))
                new_repo_files = create_repo_files(template,
                                                   command_repository)
                self.repo_files.update(new_repo_files)
        except Exception, err:
            Logger.logger.exception(
                "Cannot install repository files. Error: {0}".format(str(err)))
            num_errors += 1
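The handler registered above for SIGTERM/SIGINT falls outside this excerpt; a
minimal sketch of what abort_handler could look like (the body below is an
assumption for illustration, not the excerpted project's code):

    def abort_handler(self, signum, frame):
        # Assumed behavior: log the signal and raise Fail so the in-flight
        # package installation is reported as aborted rather than left hanging.
        Logger.error("Caught signal {0}; aborting repository installation.".format(signum))
        raise Fail("Installation aborted by signal {0}".format(signum))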
Code Example #53
def hive(name=None):
    import params

    XmlConfig(
        "hive-site.xml",
        conf_dir=params.hive_conf_dir,
        configurations=params.config['configurations']['hive-site'],
        owner=params.hive_user,
        configuration_attributes=params.config['configuration_attributes']['hive-site'])

    if name in ["hiveserver2", "metastore"]:
        # Manually overriding service logon user & password set by the installation package
        service_name = params.service_map[name]
        ServiceConfig(service_name,
                      action="change_user",
                      username=params.hive_user,
                      password=Script.get_password(params.hive_user))
        Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"),
                logoutput=True,
                user=params.hadoop_user)

    if name == 'metastore':
        if params.init_metastore_schema:
            check_schema_created_cmd = format(
                'cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
                '-dbType {hive_metastore_db_type} '
                '-userName {hive_metastore_user_name} '
                '-passWord {hive_metastore_user_passwd!p}'
                '&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"',  # cmd.exe quirk: propagate the process exit code manually
                hive_bin=params.hive_bin,
                hive_metastore_db_type=params.hive_metastore_db_type,
                hive_metastore_user_name=params.hive_metastore_user_name,
                hive_metastore_user_passwd=params.hive_metastore_user_passwd)
            try:
                Execute(check_schema_created_cmd)
            except Fail:
                create_schema_cmd = format(
                    'cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
                    '-dbType {hive_metastore_db_type} '
                    '-userName {hive_metastore_user_name} '
                    '-passWord {hive_metastore_user_passwd!p}',
                    hive_bin=params.hive_bin,
                    hive_metastore_db_type=params.hive_metastore_db_type,
                    hive_metastore_user_name=params.hive_metastore_user_name,
                    hive_metastore_user_passwd=params.hive_metastore_user_passwd)
                Execute(create_schema_cmd,
                        user=params.hive_user,
                        logoutput=True)

    if name == "hiveserver2":
        if params.hive_execution_engine == "tez":
            # Init the tez app dir in hadoop
            script_file = __file__.replace('/', os.sep)
            cmd_file = os.path.normpath(
                os.path.join(os.path.dirname(script_file), "..", "files",
                             "hiveTezSetup.cmd"))

            Execute("cmd /c " + cmd_file,
                    logoutput=True,
                    user=params.hadoop_user)
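A hedged usage sketch: on Windows this function would typically be invoked
from a service script's install/configure step. The Environment context
manager is how resource_management scripts establish execution context; the
call below assumes hive() is importable and a params module is on the path.

from resource_management.core.environment import Environment

with Environment() as env:
    hive(name="hiveserver2")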
Code Example #54
File: params_linux.py Project: garyelephant/ambari
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import format
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions import get_unique_id_and_date
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.expect import expect
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
from ambari_commons.constants import AMBARI_SUDO_BINARY
import status_params

# server configurations
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_name = status_params.stack_name
agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
version = default("/commandParams/version", None)
component_directory = status_params.component_directory
etc_prefix_dir = "/etc/hbase"

stack_version_unformatted = status_params.stack_version_unformatted
stack_version_formatted = status_params.stack_version_formatted
stack_root = status_params.stack_root
Code Example #55
from ambari_commons.constants import AMBARI_SUDO_BINARY
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import get_port_from_url
from resource_management.libraries.script.script import Script

from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages

from urlparse import urlparse

import status_params
import os

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

hostname = config["hostname"]

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
Code Example #56
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions import upgrade_summary
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.stack_features import get_stack_feature_version
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from resource_management.libraries.functions.expect import expect
from resource_management.libraries import functions
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster
from ambari_commons.constants import AMBARI_SUDO_BINARY
import status_params

# Default log4j version; put config files under /etc/hive/conf
log4j_version = '1'

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
sudo = AMBARI_SUDO_BINARY

stack_root = status_params.stack_root
stack_name = status_params.stack_name
stack_name_uppercase = stack_name.upper()
agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)

# Needed since this is an Atlas Hook service.
cluster_name = config['clusterName']

# node hostname
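(Hedged completion: the excerpt ends at the comment above; Code Example #55
uses the same comment-and-assignment pattern, so the next line is likely.)

hostname = config["hostname"]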
Code Example #57
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from ambari_commons import OSCheck

# a map of the Ambari role to the component name
# for use with /usr/hdp/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
  'HIVE_METASTORE' : 'hive-metastore',
  'HIVE_SERVER' : 'hive-server2',
  'WEBHCAT_SERVER' : 'hive-webhcat',
  'HIVE_CLIENT' : 'hive-client',
  'HCAT' : 'hive-client'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "HIVE_CLIENT")

config = Script.get_config()

if OSCheck.is_windows_family():
  hive_metastore_win_service_name = "metastore"
  hive_client_win_service_name = "hwi"
  hive_server_win_service_name = "hiveserver2"
  webhcat_server_win_service_name = "templeton"
else:
  hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
  hive_pid = 'hive-server.pid'

  hive_metastore_pid = 'hive.pid'

  hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir']
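A hedged illustration of how the resolved component directory is consumed: it
is substituted into /usr/hdp/current/<component> paths. The variable names
below are assumptions for illustration, not part of the excerpt.

hive_home = "/usr/hdp/current/{0}".format(component_directory)
hive_bin = "{0}/bin".format(hive_home)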
Code Example #58
import os
import tempfile

from resource_management.core import sudo
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management.libraries.functions import tar_archive
from resource_management.libraries.functions import format
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.script.script import Script


BACKUP_TEMP_DIR = "knox-upgrade-backup"
BACKUP_DATA_ARCHIVE = "knox-data-backup.tar"
STACK_ROOT_DEFAULT = Script.get_stack_root()


def backup_data():
    """
    Backs up the Knox data directory as part of the upgrade process.
    :return: the absolute path of the backup directory.
    """
    Logger.info('Backing up Knox data directory before upgrade...')
    directoryMappings = _get_directory_mappings_during_upgrade()

    Logger.info("Directory mappings to backup: {0}".format(str(directoryMappings)))

    absolute_backup_dir = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
    if not os.path.isdir(absolute_backup_dir):
        os.makedirs(absolute_backup_dir)
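    # Hedged continuation (the excerpt ends above): archive each mapped
    # directory into the backup location. archive_directory_dereference is
    # assumed from the tar_archive helper imported at the top of this example.
    for directory in directoryMappings:
        if not sudo.path_exists(directory):
            raise Fail("Unable to back up missing directory {0}".format(directory))

        archive = os.path.join(absolute_backup_dir, directoryMappings[directory])
        Logger.info('Compressing {0} to {1}'.format(directory, archive))
        tar_archive.archive_directory_dereference(archive, directory)

    return absolute_backup_dir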
Code Example #59
import status_params

from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script

# server configurations
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()

stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

stack_name = default("/hostLevelParams/stack_name", None)
current_version = default("/hostLevelParams/current_version", None)
component_directory = status_params.component_directory

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)

# default parameters (note: the stack version is hardcoded in these paths)
zk_home = "/usr/hdp/2.3.2.0-2950/zookeeper"
zk_bin = "/usr/hdp/2.3.2.0-2950/zookeeper/bin"
Code Example #60
File: params.py Project: willwill1101/ambari
"""
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import format
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.get_bare_principal import get_bare_principal
from resource_management.libraries.script.script import Script

import status_params

# server configurations
config = Script.get_config()
exec_tmp_dir = status_params.tmp_dir

# security enabled
security_enabled = status_params.security_enabled

# hdp version
stack_name = default("/hostLevelParams/stack_name", None)
version = default("/commandParams/version", None)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

has_secure_user_auth = False
if Script.is_hdp_stack_greater_or_equal("2.3"):
    has_secure_user_auth = True