def server_files():
  import params

  rrd_py_path = params.rrd_py_path
  Directory(rrd_py_path,
            recursive=True
  )
  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
  TemplateConfig(rrd_py_file_path,
                 owner="root",
                 group="root",
                 mode=0755
  )
  rrd_file_owner = params.gmetad_user

  Directory(params.rrdcached_base_dir,
            owner=rrd_file_owner,
            group=rrd_file_owner,
            mode=0755,
            recursive=True
  )
  
  if OSCheck.is_suse_family() or OSCheck.is_ubuntu_family():
    File( params.ganglia_apache_config_file,
      content = Template("ganglia.conf.j2"),
      mode = 0644
    )
Exemplo n.º 2
0
  def actionexecute(self, env):
    """Recreate host repository files from the server-provided repo info.

    Parses the JSON repository list from hostLevelParams and declares a
    Repository resource for each entry.
    :raises Fail: if anything goes wrong while updating the repo files.
    """
    config = Script.get_config()
    structured_output = {}

    try:
      repo_info_json = config['hostLevelParams']['repo_info']
      repo_info_dict = json.loads(repo_info_json)

      # The template depends only on the OS family; resolve it once
      # instead of re-reading the configuration on every loop iteration.
      repo_rhel_suse = config['configurations']['cluster-env']['repo_suse_rhel_template']
      repo_ubuntu = config['configurations']['cluster-env']['repo_ubuntu_template']
      template = repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else repo_ubuntu

      for item in repo_info_dict["repositories"]:
        base_url = item["base_url"]
        repo_name = item["repo_name"]
        repo_id = item["repo_id"]

        ubuntu_components = [repo_name] + self.UBUNTU_REPO_COMPONENTS_POSTFIX

        Repository(repo_id,
                 action = "create",
                 base_url = base_url,
                 mirror_list = None,
                 repo_file_name = repo_name,
                 repo_template = template,
                 components = ubuntu_components, # ubuntu specific
        )
        structured_output["repo_update"] = {"exit_code" : 0, "message": format("Repository files successfully updated!")}
    except Exception:
      # The exception object was previously bound but unused; the full
      # traceback is captured by Logger.exception anyway.
      Logger.logger.exception("ERROR: There was an unexpected error while updating repositories")
      raise Fail("Failed to update repo files!")
Exemplo n.º 3
0
  def install_packages(self, env, exclude_packages=None):
    """
    List of packages that are required by service is received from the server
    as a command parameter. The method installs all packages
    from this list except the ones named in exclude_packages.
    """
    # A mutable default argument ([]) is shared between calls; use None as
    # the sentinel and normalize it here instead.
    if exclude_packages is None:
      exclude_packages = []

    config = self.get_config()
    try:
      package_list_str = config['hostLevelParams']['package_list']
      if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
        package_list = json.loads(package_list_str)
        for package in package_list:
          if not package['name'] in exclude_packages:
            name = package['name']
            if OSCheck.is_windows_family():
              if name[-4:] == ".msi":
                #TODO all msis must be located in resource folder of server, change it to repo later
                Msi(name, http_source=os.path.join(config['hostLevelParams']['jdk_location']))
            else:
              Package(name)
    except KeyError:
      pass  # No reason to worry: no package list means nothing to install

    if OSCheck.is_windows_family():
      #TODO hacky install of windows msi, remove it or move to old(2.1) stack definition when component based install will be implemented
      install_windows_msi(os.path.join(config['hostLevelParams']['jdk_location'], "hdp.msi"),
                          config["hostLevelParams"]["agentCacheDir"], "hdp.msi", self.get_password("hadoop"),
                          str(config['hostLevelParams']['stack_version']))
      reload_windows_env()
    pass
def list_ambari_managed_repos(stack_name):
  """
  Lists all Ambari-managed repositories that are present at host.

  :param stack_name: stack name, e.g. "HDP" (upper-cased before matching).
  :return: list of unique repo file base names (extension stripped),
           in first-seen order.
  :raises Fail: when the OS family's repository directory is unknown.
  """
  stack_name = stack_name.upper()
  # TODO : get it dynamically from the server
  repository_names = [stack_name, stack_name + "-UTILS" ]
  if OSCheck.is_ubuntu_family():
    repo_dir = '/etc/apt/sources.list.d/'
  elif OSCheck.is_redhat_family():  # Centos/RHEL 5/6
    repo_dir = '/etc/yum.repos.d/'
  elif OSCheck.is_suse_family():
    repo_dir = '/etc/zypp/repos.d/'
  else:
    # Fixed typo in the error message ("dermine" -> "determine").
    raise Fail('Can not determine repo dir')
  repos = []
  for name in repository_names:
    # List all files that match pattern
    files = glob.glob(os.path.join(repo_dir, name) + '*')
    for f in files:
      filename = os.path.basename(f)
      # leave out extension
      reponame = os.path.splitext(filename)[0]
      repos.append(reponame)
  # De-duplicate while preserving first-seen order; seen.add returns None,
  # so the predicate is True exactly for first occurrences.
  seen = set()
  uniq = [s for s in repos if not (s in seen or seen.add(s))]
  return uniq
Exemplo n.º 5
0
  def install_repository(self, url_info, repository_version, append_to_file):
    """Create (or append to) a repo file for one repository version.

    :param url_info: dict with 'name' and optional 'baseUrl'/'mirrorsList'.
    :param repository_version: version string, appended to the repo name.
    :param append_to_file: whether to append to an existing repo file.
    :return: tuple (repo name, repo file name).
    """
    template = "repo_suse_rhel.j2" if OSCheck.is_redhat_family() or OSCheck.is_suse_family() else "repo_ubuntu.j2"

    # dict.get already yields None for missing keys, which is exactly the
    # value the Repository resource expects for "not configured".
    repo = {
      'repoName': "{0}-{1}".format(url_info['name'], repository_version),
      'baseurl': url_info.get('baseUrl'),
      'mirrorsList': url_info.get('mirrorsList'),
    }

    ubuntu_components = [url_info['name']] + self.UBUNTU_REPO_COMPONENTS_POSTFIX
    file_name = self.REPO_FILE_NAME_PREFIX + repository_version

    Repository(repo['repoName'],
      action = "create",
      base_url = repo['baseurl'],
      mirror_list = repo['mirrorsList'],
      repo_file_name = file_name,
      repo_template = template,
      append_to_file = append_to_file,
      components = ubuntu_components,  # ubuntu specific
    )
    return repo['repoName'], file_name
 def get_serivice_params(self):
   # NOTE(review): the method name contains a typo ("serivice") but is part
   # of the public interface, so it is kept for backward compatibility.
   # Resolve the Apache httpd service name and conf dir for this OS family.
   self.system = System.get_instance()
   uses_apache2 = OSCheck.is_suse_family() or OSCheck.is_ubuntu_family()
   if uses_apache2:
     self.service_name = "apache2"
     self.httpd_conf_dir = '/etc/apache2'
   else:
     self.service_name = "httpd"
     self.httpd_conf_dir = '/etc/httpd/conf'
def install_repos():
  """Write repo files for the stack (and optional service) repositories."""
  import params

  # Hosts prepared outside of Ambari manage their own repo files.
  if params.host_sys_prepped:
    return

  is_rhel_or_suse = OSCheck.is_suse_family() or OSCheck.is_redhat_family()
  template = params.repo_rhel_suse if is_rhel_or_suse else params.repo_ubuntu
  _alter_repo("create", params.repo_info, template)
  if params.service_repo_info:
    _alter_repo("create", params.service_repo_info, template)
 def stop(self, env):
   # Pick the family-specific shutdown commands, then run them in order.
   if OSCheck.is_suse_family():
     commands = ('rckadmind stop', 'rckrb5kdc stop')
   elif OSCheck.is_ubuntu_family():
     commands = ('service krb5-kdc stop', 'service krb5-admin-server stop')
   else:
     commands = ('service krb5kdc stop', 'service kadmin stop')
   for command in commands:
     Execute(command)
  def get_base_packages_to_install(self):
    """
    HACK: list packages which should be installed without disabling any repos. (This is planned to fix in Ambari-2.2)
    """
    # The fuse userspace library package is named differently per family.
    if OSCheck.is_suse_family() or OSCheck.is_ubuntu_family():
      fuse_libs_package = 'libfuse2'
    else:
      fuse_libs_package = 'fuse-libs'
    return ['fuse', fuse_libs_package]
  def _clear_package_manager_cache(self):
    """Drop the package manager's metadata cache before installing packages."""
    # Nothing to clean for apt-based systems here.
    if OSCheck.is_ubuntu_family():
      return

    if OSCheck.is_redhat_family():
      package_manager_cmd = ("/usr/bin/yum", "clean", "metadata")
    elif OSCheck.is_suse_family():
      package_manager_cmd = ("/usr/bin/zypper", "-q", "-n", "clean")
    else:
      # Previously an empty command string would have been Execute()d on
      # unrecognized families; skip them instead.
      return

    Logger.debug("Clearing repo manager metadata")
    Execute(package_manager_cmd, logoutput=False, sudo=True)
Exemplo n.º 11
0
def get_elastic_config_path(default="/etc/default/elasticsearch"):
    """
    Defines the path to the Elasticsearch environment file.  This path will
    differ based on the OS family.
    :param default: The path used if the OS family is not recognized.
    """
    if OSCheck.is_redhat_family():
        return "/etc/sysconfig/elasticsearch"
    if OSCheck.is_ubuntu_family():
        return "/etc/default/elasticsearch"
    # Unknown family: fall back to the caller-supplied default, loudly.
    Logger.error("Unexpected OS family; using default path={0}".format(default))
    return default
Exemplo n.º 12
0
  def launch_python_subprocess(self, command, tmpout, tmperr):
    """
    Creates subprocess with given parameters. This functionality was moved to separate method
    to make possible unit testing
    """
    # Resolve the OS family once instead of calling OSCheck twice.
    is_winsrv = OSCheck.get_os_family() == OSConst.WINSRV_FAMILY
    # On Windows, close_fds cannot be combined with redirected std handles,
    # so leave it at the platform default (None) there.
    close_fds = None if is_winsrv else True
    command_env = dict(os.environ)
    if is_winsrv:
      # Child needs an explicit PYTHONPATH; env values must be plain str.
      command_env["PYTHONPATH"] = os.pathsep.join(sys.path)
      for k, v in command_env.iteritems():
        command_env[k] = str(v)

    return subprocess.Popen(command,
      stdout=tmpout,
      stderr=tmperr, close_fds=close_fds, env=command_env, preexec_fn=self.preexec_fn)
def execute(configurations={}, parameters={}, host_name=None):
  """
  Returns a tuple containing the result code and a pre-formatted result label

  Keyword arguments:
  configurations (dictionary): a mapping of configuration key to value
  parameters (dictionary): a mapping of script parameter key to value
  host_name (string): the name of this host where the alert is running
  """

  if configurations is None:
    return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])

  if OOZIE_URL_KEY not in configurations:
    return (RESULT_CODE_UNKNOWN, ['The Oozie URL is a required parameter.'])

  # use localhost on Windows, 0.0.0.0 on others; 0.0.0.0 means bind to all
  # interfaces, which doesn't work on Windows
  is_winsrv = OSCheck.get_os_family() == OSConst.WINSRV_FAMILY
  localhost_address = 'localhost' if is_winsrv else '0.0.0.0'

  configured_url = configurations[OOZIE_URL_KEY]
  oozie_url = configured_url.replace(urlparse(configured_url).hostname, localhost_address)

  try:
    command, env = get_check_command(oozie_url, host_name, configurations)
    # execute the command
    Execute(command, environment=env)

    return (RESULT_CODE_OK, ["Successful connection to {0}".format(oozie_url)])
  except KerberosPropertiesNotFound as ex:
    return (RESULT_CODE_UNKNOWN, [str(ex)])
Exemplo n.º 14
0
  def actionexecute(self, env):
    """Point the host at the requested stack version via hdp-select and,
    for HDP >= 2.3, relink /etc/[component]/conf symlinks."""
    config = Script.get_config()

    version = default('/commandParams/version', None)
    stack_name = default('/hostLevelParams/stack_name', "")

    if not version:
      raise Fail("Value is required for '/commandParams/version'")

    # other os?
    if OSCheck.is_redhat_family():
      code, out = shell.call(('/usr/bin/yum', 'clean', 'all'), sudo=True)

    min_ver = format_hdp_stack_version("2.2")
    real_ver = format_hdp_stack_version(version)
    if stack_name == "HDP":
      if compare_versions(real_ver, min_ver) >= 0:
        code, out = shell.call(('hdp-select', 'set', 'all', version), sudo=True)

      if compare_versions(real_ver, format_hdp_stack_version("2.3")) >= 0:
        # backup the old and symlink /etc/[component]/conf to /usr/hdp/current/[component]
        for k, v in conf_select.PACKAGE_DIRS.iteritems():
          for dir_def in v:
            link_config(dir_def['conf_dir'], dir_def['current_dir'])
Exemplo n.º 15
0
  def start(self, env):
    # Attempt to reconfigure the service before starting
    self.configure(env)

    # Create or update the administrator account
    KerberosScript.create_or_update_administrator_identity()

    # Pick the family-specific startup commands, then run them in order.
    if OSCheck.is_suse_family():
      start_commands = ('rckadmind start', 'rckrb5kdc start')
    elif OSCheck.is_ubuntu_family():
      start_commands = ('service krb5-kdc start', 'service krb5-admin-server start')
    else:
      start_commands = ('service krb5kdc start', 'service kadmin start')
    for command in start_commands:
      Execute(command)
  def install_packages(self, package_list):
    """
    Actually install the packages using the package manager.
    :param package_list: List of package names to install
    :return: Returns 0 if no errors were found, and 1 otherwise.
    """
    ret_code = 0

    # Clear cache of package manager right before installation of the packages
    self._clear_package_manager_cache()

    # Install packages
    packages_were_checked = False
    try:
      Package(self.get_base_packages_to_install())

      # Snapshot the set of installed packages first so a failed install
      # can be rolled back to it in the except-branch below.
      packages_installed_before = []
      allInstalledPackages(packages_installed_before)
      packages_installed_before = [package[0] for package in packages_installed_before]
      packages_were_checked = True
      filtered_package_list = self.filter_package_list(package_list)
      for package in filtered_package_list:
        name = self.format_package_name(package['name'], self.repository_version)
        # Ubuntu pins by repo file, others by repository id; on RHEL the
        # version-specific repo files are skipped explicitly.
        Package(name,
                use_repos=list(self.current_repo_files) if OSCheck.is_ubuntu_family() else self.current_repositories,
                skip_repos=[self.REPO_FILE_NAME_PREFIX + "*"] if OSCheck.is_redhat_family() else [])
    except Exception, err:
      ret_code = 1
      Logger.logger.exception("Package Manager failed to install packages. Error: {0}".format(str(err)))

      # Remove already installed packages in case of fail
      if packages_were_checked and packages_installed_before:
        packages_installed_after = []
        allInstalledPackages(packages_installed_after)
        packages_installed_after = [package[0] for package in packages_installed_after]
        packages_installed_before = set(packages_installed_before)
        new_packages_installed = [package for package in packages_installed_after if package not in packages_installed_before]

        # Version separators in package names differ per family: '-' on
        # Ubuntu, '_' elsewhere; normalize before substring matching.
        if OSCheck.is_ubuntu_family():
          package_version_string = self.repository_version.replace('.', '-')
        else:
          package_version_string = self.repository_version.replace('-', '_')
          package_version_string = package_version_string.replace('.', '_')
        for package in new_packages_installed:
          if package_version_string and (package_version_string in package):
            Package(package, action="remove")
Exemplo n.º 17
0
  def status(self, env):
    import params

    if OSCheck.is_suse_family():
      # SUSE: probe the daemons via checkproc instead of pid files.
      try:
        Execute('checkproc `which krb5kdc`')
        Execute('checkproc `which kadmind`')
      except Fail:
        raise ComponentIsNotRunning()
    else:
      # Ubuntu and all other families use the same pid-file checks, so the
      # previously duplicated branches are merged here.
      check_process_status(params.kdamin_pid_path)
      check_process_status(params.krb5kdc_pid_path)
def execute(configurations={}, parameters={}, host_name=None):
  """
  Returns a tuple containing the result code and a pre-formatted result label

  Keyword arguments:
  configurations (dictionary): a mapping of configuration key to value
  parameters (dictionary): a mapping of script parameter key to value
  host_name (string): the name of this host where the alert is running
  """

  if configurations is None:
    return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])

  if not OOZIE_URL_KEY in configurations:
    return (RESULT_CODE_UNKNOWN, ['The Oozie URL is a required parameter.'])

  # use localhost on Windows, 0.0.0.0 on others; 0.0.0.0 means bind to all
  # interfaces, which doesn't work on Windows
  localhost_address = 'localhost' if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY else '0.0.0.0'

  https_port = None
  # try to get https port form oozie-env content
  if OOZIE_ENV_CONTENT in configurations:
    for line in configurations[OOZIE_ENV_CONTENT].splitlines():
      result = re.match(OOZIE_ENV_HTTPS_RE, line)

      if result is not None:
        https_port = result.group(1)
  # or from oozie-site.xml
  if https_port is None and OOZIE_HTTPS_PORT in configurations:
    https_port = configurations[OOZIE_HTTPS_PORT]

  oozie_url = configurations[OOZIE_URL_KEY]

  # construct proper url for https
  if https_port is not None:
    parsed_url = urlparse(oozie_url)
    oozie_url = oozie_url.replace(parsed_url.scheme, "https")
    if parsed_url.port is None:
      oozie_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
    else:
      oozie_url = oozie_url.replace(str(parsed_url.port), str(https_port))

  # https will not work with localhost address, we need put fqdn
  if https_port is None:
    oozie_url = oozie_url.replace(urlparse(oozie_url).hostname, localhost_address)

  try:
    command, env, oozie_user = get_check_command(oozie_url, host_name, configurations)
    # execute the command
    Execute(command, 
            environment=env,
            user=oozie_user,
    )

    return (RESULT_CODE_OK, ["Successful connection to {0}".format(oozie_url)])
  except KerberosPropertiesNotFound, ex:
    return (RESULT_CODE_UNKNOWN, [str(ex)])
def bind_signal_handlers(agentPid):
  """Install SIGINT/SIGTERM/SIGUSR1 handlers (non-Windows only) and return
  the platform-appropriate heartbeat stop handler."""
  global _handler
  if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
    _handler = HeartbeatStopHandlersWindows()
    return _handler

  # Only the main agent process installs the signal handlers.
  if os.getpid() == agentPid:
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGUSR1, debug)
  _handler = HeartbeatStopHandlersLinux()
  return _handler
Exemplo n.º 20
0
 def format_package_name(self, package_name, repo_id):
   """
   This method overcomes problems at SLES SP3. Zypper here behaves differently
   than at SP1, and refuses to install packages by mask if there is any installed package that
   matches this mask.
   So we prepend the concrete HDP version to the mask under Suse.
   """
   # Only SUSE wildcard masks need rewriting; everything else passes through.
   if not (OSCheck.is_suse_family() and '*' in package_name):
     return package_name
   mask_version = re.search(r'((_\d+)*(_)?\*)', package_name).group(0)
   formatted_version = '_' + repo_id.replace('.', '_').replace('-', '_') + '*'
   return package_name.replace(mask_version, formatted_version)
Exemplo n.º 21
0
  def on_failure(self, pythonCommand, result):
    """
    Log some useful information after task failure.
    """
    logger.info("Command " + pprint.pformat(pythonCommand) + " failed with exitcode=" + str(result['exitcode']))
    # Dump process and socket state for post-mortem diagnosis.
    if OSCheck.is_windows_family():
      diagnostic_cmds = ["WMIC path win32_process get Caption,Processid,Commandline", "netstat -an"]
    else:
      diagnostic_cmds = ["ps faux", "netstat -tulpn"]

    runner = shellRunner()
    for cmd in diagnostic_cmds:
      ret = runner.run(cmd)
      logger.info("Command '{0}' returned {1}. {2}{3}".format(cmd, ret["exitCode"], ret["error"], ret["output"]))
Exemplo n.º 22
0
  def execute_java_home_available_check(self, config):
    print "Java home check started."
    java_home = config['commandParams']['java_home']

    print "Java home to check: " + java_home
    java_bin = "java"
    if OSCheck.is_windows_family():
      java_bin = "java.exe"
  
    if not os.path.isfile(os.path.join(java_home, "bin", java_bin)):
      print "Java home doesn't exist!"
      java_home_check_structured_output = {"exit_code" : 1, "message": "Java home doesn't exist!"}
    else:
      print "Java home exists!"
      java_home_check_structured_output = {"exit_code" : 0, "message": "Java home exists!"}
  
    return java_home_check_structured_output
Exemplo n.º 23
0
def turn_off_autostart(service):
  """Disable boot-time autostart for *service* (and stop it on Ubuntu)."""
  search_path = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
  if OSCheck.is_ubuntu_family():
    Execute(('update-rc.d', service, 'disable'),
            path=search_path,
            sudo=True,
    )
    Execute(('service', service, 'stop'),
            sudo=True,
            ignore_failures=True,
    )
    File(format('/etc/init/{service}.override'), # disable upstart job
         content='manual',
    )
  else:
    Execute(('chkconfig', service, 'off'),
            path=search_path,
            sudo=True,
    )
Exemplo n.º 24
0
def bind_signal_handlers(agentPid):
  """Install signal handlers for the main agent process (non-Windows) and
  return the platform-specific heartbeat stop handler."""
  global _handler
  if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
    _handler = HeartbeatStopHandlersWindows()
    return _handler

  # Only the main agent process installs the handlers.
  if os.getpid() == agentPid:
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGUSR2, remote_debug) # Interrupt running process, and provide a python prompt for it
    try:
      import faulthandler  # This is not default module, has to be installed separately
      faulthandler.enable(file=sys.stderr, all_threads=True)
      faulthandler.register(signal.SIGUSR1, file=sys.stderr, all_threads=True, chain=False)
      sys.stderr.write("Registered faulthandler\n")
    except ImportError:
      pass  # Module is not included into python distribution

  _handler = HeartbeatStopHandlersLinux()
  return _handler
Exemplo n.º 25
0
def get_lzo_packages(stack_version_unformatted):
  """Return the LZO package names appropriate for this OS family and
  stack version."""
  script_instance = Script.get_instance()

  # Base native/library packages, per OS family.
  if OSCheck.is_suse_family() and int(OSCheck.get_os_major_version()) >= 12:
    lzo_packages = ["liblzo2-2", "hadoop-lzo-native"]
  elif OSCheck.is_redhat_family() or OSCheck.is_suse_family():
    lzo_packages = ["lzo", "hadoop-lzo-native"]
  elif OSCheck.is_ubuntu_family():
    lzo_packages = ["liblzo2-2"]
  else:
    lzo_packages = []

  # Versioned hadooplzo packages for rolling-upgrade capable stacks.
  if stack_version_unformatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_unformatted):
    if OSCheck.is_ubuntu_family():
      lzo_packages.append(script_instance.format_package_name("hadooplzo-${stack_version}"))
      lzo_packages.append(script_instance.format_package_name("hadooplzo-${stack_version}-native"))
    else:
      lzo_packages.append(script_instance.format_package_name("hadooplzo_${stack_version}"))
      lzo_packages.append(script_instance.format_package_name("hadooplzo_${stack_version}-native"))
  else:
    lzo_packages.append("hadoop-lzo")

  return lzo_packages
Exemplo n.º 26
0
    def actionexecute(self, env):
        """Distribute repository files and install packages for a repository
        version.

        Reads the repository definition from the command's 'repositoryFile',
        writes repo files from the cluster-env templates, installs the
        packages, and records progress in structured output.  Raises Fail
        when repositories cannot be determined or installation fails.
        """
        num_errors = 0

        # Parse parameters
        config = Script.get_config()

        try:
            command_repository = CommandRepository(config['repositoryFile'])
        except KeyError:
            raise Fail(
                "The command repository indicated by 'repositoryFile' was not found"
            )

        # Repo file template is chosen by OS family.
        repo_rhel_suse = config['configurations']['cluster-env'][
            'repo_suse_rhel_template']
        repo_ubuntu = config['configurations']['cluster-env'][
            'repo_ubuntu_template']
        template = repo_rhel_suse if OSCheck.is_redhat_family(
        ) or OSCheck.is_suse_family() else repo_ubuntu

        # Handle a SIGTERM and SIGINT gracefully
        signal.signal(signal.SIGTERM, self.abort_handler)
        signal.signal(signal.SIGINT, self.abort_handler)

        self.repository_version = command_repository.version_string

        # Select dict that contains parameters
        # NOTE(review): if 'roleParams' lacks these keys, package_list and
        # stack_id stay unbound and their later use raises NameError —
        # confirm that callers always supply them.
        try:
            package_list = json.loads(config['roleParams']['package_list'])
            stack_id = config['roleParams']['stack_id']
        except KeyError:
            pass

        self.stack_name = Script.get_stack_name()
        if self.stack_name is None:
            raise Fail("Cannot determine the stack name")

        self.stack_root_folder = Script.get_stack_root()
        if self.stack_root_folder is None:
            raise Fail("Cannot determine the stack's root directory")

        if self.repository_version is None:
            raise Fail("Cannot determine the repository version to install")

        self.repository_version = self.repository_version.strip()

        # Write the repo files; failures are counted, not immediately fatal.
        try:
            if not command_repository.items:
                Logger.warning(
                    "Repository list is empty. Ambari may not be managing the repositories for {0}."
                    .format(self.repository_version))
            else:
                Logger.info(
                    "Will install packages for repository version {0}".format(
                        self.repository_version))
                new_repo_files = create_repo_files(template,
                                                   command_repository)
                self.repo_files.update(new_repo_files)
        except Exception as err:
            Logger.logger.exception(
                "Cannot install repository files. Error: {0}".format(str(err)))
            num_errors += 1

        # Build structured output with initial values
        self.structured_output = {
            'package_installation_result': 'FAIL',
            'repository_version_id': command_repository.version_id
        }

        self.put_structured_out(self.structured_output)

        try:
            # check package manager non-completed transactions
            if self.pkg_provider.check_uncompleted_transactions():
                self.pkg_provider.print_uncompleted_transaction_hint()
                num_errors += 1
        except Exception as e:  # we need to ignore any exception
            Logger.warning(
                "Failed to check for uncompleted package manager transactions: "
                + str(e))

        if num_errors > 0:
            raise Fail("Failed to distribute repositories/install packages")

        # Initial list of versions, used to compute the new version installed
        self.old_versions = get_stack_versions(self.stack_root_folder)

        try:
            is_package_install_successful = False
            ret_code = self.install_packages(package_list)
            if ret_code == 0:
                self.structured_output[
                    'package_installation_result'] = 'SUCCESS'
                self.put_structured_out(self.structured_output)
                is_package_install_successful = True
            else:
                num_errors += 1
        except Exception as err:
            num_errors += 1
            Logger.logger.exception(
                "Could not install packages. Error: {0}".format(str(err)))

        # Provide correct exit code
        if num_errors > 0:
            raise Fail("Failed to distribute repositories/install packages")

        self._fix_default_links_for_current()
        # if installing a version of HDP that needs some symlink love, then create them
        if is_package_install_successful and 'actual_version' in self.structured_output:
            self._relink_configurations_with_conf_select(
                stack_id, self.structured_output['actual_version'])
Exemplo n.º 27
0
    "/configurations/hbase-env/hbase_regionserver_xmn_ratio", float)
regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize,
                                          regionserver_xmn_percent,
                                          regionserver_xmn_max)

hbase_regionserver_shutdown_timeout = expect(
    '/configurations/hbase-env/hbase_regionserver_shutdown_timeout', int, 30)

# Phoenix query server topology.
phoenix_hosts = default('/clusterHostInfo/phoenix_query_server_hosts', [])
phoenix_enabled = default('/configurations/hbase-env/phoenix_sql_enabled',
                          False)
has_phoenix = len(phoenix_hosts) > 0

# Package-name version separators differ by family: '_' on RHEL/SUSE,
# '-' on Ubuntu.
underscored_version = stack_version_unformatted.replace('.', '_')
dashed_version = stack_version_unformatted.replace('.', '-')
if OSCheck.is_redhat_family() or OSCheck.is_suse_family():
    phoenix_package = format("phoenix_{underscored_version}_*")
elif OSCheck.is_ubuntu_family():
    phoenix_package = format("phoenix-{dashed_version}-.*")
# NOTE(review): phoenix_package stays undefined on other OS families —
# confirm downstream users guard against that.

pid_dir = status_params.pid_dir
tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
local_dir = config['configurations']['hbase-site']['hbase.local.dir']
ioengine_param = default(
    '/configurations/hbase-site/hbase.bucketcache.ioengine', None)

# JAAS config files rendered into the HBase conf directory.
client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
regionserver_jaas_config_file = format(
    "{hbase_conf_dir}/hbase_regionserver_jaas.conf")
queryserver_jaas_config_file = format(
Exemplo n.º 28
0
security_enabled = config['configurations']['cluster-env']['security_enabled']

#java params
java_home = config['hostLevelParams']['java_home']

#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env'][
    'hdfs_log_dir_prefix']
hadoop_pid_dir_prefix = config['configurations']['hadoop-env'][
    'hadoop_pid_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env'][
    'hadoop_root_logger']

# jsvc location: HDP 2.0.x on non-SUSE uses the legacy bigtop-utils path.
if Script.is_hdp_stack_greater_or_equal(
        "2.0") and Script.is_hdp_stack_less_than(
            "2.1") and not OSCheck.is_suse_family():
    # deprecated rhel jsvc_path
    jsvc_path = "/usr/libexec/bigtop-utils"
else:
    jsvc_path = "/usr/lib/bigtop-utils"

# JVM sizing for the hadoop daemons, taken from hadoop-env.
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env'][
    'namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env'][
    'namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option(
    "/configurations/hadoop-env/namenode_opt_permsize", "128m")
namenode_opt_maxpermsize = format_jvm_option(
    "/configurations/hadoop-env/namenode_opt_maxpermsize", "256m")
Exemplo n.º 29
0
#for create_hdfs_directory
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name',
                              'missing_principal').replace("_HOST", hostname)

# Tez-related properties
tez_user = config['configurations']['tez-env']['tez_user']

# Tez jars
tez_local_api_jars = '/usr/lib/tez/tez*.jar'
tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'

# Tez libraries
tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)

# MySQL config file location differs on Debian-derived systems.
if OSCheck.is_ubuntu_family():
    mysql_configname = '/etc/mysql/my.cnf'
else:
    mysql_configname = '/etc/my.cnf'

# NOTE(review): this value looks scrubbed/redacted in this copy of the
# file — verify the real user name against the original source.
mysql_user = '******'

# Hive security
hive_authorization_enabled = config['configurations']['hive-site'][
    'hive.security.authorization.enabled']

mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"

hive_site_config = dict(config['configurations']['hive-site'])

########################################################
Exemplo n.º 30
0
# RCA (root cause analysis) logging toggle from hadoop-env.
if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
    rca_enabled = config['configurations']['hadoop-env']['rca_enabled']
else:
    rca_enabled = False
rca_disabled_prefix = "###"
# NOTE(review): `== True` is an exact-boolean comparison; if the config
# value can be a string (e.g. "true") this branch will not fire — confirm
# before simplifying to a truthiness test.
if rca_enabled == True:
    rca_prefix = ""
else:
    rca_prefix = rca_disabled_prefix

#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']

# jsvc location: HDP 2.0.x on non-SUSE uses the legacy bigtop-utils path.
if hdp_stack_version != "" and compare_versions(
        hdp_stack_version, '2.0') >= 0 and compare_versions(
            hdp_stack_version, '2.1') < 0 and not OSCheck.is_suse_family():
    # deprecated rhel jsvc_path
    jsvc_path = "/usr/libexec/bigtop-utils"
else:
    jsvc_path = "/usr/lib/bigtop-utils"

# JVM sizing for the hadoop daemons, taken from hadoop-env.
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env'][
    'namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env'][
    'namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option(
    "/configurations/hadoop-env/namenode_opt_permsize", "128m")
namenode_opt_maxpermsize = format_jvm_option(
    "/configurations/hadoop-env/namenode_opt_maxpermsize", "256m")
Exemplo n.º 31
0
import stat
import string
import sys
import tempfile

from ambari_commons.exceptions import FatalException
from ambari_commons.os_check import OSCheck, OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from ambari_commons.os_utils import run_os_command, search_file, set_file_permissions
from ambari_commons.logging_utils import get_debug_mode, print_info_msg, print_warning_msg, print_error_msg, \
  set_debug_mode
from ambari_server.properties import Properties
from ambari_server.userInput import get_validated_string_input
from ambari_server.utils import compare_versions, locate_file

# Host OS identification, detected once at module import time.
OS_VERSION = OSCheck().get_os_major_version()
OS_TYPE = OSCheck.get_os_type()
OS_FAMILY = OSCheck.get_os_family()

# Name of the pid file written by the Ambari server process.
PID_NAME = "ambari-server.pid"

# Non-root user setup commands
# ambari.properties key holding the account the server runs as.
NR_USER_PROPERTY = "ambari-server.user"

# Placeholder echoed instead of a real password when prompting.
# NOTE(review): value appears masked ("******") in this dump -- confirm
# against the upstream source before relying on it.
BLIND_PASSWORD = "******"

# Common messages
PRESS_ENTER_MSG = "Press <enter> to continue."

# ambari.properties keys recording the server host's operating system.
OS_FAMILY_PROPERTY = "server.os_family"
OS_TYPE_PROPERTY = "server.os_type"
Exemplo n.º 32
0
    def install_packages(self, package_list):
        """
    Actually install the packages using the package manager.

    Upgrades the stack-selector tool first (scoped to the command's
    repositories), snapshots the installed-package list, then upgrades every
    package in the filtered list. On failure, packages newly installed by
    this run whose names contain the repository version are removed again.

    :param package_list: List of package names to install
    :return: Returns 0 if no errors were found, and 1 otherwise.
    """
        ret_code = 0

        config = self.get_config()
        # Retry knobs for transient repository unavailability, supplied by the agent.
        agent_stack_retry_on_unavailability = cbool(
            config['ambariLevelParams']['agent_stack_retry_on_unavailability'])
        agent_stack_retry_count = cint(
            config['ambariLevelParams']['agent_stack_retry_count'])

        # Install packages
        packages_were_checked = False
        packages_installed_before = []
        stack_selector_package = stack_tools.get_stack_tool_package(
            stack_tools.STACK_SELECTOR_NAME)

        try:
            # install the stack-selector; we need to supply the action as "upgrade" here since the normal
            # install command will skip if the package is already installed in the system.
            # This is required for non-versioned components, like stack-select, since each version of
            # the stack comes with one. Also, scope the install by repository since we need to pick a
            # specific repo that the stack-select tools are coming out of in case there are multiple
            # patches installed
            repositories = config['repositoryFile']['repositories']
            command_repos = CommandRepository(config['repositoryFile'])
            repository_ids = [
                repository['repoId'] for repository in repositories
            ]
            # Restrict the upgrade to repo files we have previously written.
            repos_to_use = {}
            for repo_id in repository_ids:
                if repo_id in self.repo_files:
                    repos_to_use[repo_id] = self.repo_files[repo_id]

            self.repo_mgr.upgrade_package(
                stack_selector_package,
                RepoCallContext(use_repos=repos_to_use,
                                retry_on_repo_unavailability=
                                agent_stack_retry_on_unavailability,
                                retry_count=agent_stack_retry_count))

            # Snapshot installed packages so a failed run can be rolled back below.
            packages_installed_before = self.repo_mgr.installed_packages()
            packages_installed_before = [
                package[0] for package in packages_installed_before
            ]
            packages_were_checked = True
            filtered_package_list = self.filter_package_list(package_list)
            # Best effort: if repo enumeration fails, fall back to an empty list
            # and let get_package_from_available resolve names without it.
            try:
                available_packages_in_repos = self.repo_mgr.get_available_packages_in_repos(
                    command_repos)
            except Exception:
                available_packages_in_repos = []
            for package in filtered_package_list:
                name = self.get_package_from_available(
                    package['name'], available_packages_in_repos)

                # This enables upgrading non-versioned packages, despite the fact they exist.
                # Needed by 'mahout' which is non-version but have to be updated
                self.repo_mgr.upgrade_package(
                    name,
                    RepoCallContext(retry_on_repo_unavailability=
                                    agent_stack_retry_on_unavailability,
                                    retry_count=agent_stack_retry_count))
        except Exception as err:
            ret_code = 1
            Logger.logger.exception(
                "Package Manager failed to install packages. Error: {0}".
                format(str(err)))

            # Remove already installed packages in case of fail
            if packages_were_checked and packages_installed_before:
                packages_installed_after = self.repo_mgr.installed_packages()
                packages_installed_after = [
                    package[0] for package in packages_installed_after
                ]
                packages_installed_before = set(packages_installed_before)
                new_packages_installed = [
                    package for package in packages_installed_after
                    if package not in packages_installed_before
                ]

                # Build the version substring as it appears in package names:
                # dash-separated on Ubuntu, underscore-separated elsewhere.
                if OSCheck.is_ubuntu_family():
                    package_version_string = self.repository_version.replace(
                        '.', '-')
                else:
                    package_version_string = self.repository_version.replace(
                        '-', '_')
                    package_version_string = package_version_string.replace(
                        '.', '_')

                for package in new_packages_installed:
                    if package_version_string and (package_version_string
                                                   in package):
                        self.repo_mgr.remove_package(package,
                                                     RepoCallContext())

        if not self.repo_mgr.verify_dependencies():
            ret_code = 1
            Logger.logger.error("Failure while verifying dependencies")
            Logger.logger.error(
                "*******************************************************************************"
            )
            Logger.logger.error(
                "Manually verify and fix package dependencies and then re-run install_packages"
            )
            Logger.logger.error(
                "*******************************************************************************"
            )

        # Compute the actual version in order to save it in structured out
        try:
            if ret_code == 0:
                self.compute_actual_version()
            else:
                self.check_partial_install()
        except Fail as err:
            ret_code = 1
            Logger.logger.exception(
                "Failure while computing actual version. Error: {0}".format(
                    str(err)))
        return ret_code
from resource_management.core.resources import Execute
from resource_management.core.shell import call
from resource_management.libraries.functions import format
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import get_klist_path
from ambari_commons.os_check import OSConst, OSCheck
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from urlparse import urlparse
import os
import re

# Result codes reported by this alert script.
RESULT_CODE_OK = 'OK'
RESULT_CODE_CRITICAL = 'CRITICAL'
RESULT_CODE_UNKNOWN = 'UNKNOWN'

# Regex extracting the HTTPS port from oozie-env content: Windows batch
# files use `set VAR=...`, POSIX shells use `export VAR=...`.
if OSCheck.is_windows_family():
  OOZIE_ENV_HTTPS_RE = r"set\s+OOZIE_HTTPS_PORT=(\d+)"
else:
  OOZIE_ENV_HTTPS_RE = r"export\s+OOZIE_HTTPS_PORT=(\d+)"

# The configured Kerberos executable search paths, if any
KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'

# Template keys resolved by the alert framework from cluster configuration.
OOZIE_URL_KEY = '{{oozie-site/oozie.base.url}}'
SECURITY_ENABLED = '{{cluster-env/security_enabled}}'
# NOTE(review): value appears masked ("******") in this dump; presumably a
# '{{oozie-env/...}}' template key -- confirm against the upstream source.
OOZIE_USER = '******'
# Current and legacy Oozie configuration directories.
OOZIE_CONF_DIR = '/usr/hdp/current/oozie-server/conf'
OOZIE_CONF_DIR_LEGACY = '/etc/oozie/conf'
OOZIE_HTTPS_PORT = '{{oozie-site/oozie.https.port}}'
OOZIE_ENV_CONTENT = '{{oozie-env/content}}'
Exemplo n.º 34
0
# LZO compression packages for this stack version; excluded from the install
# set when LZO support is disabled.
lzo_packages = get_lzo_packages(stack_version_unformatted)

exclude_packages = []
if not lzo_enabled:
  exclude_packages += lzo_packages

name_node_params = default("/commandParams/namenode", None)

#hadoop params
hadoop_env_sh_template = config['configurations']['hadoop-env']['content']

#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']
java_version = int(config['hostLevelParams']['java_version'])

# jsvc ships in a deprecated location on non-SUSE systems for HDP 2.0.x only.
if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.0') >= 0 and compare_versions(hdp_stack_version, '2.1') < 0 and not OSCheck.is_suse_family():
  # deprecated rhel jsvc_path
  jsvc_path = "/usr/libexec/bigtop-utils"
else:
  jsvc_path = "/usr/lib/bigtop-utils"

# JVM sizing values rendered into hadoop-env.sh.
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")

# Fixed JobTracker JVM sizing (not user-configurable here).
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize =  "1024m"
Exemplo n.º 35
0
    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
    if secure_dn_ports_are_in_use:
      hadoop_secure_dn_user = hdfs_user
    else:
      hadoop_secure_dn_user = '******'

# Agent-local library directory and the ulimit configuration directory.
ambari_libs_dir = "/var/lib/ambari-agent/lib"
limits_conf_dir = "/etc/security/limits.d"

# File-descriptor and process limits applied for the hdfs user.
hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
hdfs_user_nproc_limit = default("/configurations/hadoop-env/hdfs_user_nproc_limit", "65536")

# Snappy symlinks are only needed for stacks older than HDP 2.2.
create_lib_snappy_symlinks = not Script.is_hdp_stack_greater_or_equal("2.2")

# jsvc ships in a deprecated location on non-SUSE systems for HDP 2.0.x only.
if Script.is_hdp_stack_greater_or_equal("2.0") and Script.is_hdp_stack_less_than("2.1") and not OSCheck.is_suse_family():
  # deprecated rhel jsvc_path
  jsvc_path = "/usr/libexec/bigtop-utils"
else:
  jsvc_path = "/usr/lib/bigtop-utils"

# PATH used when executing hadoop commands; core dumps are unlimited.
execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
ulimit_cmd = "ulimit -c unlimited ; "

# Source and target locations for the 32/64-bit snappy native libraries.
snappy_so = "libsnappy.so"
so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
so_src_dir_x86 = format("{hadoop_home}/lib")
so_src_dir_x64 = format("{hadoop_home}/lib64")
Exemplo n.º 36
0
    def install_packages(self, package_list):
        """
    Actually install the packages using the package manager.

    Upgrades the stack-selector tool, snapshots the installed-package list,
    then upgrades every package in the filtered list. On failure, packages
    newly installed by this run whose names contain the repository version
    are removed again.

    :param package_list: List of package names to install
    :return: Returns 0 if no errors were found, and 1 otherwise.
    """
        ret_code = 0

        config = self.get_config()
        # Retry knobs for transient repository unavailability, supplied by the agent.
        agent_stack_retry_on_unavailability = cbool(
            config['hostLevelParams']['agent_stack_retry_on_unavailability'])
        agent_stack_retry_count = cint(
            config['hostLevelParams']['agent_stack_retry_count'])

        # Install packages
        packages_were_checked = False
        stack_selector_package = stack_tools.get_stack_tool_package(
            stack_tools.STACK_SELECTOR_NAME)
        try:
            # "upgrade" (not "install") so an already-present stack-selector
            # is still brought up to the new version.
            Package(stack_selector_package,
                    action="upgrade",
                    retry_on_repo_unavailability=
                    agent_stack_retry_on_unavailability,
                    retry_count=agent_stack_retry_count)

            # Snapshot installed packages so a failed run can be rolled back below.
            packages_installed_before = []
            allInstalledPackages(packages_installed_before)
            packages_installed_before = [
                package[0] for package in packages_installed_before
            ]
            packages_were_checked = True
            filtered_package_list = self.filter_package_list(package_list)
            for package in filtered_package_list:
                name = self.format_package_name(package['name'])
                Package(
                    name,
                    action=
                    "upgrade",  # this enables upgrading non-versioned packages, despite the fact they exist. Needed by 'mahout' which is non-version but have to be updated     
                    retry_on_repo_unavailability=
                    agent_stack_retry_on_unavailability,
                    retry_count=agent_stack_retry_count)
        except Exception, err:
            ret_code = 1
            Logger.logger.exception(
                "Package Manager failed to install packages. Error: {0}".
                format(str(err)))

            # Remove already installed packages in case of fail
            if packages_were_checked and packages_installed_before:
                packages_installed_after = []
                allInstalledPackages(packages_installed_after)
                packages_installed_after = [
                    package[0] for package in packages_installed_after
                ]
                packages_installed_before = set(packages_installed_before)
                new_packages_installed = [
                    package for package in packages_installed_after
                    if package not in packages_installed_before
                ]

                # Build the version substring as it appears in package names:
                # dash-separated on Ubuntu, underscore-separated elsewhere.
                if OSCheck.is_ubuntu_family():
                    package_version_string = self.repository_version.replace(
                        '.', '-')
                else:
                    package_version_string = self.repository_version.replace(
                        '-', '_')
                    package_version_string = package_version_string.replace(
                        '.', '_')
                for package in new_packages_installed:
                    if package_version_string and (package_version_string
                                                   in package):
                        Package(package, action="remove")
Exemplo n.º 37
0
# HBase log/tmp locations and JVM heap sizing (normalized to carry a unit).
log_dir = config['configurations']['hbase-env']['hbase_log_dir']
java_io_tmpdir = config['configurations']['hbase-env']['hbase_java_io_tmpdir']
master_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_master_heapsize'])

regionserver_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_regionserver_heapsize'])
regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
regionserver_xmn_percent = expect("/configurations/hbase-env/hbase_regionserver_xmn_ratio", float)
# Young-gen size derived from the heap size, capped at regionserver_xmn_max.
regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)

hbase_regionserver_shutdown_timeout = expect('/configurations/hbase-env/hbase_regionserver_shutdown_timeout', int, 30)

# Phoenix query server topology.
phoenix_hosts = default('/clusterHostInfo/phoenix_query_server_hosts', [])
phoenix_enabled = default('/configurations/hbase-env/phoenix_sql_enabled', False)
has_phoenix = len(phoenix_hosts) > 0

# NOTE(review): phoenix_package is only bound on RedHat/SUSE families; any
# later reference on other families would raise NameError -- verify callers.
if OSCheck.is_redhat_family() or OSCheck.is_suse_family():
  phoenix_package = format("phoenix-*")

pid_dir = status_params.pid_dir
tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
local_dir = config['configurations']['hbase-site']['hbase.local.dir']
ioengine_param = default('/configurations/hbase-site/hbase.bucketcache.ioengine', None)

# JAAS configuration files rendered into the HBase conf directory.
client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
queryserver_jaas_config_file = format("{hbase_conf_dir}/hbase_queryserver_jaas.conf")

ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
has_ganglia_server = not len(ganglia_server_hosts) == 0
if has_ganglia_server:
Exemplo n.º 38
0
 def osdisks():
     """Return disk information via the OS-family-specific helper."""
     on_windows = OSCheck.get_os_family() == OSConst.WINSRV_FAMILY
     reader = Hardware._osdisks_win if on_windows else Hardware._osdisks_linux
     return reader()
Exemplo n.º 39
0
                    exception)
                structured_output[CHECK_INSTALLED_PACKAGES] = {
                    "exit_code": 1,
                    "message": str(exception)
                }
                structured_output[CHECK_EXISTING_REPOS] = {
                    "exit_code": 1,
                    "message": str(exception)
                }

        # Here we are checking transparent huge page if CHECK_TRANSPARENT_HUGE_PAGE is in check_execute_list
        if CHECK_TRANSPARENT_HUGE_PAGE in check_execute_list:
            try:
                thp_regex = "\[(.+)\]"
                file_name = None
                if OSCheck.is_ubuntu_family():
                    file_name = THP_FILE_UBUNTU
                elif OSCheck.is_redhat_family():
                    file_name = THP_FILE_REDHAT
                if file_name and os.path.isfile(file_name):
                    with open(file_name) as f:
                        file_content = f.read()
                        structured_output[CHECK_TRANSPARENT_HUGE_PAGE] = {
                            "exit_code":
                            0,
                            "message":
                            str(
                                re.search(thp_regex, file_content).groups()[0])
                        }
                else:
                    structured_output[CHECK_TRANSPARENT_HUGE_PAGE] = {
Exemplo n.º 40
0
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

"""

from resource_management import *
from utils import get_property_value, get_unstructured_data
from ambari_commons.os_check import OSCheck

# Kerberos client configuration lives directly under /etc on all families.
krb5_conf_dir = '/etc'
krb5_conf_file = 'krb5.conf'
krb5_conf_path = krb5_conf_dir + '/' + krb5_conf_file

# The KDC configuration directory differs per OS family.
if OSCheck.is_suse_family():
    kdc_conf_dir = '/var/lib/kerberos/krb5kdc'
elif OSCheck.is_ubuntu_family():
    kdc_conf_dir = '/etc/krb5kdc'
else:
    # Default (RedHat-like and other families).
    kdc_conf_dir = '/var/kerberos/krb5kdc'
kdc_conf_file = 'kdc.conf'
kdc_conf_path = kdc_conf_dir + '/' + kdc_conf_file

kadm5_acl_dir = kdc_conf_dir  # Typically kadm5.acl and kdc.conf exist in the same directory
kadm5_acl_file = 'kadm5.acl'
kadm5_acl_path = kadm5_acl_dir + '/' + kadm5_acl_file

# Command context supplied by Ambari for this script execution.
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
Exemplo n.º 41
0
  hadoop_conf_empty_dir = None

# Root of the versioned HDP installation symlink tree.
versioned_hdp_root = '/usr/hdp/current'

#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']

#java params
java_home = config['hostLevelParams']['java_home']

#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']

# jsvc ships in a deprecated location on non-SUSE systems for HDP 2.0.x only.
if Script.is_hdp_stack_greater_or_equal("2.0") and Script.is_hdp_stack_less_than("2.1") and not OSCheck.is_suse_family():
  # deprecated rhel jsvc_path
  jsvc_path = "/usr/libexec/bigtop-utils"
else:
  jsvc_path = "/usr/lib/bigtop-utils"

# JVM sizing values rendered into hadoop-env.sh.
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")

# Fixed JobTracker JVM sizing (not user-configurable here).
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize =  "1024m"
Exemplo n.º 42
0
    def actionexecute(self, env):
        """
        Install/refresh the stack repositories on this host.

        Resolves the repo template by OS family, reads the repository
        version and package list from roleParams (falling back to
        commandParams), then writes one repo file per base URL. Repository
        failures are logged and counted rather than raised immediately.
        """
        num_errors = 0

        # Parse parameters
        config = Script.get_config()

        repo_rhel_suse = config['configurations']['cluster-env'][
            'repo_suse_rhel_template']
        repo_ubuntu = config['configurations']['cluster-env'][
            'repo_ubuntu_template']
        # Pick the repo-file template matching the host's OS family.
        template = repo_rhel_suse if OSCheck.is_redhat_family(
        ) or OSCheck.is_suse_family() else repo_ubuntu

        # Handle a SIGTERM and SIGINT gracefully
        signal.signal(signal.SIGTERM, self.abort_handler)
        signal.signal(signal.SIGINT, self.abort_handler)

        # Select dict that contains parameters
        try:
            self.repository_version = config['roleParams'][
                'repository_version']
            base_urls = json.loads(config['roleParams']['base_urls'])
            package_list = json.loads(config['roleParams']['package_list'])
            stack_id = config['roleParams']['stack_id']
        except KeyError:
            # Last try
            self.repository_version = config['commandParams'][
                'repository_version']
            base_urls = json.loads(config['commandParams']['base_urls'])
            package_list = json.loads(config['commandParams']['package_list'])
            stack_id = config['commandParams']['stack_id']

        # current stack information
        self.current_hdp_stack_version = None
        if 'stack_version' in config['hostLevelParams']:
            current_stack_version_unformatted = str(
                config['hostLevelParams']['stack_version'])
            self.current_hdp_stack_version = format_hdp_stack_version(
                current_stack_version_unformatted)

        # Derive the stack root folder from a "NAME-VERSION" stack_id.
        stack_name = None
        self.stack_root_folder = None
        if stack_id and "-" in stack_id:
            stack_split = stack_id.split("-")
            if len(stack_split) == 2:
                stack_name = stack_split[0].upper()
                if stack_name in self.STACK_TO_ROOT_FOLDER:
                    self.stack_root_folder = self.STACK_TO_ROOT_FOLDER[
                        stack_name]
        if self.stack_root_folder is None:
            raise Fail(
                "Cannot determine the stack's root directory by parsing the stack_id property, {0}"
                .format(str(stack_id)))
        if self.repository_version is None:
            raise Fail("Cannot determine the repository version to install")

        self.repository_version = self.repository_version.strip()

        # Install/update repositories
        installed_repositories = []
        self.current_repositories = []
        self.current_repo_files = set()

        # Enable base system repositories
        # We don't need that for RHEL family, because we leave all repos enabled
        # except disabled HDP* ones
        if OSCheck.is_suse_family():
            self.current_repositories.append('base')
        elif OSCheck.is_ubuntu_family():
            self.current_repo_files.add('base')

        Logger.info("Will install packages for repository version {0}".format(
            self.repository_version))
        try:
            # First URL creates the repo file; subsequent URLs append to it.
            append_to_file = False
            for url_info in base_urls:
                repo_name, repo_file = self.install_repository(
                    url_info, append_to_file, template)
                self.current_repositories.append(repo_name)
                self.current_repo_files.add(repo_file)
                append_to_file = True

            installed_repositories = list_ambari_managed_repos()
        except Exception, err:
            Logger.logger.exception(
                "Cannot distribute repositories. Error: {0}".format(str(err)))
            num_errors += 1
Exemplo n.º 43
0
  def actionexecute(self, env):
    """
    Install/refresh the stack repositories on this host.

    Prefers the command's 'repositoryFile' payload when present; otherwise
    falls back to the legacy base_urls/roleParams flow. Repository failures
    are logged and counted rather than raised immediately.
    """
    num_errors = 0

    # Parse parameters
    config = Script.get_config()

    # Pick the repo-file template matching the host's OS family.
    repo_rhel_suse = config['configurations']['cluster-env']['repo_suse_rhel_template']
    repo_ubuntu = config['configurations']['cluster-env']['repo_ubuntu_template']
    template = repo_rhel_suse if OSCheck.is_redhat_family() or OSCheck.is_suse_family() else repo_ubuntu

    # Handle a SIGTERM and SIGINT gracefully
    signal.signal(signal.SIGTERM, self.abort_handler)
    signal.signal(signal.SIGINT, self.abort_handler)

    self.repository_version_id = None

    base_urls = []
    # Select dict that contains parameters
    try:
      if 'base_urls' in config['roleParams']:
        base_urls = json.loads(config['roleParams']['base_urls'])

      self.repository_version = config['roleParams']['repository_version']
      package_list = json.loads(config['roleParams']['package_list'])
      stack_id = config['roleParams']['stack_id']

      if 'repository_version_id' in config['roleParams']:
        self.repository_version_id = config['roleParams']['repository_version_id']
    except KeyError:
      pass

    # current stack information
    self.current_stack_version_formatted = None
    if 'stack_version' in config['hostLevelParams']:
      current_stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
      self.current_stack_version_formatted = format_stack_version(current_stack_version_unformatted)


    self.stack_name = Script.get_stack_name()
    if self.stack_name is None:
      raise Fail("Cannot determine the stack name")

    self.stack_root_folder = Script.get_stack_root()
    if self.stack_root_folder is None:
      raise Fail("Cannot determine the stack's root directory")

    if self.repository_version is None:
      raise Fail("Cannot determine the repository version to install")

    self.repository_version = self.repository_version.strip()

    # Install/update repositories
    self.current_repositories = []
    self.current_repo_files = set()

    # Enable base system repositories
    # We don't need that for RHEL family, because we leave all repos enabled
    # except disabled HDP* ones
    if OSCheck.is_suse_family():
      self.current_repositories.append('base')
    elif OSCheck.is_ubuntu_family():
      self.current_repo_files.add('base')

    Logger.info("Will install packages for repository version {0}".format(self.repository_version))

    if 0 == len(base_urls):
      Logger.warning("Repository list is empty. Ambari may not be managing the repositories for {0}.".format(self.repository_version))

    try:
      # New-style commands carry a full repository file; legacy commands
      # only provide base URLs which are written one by one.
      if 'repositoryFile' in config:
        create_repo_files(template, CommandRepository(config['repositoryFile']))
      else:
        append_to_file = False
        for url_info in base_urls:
          repo_name, repo_file = self.install_repository(url_info, append_to_file, template)
          self.current_repositories.append(repo_name)
          self.current_repo_files.add(repo_file)
          append_to_file = True

    except Exception, err:
      Logger.logger.exception("Cannot install repository files. Error: {0}".format(str(err)))
      num_errors += 1
Exemplo n.º 44
0
  def install_packages(self, package_list):
    """
    Actually install the packages using the package manager.

    Upgrades the stack-selector tool, snapshots the installed-package list,
    then upgrades every package in the filtered list. On failure, packages
    newly installed by this run whose names contain the repository version
    are removed again.

    :param package_list: List of package names to install
    :return: Returns 0 if no errors were found, and 1 otherwise.
    """
    ret_code = 0

    config = self.get_config()
    # Retry knobs for transient repository unavailability, supplied by the agent.
    agent_stack_retry_on_unavailability = cbool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
    agent_stack_retry_count = cint(config['hostLevelParams']['agent_stack_retry_count'])

    # Install packages
    packages_were_checked = False
    stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
    try:
      # "upgrade" (not "install") so an already-present stack-selector is
      # still brought up to the new version.
      Package(stack_selector_package,
              action="upgrade",
              retry_on_repo_unavailability=agent_stack_retry_on_unavailability,
              retry_count=agent_stack_retry_count
      )

      # Snapshot installed packages so a failed run can be rolled back below.
      packages_installed_before = []
      allInstalledPackages(packages_installed_before)
      packages_installed_before = [package[0] for package in packages_installed_before]
      packages_were_checked = True
      filtered_package_list = self.filter_package_list(package_list)
      # Best effort: if repo enumeration fails, fall back to an empty list.
      try:
        available_packages_in_repos = packages_analyzer.get_available_packages_in_repos(config['repositoryFile']['repositories'])
      except Exception:
        available_packages_in_repos = []
      for package in filtered_package_list:
        name = self.get_package_from_available(package['name'], available_packages_in_repos)
        Package(name,
          action="upgrade", # this enables upgrading non-versioned packages, despite the fact they exist. Needed by 'mahout' which is non-version but have to be updated     
          retry_on_repo_unavailability=agent_stack_retry_on_unavailability,
          retry_count=agent_stack_retry_count
        )
    except Exception as err:
      ret_code = 1
      Logger.logger.exception("Package Manager failed to install packages. Error: {0}".format(str(err)))

      # Remove already installed packages in case of fail
      if packages_were_checked and packages_installed_before:
        packages_installed_after = []
        allInstalledPackages(packages_installed_after)
        packages_installed_after = [package[0] for package in packages_installed_after]
        packages_installed_before = set(packages_installed_before)
        new_packages_installed = [package for package in packages_installed_after if package not in packages_installed_before]

        # Build the version substring as it appears in package names:
        # dash-separated on Ubuntu, underscore-separated elsewhere.
        if OSCheck.is_ubuntu_family():
          package_version_string = self.repository_version.replace('.', '-')
        else:
          package_version_string = self.repository_version.replace('-', '_')
          package_version_string = package_version_string.replace('.', '_')

        for package in new_packages_installed:
          if package_version_string and (package_version_string in package):
            Package(package, action="remove")

    if not verifyDependencies():
      ret_code = 1
      Logger.logger.error("Failure while verifying dependencies")
      Logger.logger.error("*******************************************************************************")
      Logger.logger.error("Manually verify and fix package dependencies and then re-run install_packages")
      Logger.logger.error("*******************************************************************************")

    # Compute the actual version in order to save it in structured out
    try:
      if ret_code == 0:
         self.compute_actual_version()
      else:
        self.check_partial_install()
    except Fail as err:
      ret_code = 1
      Logger.logger.exception("Failure while computing actual version. Error: {0}".format(str(err)))
    return ret_code
Exemplo n.º 45
0
    def actionexecute(self, env):
        """
        Install/refresh the stack repositories described by the command's
        'repositoryFile' payload. The repository version is taken from the
        CommandRepository itself; repo-file creation failures are logged
        and counted rather than raised immediately.
        """
        num_errors = 0

        # Parse parameters
        config = Script.get_config()

        try:
            command_repository = CommandRepository(config['repositoryFile'])
        except KeyError:
            raise Fail(
                "The command repository indicated by 'repositoryFile' was not found"
            )

        # Pick the repo-file template matching the host's OS family.
        repo_rhel_suse = config['configurations']['cluster-env'][
            'repo_suse_rhel_template']
        repo_ubuntu = config['configurations']['cluster-env'][
            'repo_ubuntu_template']
        template = repo_rhel_suse if OSCheck.is_redhat_family(
        ) or OSCheck.is_suse_family() else repo_ubuntu

        # Handle a SIGTERM and SIGINT gracefully
        signal.signal(signal.SIGTERM, self.abort_handler)
        signal.signal(signal.SIGINT, self.abort_handler)

        self.repository_version = command_repository.version_string

        # Select dict that contains parameters
        try:
            package_list = json.loads(config['roleParams']['package_list'])
            stack_id = config['roleParams']['stack_id']
        except KeyError:
            pass

        self.stack_name = Script.get_stack_name()
        if self.stack_name is None:
            raise Fail("Cannot determine the stack name")

        self.stack_root_folder = Script.get_stack_root()
        if self.stack_root_folder is None:
            raise Fail("Cannot determine the stack's root directory")

        if self.repository_version is None:
            raise Fail("Cannot determine the repository version to install")

        self.repository_version = self.repository_version.strip()

        try:
            if not command_repository.repositories:
                Logger.warning(
                    "Repository list is empty. Ambari may not be managing the repositories for {0}."
                    .format(self.repository_version))
            else:
                Logger.info(
                    "Will install packages for repository version {0}".format(
                        self.repository_version))
                # Track the repo files we wrote so installs can be scoped to them.
                new_repo_files = create_repo_files(template,
                                                   command_repository)
                self.repo_files.update(new_repo_files)
        except Exception, err:
            Logger.logger.exception(
                "Cannot install repository files. Error: {0}".format(str(err)))
            num_errors += 1
Exemplo n.º 46
0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

"""
from ambari_commons.os_check import OSCheck
from resource_management.libraries.functions import format
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script

# Pull in the OS-specific parameter definitions first; the names they export
# are extended by the common parameters below.
if OSCheck.is_windows_family():
    from params_windows import *
else:
    from params_linux import *

# presumably True when the host was pre-provisioned outside of Ambari
# (sys-prepped image) — TODO confirm against the server-side command builder
host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)

# server configurations
config = Script.get_config()

stack_name = default("/hostLevelParams/stack_name", None)

# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)

# Raw stack version string as sent by the server (not normalized/formatted).
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
Exemplo n.º 47
0
class InstallPackages(Script):
    """
  This script is a part of Rolling Upgrade workflow and is described at
  appropriate design doc.
  It installs repositories to the node and then installs packages.
  For now, repositories are installed into individual files.
  """

    # Ubuntu repository definitions require an explicit component list;
    # HDP repos only carry the "main" component.
    UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
    REPO_FILE_NAME_PREFIX = 'HDP-'

    def actionexecute(self, env):
        """Install the stack repositories and then the stack packages.

        On repository-distribution failure the package phase is skipped; on
        package-installation failure any packages newly installed during this
        run are removed again (best-effort rollback).
        """
        # delayed_fail lets each phase record a failure without raising, so
        # cleanup below still runs in order.
        delayed_fail = False
        package_install_result = False

        # Parse parameters
        config = Script.get_config()

        # Select dict that contains parameters
        try:
            repository_version = config['roleParams']['roleParams' == 'roleParams' and 'repository_version']
            base_urls = json.loads(config['roleParams']['base_urls'])
            package_list = json.loads(config['roleParams']['package_list'])
            stack_id = config['roleParams']['stack_id']
        except KeyError:
            # Last try: older command formats carry the same keys in commandParams.
            repository_version = config['commandParams']['repository_version']
            base_urls = json.loads(config['commandParams']['base_urls'])
            package_list = json.loads(config['commandParams']['package_list'])
            stack_id = config['commandParams']['stack_id']

        # Install/update repositories
        installed_repositories = []
        current_repositories = [
            'base'
        ]  # some of the packages are installed from the pre-existing 'base' repo
        current_repo_files = set(['base'])
        # NOTE(review): captured but not referenced below in this method —
        # confirm whether reporting code elsewhere consumes it.
        old_versions = self.hdp_versions()

        try:
            # The first repo starts a fresh file; subsequent ones append to it.
            append_to_file = False
            for url_info in base_urls:
                repo_name, repo_file = self.install_repository(
                    url_info, repository_version, append_to_file)
                current_repositories.append(repo_name)
                current_repo_files.add(repo_file)
                append_to_file = True

            installed_repositories = list_ambari_managed_repos()
        except Exception, err:
            print "Can not distribute repositories."
            print traceback.format_exc()
            delayed_fail = True

        # Install packages
        if not delayed_fail:
            packages_were_checked = False
            try:
                # Snapshot installed packages so a failed install can be
                # rolled back to this state.
                packages_installed_before = []
                allInstalledPackages(packages_installed_before)
                packages_installed_before = [
                    package[0] for package in packages_installed_before
                ]
                packages_were_checked = True
                filtered_package_list = self.filter_package_list(package_list)
                for package in filtered_package_list:
                    name = self.format_package_name(package['name'],
                                                    repository_version)
                    # Ubuntu pins by repo file name; other families pin by
                    # repository id.
                    Package(
                        name,
                        use_repos=list(current_repo_files) if
                        OSCheck.is_ubuntu_family() else current_repositories)
                package_install_result = True
            except Exception, err:
                print "Can not install packages."
                print traceback.format_exc()
                delayed_fail = True

                # Remove already installed packages in case of fail
                if packages_were_checked and packages_installed_before:
                    packages_installed_after = []
                    allInstalledPackages(packages_installed_after)
                    packages_installed_after = [
                        package[0] for package in packages_installed_after
                    ]
                    packages_installed_before = set(packages_installed_before)
                    new_packages_installed = [
                        package for package in packages_installed_after
                        if package not in packages_installed_before
                    ]

                    # Version separators differ per package manager: deb uses
                    # dashes, rpm uses underscores.
                    if OSCheck.is_ubuntu_family():
                        package_version_string = repository_version.replace(
                            '.', '-')
                    else:
                        package_version_string = repository_version.replace(
                            '-', '_')
                        package_version_string = package_version_string.replace(
                            '.', '_')
                    # Only remove packages that carry this run's version string,
                    # so unrelated new packages are left alone.
                    for package in new_packages_installed:
                        if package_version_string and (package_version_string
                                                       in package):
                            Package(package, action="remove")
Exemplo n.º 48
0
# RegionServer young-gen size derived from the heap size and configured ratio.
regionserver_xmn_percent = config['configurations']['hbase-env']['hbase_regionserver_xmn_ratio']
regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)


phoenix_hosts = default('/clusterHostInfo/phoenix_query_server_hosts', [])
phoenix_enabled = default('/configurations/hbase-env/phoenix_sql_enabled', False)
has_phoenix = len(phoenix_hosts) > 0

# Exclude phoenix packages from installation unless a Phoenix Query Server
# host exists or Phoenix SQL is explicitly enabled.
if not has_phoenix and not phoenix_enabled:
  exclude_packages = ['phoenix*']
else:
  exclude_packages = []

# Package-name version separators differ per package manager (rpm vs deb).
underscored_version = stack_version_unformatted.replace('.', '_')
dashed_version = stack_version_unformatted.replace('.', '-')
if OSCheck.is_redhat_family() or OSCheck.is_suse_family():
  phoenix_package = format("phoenix_{underscored_version}_*")
elif OSCheck.is_ubuntu_family():
  phoenix_package = format("phoenix-{dashed_version}-.*")
# NOTE(review): phoenix_package is left undefined on any other OS family —
# confirm downstream consumers guard against that.

pid_dir = status_params.pid_dir
tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
local_dir = config['configurations']['hbase-site']['hbase.local.dir']

# JAAS configuration files rendered per HBase component (secure clusters).
client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
queryserver_jaas_config_file = format("{hbase_conf_dir}/hbase_queryserver_jaas.conf")

ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
Exemplo n.º 49
0
from resource_management import *
from ambari_commons.os_check import OSCheck

# Parameters used by the status checks of the Hive service components.
config = Script.get_config()

# PID files for HiveServer2 and the metastore.
hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
hive_pid = 'hive-server.pid'

hive_metastore_pid = 'hive.pid'

# WebHCat PID file lives under the HCatalog pid directory.
hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir']
webhcat_pid_file = format('{hcat_pid_dir}/webhcat.pid')

# MySQL process vs. service name differs between distro families.
process_name = 'mysqld'
daemon_name = 'mysql' if (OSCheck.is_suse_family() or OSCheck.is_ubuntu_family()) else 'mysqld'

# Security related/required params
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
hadoop_conf_dir = "/etc/hadoop/conf"
kinit_path_local = functions.get_kinit_path()
tmp_dir = Script.get_tmp_dir()
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hive_user = config['configurations']['hive-env']['hive_user']
hive_conf_dir = "/etc/hive/conf"
webhcat_user = config['configurations']['hive-env']['webhcat_user']
webhcat_conf_dir = '/etc/hive-webhcat/conf'
Exemplo n.º 50
0
# for create_hdfs_directory
# HDFS/Kerberos credentials needed when creating directories in HDFS.
security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")

# MYSQL
# Debian-based systems keep the MySQL configuration under /etc/mysql.
mysql_configname = '/etc/mysql/my.cnf' if OSCheck.is_ubuntu_family() else '/etc/my.cnf'

daemon_name = status_params.daemon_name
# There will always be exactly one mysql_host
mysql_host = config['clusterHostInfo']['metron_enrichment_mysql_server_hosts'][0]
mysql_port = config['configurations']['metron-env']['metron_enrichment_db_port']

# Helper shell scripts staged into the agent tmp directory.
mysql_adduser_path = tmp_dir + "/addMysqlUser.sh"
mysql_deluser_path = tmp_dir + "/removeMysqlUser.sh"
mysql_create_geoip_path = tmp_dir + "/createMysqlGeoIp.sh"
Exemplo n.º 51
0
def execute(parameters=None, host_name=None):
    """
  Returns a tuple containing the result code and a pre-formatted result label

  Keyword arguments:
  parameters (dictionary): a mapping of parameter key to value
  host_name (string): the name of this host where the alert is running
  """

    # Guard clauses: the parameter dict and the Oozie URL are both mandatory.
    if parameters is None:
        return (RESULT_CODE_UNKNOWN,
                ['There were no parameters supplied to the script.'])

    if not OOZIE_URL_KEY in parameters:
        return (RESULT_CODE_UNKNOWN,
                ['The Oozie URL is a required parameter.'])

    # use localhost on Windows, 0.0.0.0 on others; 0.0.0.0 means bind to all
    # interfaces, which doesn't work on Windows
    localhost_address = 'localhost' if OSCheck.get_os_family(
    ) == OSConst.WINSRV_FAMILY else '0.0.0.0'

    # Rewrite the configured URL to point at the local machine: the alert
    # always probes the Oozie server running on this host.
    oozie_url = parameters[OOZIE_URL_KEY]
    oozie_url = oozie_url.replace(
        urlparse(oozie_url).hostname, localhost_address)

    security_enabled = False
    if SECURITY_ENABLED in parameters:
        security_enabled = str(parameters[SECURITY_ENABLED]).upper() == 'TRUE'

    # `oozie admin ... -status` raises via Execute when the server is down.
    command = format(
        "source /etc/oozie/conf/oozie-env.sh ; oozie admin -oozie {oozie_url} -status"
    )

    try:
        # kinit if security is enabled so that oozie-env.sh can make the web request
        kerberos_env = None

        if security_enabled:
            if OOZIE_KEYTAB in parameters and OOZIE_PRINCIPAL in parameters:
                oozie_keytab = parameters[OOZIE_KEYTAB]
                oozie_principal = parameters[OOZIE_PRINCIPAL]

                # substitute _HOST in kerberos principal with actual fqdn
                # NOTE(review): host_name defaults to None, and str.replace
                # with None raises TypeError — confirm callers always pass it.
                oozie_principal = oozie_principal.replace('_HOST', host_name)
            else:
                return (RESULT_CODE_UNKNOWN, [
                    'The Oozie keytab and principal are required parameters when security is enabled.'
                ])

            # Create the kerberos credentials cache (ccache) file and set it in the environment to use
            # when executing curl
            env = Environment.get_instance()
            ccache_file = "{0}{1}oozie_alert_cc_{2}".format(
                env.tmp_dir, sep, getpid())
            kerberos_env = {'KRB5CCNAME': ccache_file}

            # klist -s is silent and only signals via exit code whether the
            # cache holds non-expired tickets.
            klist_path_local = get_klist_path()
            klist_command = format("{klist_path_local} -s {ccache_file}")

            # Determine if we need to kinit by testing to see if the relevant cache exists and has
            # non-expired tickets.  Tickets are marked to expire after 5 minutes to help reduce the number
            # of kinits we do but recover quickly when keytabs are regenerated
            return_code, _ = call(klist_command)
            if return_code != 0:
                kinit_path_local = get_kinit_path()
                kinit_command = format(
                    "{kinit_path_local} -l 5m -kt {oozie_keytab} {oozie_principal}; "
                )

                # kinit
                Execute(kinit_command, environment=kerberos_env)

        # execute the command
        Execute(command, environment=kerberos_env)

        return (RESULT_CODE_OK,
                ["Successful connection to {0}".format(oozie_url)])

    except Exception, ex:
        # Any failure (kinit or status call) is reported as CRITICAL.
        return (RESULT_CODE_CRITICAL, [str(ex)])
Exemplo n.º 52
0
            # Fetch DBConnectionVerification.jar from the Ambari Server
            # resources endpoint into the agent cache directory.
            download_file(check_db_connection_url, check_db_connection_path)

        except Exception, e:
            message = "Error downloading DBConnectionVerification.jar from Ambari Server resources. Check network access to " \
                      "Ambari Server.\n" + str(e)
            print message
            # exit_code 1 in the structured output marks the check failed
            # without raising; the caller relays it back to the server.
            db_connection_check_structured_output = {
                "exit_code": 1,
                "message": message
            }
            return db_connection_check_structured_output

        # download jdbc driver from ambari-server resources
        try:
            download_file(jdbc_url, jdbc_path)
            if db_name == DB_MSSQL and OSCheck.is_windows_family():
                # presumably the MSSQL integrated-authentication native
                # library that must sit next to the driver — TODO confirm
                jdbc_auth_path = os.path.join(agent_cache_dir,
                                              JDBC_AUTH_SYMLINK_MSSQL)
                jdbc_auth_url = jdk_location + JDBC_AUTH_SYMLINK_MSSQL
                download_file(jdbc_auth_url, jdbc_auth_path)
        except Exception, e:
            message = format("Error: Ambari Server cannot download the database JDBC driver and is unable to test the " \
                      "database connection. You must run ambari-server setup --jdbc-db={db_name} " \
                      "--jdbc-driver=/path/to/your/{db_name}/driver.jar on the Ambari Server host to make the JDBC " \
                      "driver available for download and to enable testing the database connection.\n") + str(e)
            print message
            db_connection_check_structured_output = {
                "exit_code": 1,
                "message": message
            }
            return db_connection_check_structured_output
Exemplo n.º 53
0
    def execute_db_connection_check(self, config, tmp_dir):
        """Verify JDBC connectivity to the configured database from this host.

        Resolves the JDBC driver for the configured db_name, stages the
        verification jar and (if needed) a JDK from Ambari Server resources,
        and returns a structured-output dict with exit_code/message.
        (Method continues beyond this excerpt.)
        """
        print "DB connection check started."

        # initialize needed data

        ambari_server_hostname = config['commandParams']['ambari_server_host']
        check_db_connection_jar_name = "DBConnectionVerification.jar"
        jdk_location = config['commandParams']['jdk_location']
        java_home = config['commandParams']['java_home']
        db_name = config['commandParams']['db_name']

        # Map the database type to its driver jar, driver class and the
        # symlink name exposed by the Ambari Server resources endpoint.
        if db_name == DB_MYSQL:
            jdbc_url = jdk_location + JDBC_DRIVER_SYMLINK_MYSQL
            jdbc_driver = JDBC_DRIVER_MYSQL
            jdbc_name = JDBC_DRIVER_SYMLINK_MYSQL
        elif db_name == DB_ORACLE:
            jdbc_url = jdk_location + JDBC_DRIVER_SYMLINK_ORACLE
            jdbc_driver = JDBC_DRIVER_ORACLE
            jdbc_name = JDBC_DRIVER_SYMLINK_ORACLE
        elif db_name == DB_POSTGRESQL:
            jdbc_url = jdk_location + JDBC_DRIVER_SYMLINK_POSTGRESQL
            jdbc_driver = JDBC_DRIVER_POSTGRESQL
            jdbc_name = JDBC_DRIVER_SYMLINK_POSTGRESQL
        elif db_name == DB_MSSQL:
            jdbc_url = jdk_location + JDBC_DRIVER_SYMLINK_MSSQL
            jdbc_driver = JDBC_DRIVER_MSSQL
            jdbc_name = JDBC_DRIVER_SYMLINK_MSSQL
        # NOTE(review): if db_name matches none of the above, jdbc_url and
        # jdbc_name stay unbound and os.path.join below raises NameError —
        # consider returning an explicit unsupported-database error.

        db_connection_url = config['commandParams']['db_connection_url']
        user_name = config['commandParams']['user_name']
        user_passwd = config['commandParams']['user_passwd']
        agent_cache_dir = os.path.abspath(
            config["hostLevelParams"]["agentCacheDir"])
        check_db_connection_url = jdk_location + check_db_connection_jar_name
        jdbc_path = os.path.join(agent_cache_dir, jdbc_name)
        check_db_connection_path = os.path.join(agent_cache_dir,
                                                check_db_connection_jar_name)

        # Windows uses a different java binary name and classpath delimiter.
        java_bin = "java"
        class_path_delimiter = ":"
        if OSCheck.is_windows_family():
            java_bin = "java.exe"
            class_path_delimiter = ";"

        java_exec = os.path.join(java_home, "bin", java_bin)

        # No custom JDK registered with the server and no java on this host:
        # nothing can run the verification jar, so fail early.
        if ('jdk_name' not in config['commandParams'] or config['commandParams']['jdk_name'] == None \
            or config['commandParams']['jdk_name'] == '') and not os.path.isfile(java_exec):
            message = "Custom java is not available on host. Please install it. Java home should be the same as on server. " \
                      "\n"
            print message
            db_connection_check_structured_output = {
                "exit_code": 1,
                "message": message
            }
            return db_connection_check_structured_output

        # Bypass any proxy for the Ambari Server host when downloading.
        environment = {"no_proxy": format("{ambari_server_hostname}")}
        # download and install java if it doesn't exists
        if not os.path.isfile(java_exec):
            jdk_name = config['commandParams']['jdk_name']
            jdk_url = "{0}/{1}".format(jdk_location, jdk_name)
            jdk_download_target = os.path.join(agent_cache_dir, jdk_name)
            java_dir = os.path.dirname(java_home)
            try:
                download_file(jdk_url, jdk_download_target)
            except Exception, e:
                message = "Error downloading JDK from Ambari Server resources. Check network access to " \
                          "Ambari Server.\n" + str(e)
                print message
                db_connection_check_structured_output = {
                    "exit_code": 1,
                    "message": message
                }
                return db_connection_check_structured_output

            # Windows JDK installers are .exe files and are run silently.
            if jdk_name.endswith(".exe"):
                install_cmd = "{0} /s INSTALLDIR={1} STATIC=1 WEB_JAVA=0 /L \\var\\log\\ambari-agent".format(
                    os_utils.quote_path(jdk_download_target),
                    os_utils.quote_path(java_home),
                )
                install_path = [java_dir]
                try:
                    Execute(install_cmd, path=install_path)
                except Exception, e:
                    message = "Error installing java.\n" + str(e)
                    print message
                    db_connection_check_structured_output = {
                        "exit_code": 1,
                        "message": message
                    }
                    return db_connection_check_structured_output
Exemplo n.º 54
0
  def execute_db_connection_check(self, config, tmp_dir):
    """Verify JDBC connectivity to the configured database from this host.

    Resolves the custom JDBC driver registered with the server for db_name
    (failing with a structured-output message when none is registered),
    stages the verification jar and, if needed, a JDK from Ambari Server
    resources. (Method continues beyond this excerpt.)
    """
    Logger.info("DB connection check started.")
  
    # initialize needed data
  
    ambari_server_hostname = config['commandParams']['ambari_server_host']
    check_db_connection_jar_name = "DBConnectionVerification.jar"
    jdk_location = config['commandParams']['jdk_location']
    java_home = config['commandParams']['java_home']
    db_name = config['commandParams']['db_name']
    no_jdbc_error_message = None

    # Each branch resolves the custom driver name registered via
    # `ambari-server setup --jdbc-db=... --jdbc-driver=...`; a missing
    # registration produces a user-facing error message instead.
    if db_name == DB_MYSQL:
      jdbc_driver_mysql_name = default("/ambariLevelParams/custom_mysql_jdbc_name", None)
      if not jdbc_driver_mysql_name:
        no_jdbc_error_message = "The MySQL JDBC driver has not been set. Please ensure that you have executed 'ambari-server setup --jdbc-db=mysql --jdbc-driver=/path/to/jdbc_driver'."
      else:
        jdbc_url = CheckHost.build_url(jdk_location, jdbc_driver_mysql_name)
        jdbc_driver_class = JDBC_DRIVER_CLASS_MYSQL
        jdbc_name = jdbc_driver_mysql_name
    elif db_name == DB_ORACLE:
      jdbc_driver_oracle_name = default("/ambariLevelParams/custom_oracle_jdbc_name", None)
      if not jdbc_driver_oracle_name:
        no_jdbc_error_message = "The Oracle JDBC driver has not been set. Please ensure that you have executed 'ambari-server setup --jdbc-db=oracle --jdbc-driver=/path/to/jdbc_driver'."
      else:
        jdbc_url = CheckHost.build_url(jdk_location, jdbc_driver_oracle_name)
        jdbc_driver_class = JDBC_DRIVER_CLASS_ORACLE
        jdbc_name = jdbc_driver_oracle_name
    elif db_name == DB_POSTGRESQL:
      jdbc_driver_postgres_name = default("/ambariLevelParams/custom_postgres_jdbc_name", None)
      if not jdbc_driver_postgres_name:
        no_jdbc_error_message = "The Postgres JDBC driver has not been set. Please ensure that you have executed 'ambari-server setup --jdbc-db=postgres --jdbc-driver=/path/to/jdbc_driver'."
      else:
        jdbc_url = CheckHost.build_url(jdk_location, jdbc_driver_postgres_name)
        jdbc_driver_class = JDBC_DRIVER_CLASS_POSTGRESQL
        jdbc_name = jdbc_driver_postgres_name
    elif db_name == DB_MSSQL:
      jdbc_driver_mssql_name = default("/ambariLevelParams/custom_mssql_jdbc_name", None)
      if not jdbc_driver_mssql_name:
        no_jdbc_error_message = "The MSSQL JDBC driver has not been set. Please ensure that you have executed 'ambari-server setup --jdbc-db=mssql --jdbc-driver=/path/to/jdbc_driver'."
      else:
        jdbc_url = CheckHost.build_url(jdk_location, jdbc_driver_mssql_name)
        jdbc_driver_class = JDBC_DRIVER_CLASS_MSSQL
        jdbc_name = jdbc_driver_mssql_name
    elif db_name == DB_SQLA:
      jdbc_driver_sqla_name = default("/ambariLevelParams/custom_sqlanywhere_jdbc_name", None)
      if not jdbc_driver_sqla_name:
        no_jdbc_error_message = "The SQLAnywhere JDBC driver has not been set. Please ensure that you have executed 'ambari-server setup --jdbc-db=sqlanywhere --jdbc-driver=/path/to/jdbc_driver'."
      else:
        jdbc_url = CheckHost.build_url(jdk_location, jdbc_driver_sqla_name)
        jdbc_driver_class = JDBC_DRIVER_CLASS_SQLA
        jdbc_name = jdbc_driver_sqla_name
    else: no_jdbc_error_message = format("'{db_name}' database type not supported.")

    # Bail out with a structured failure when no usable driver was resolved.
    if no_jdbc_error_message:
      Logger.warning(no_jdbc_error_message)
      db_connection_check_structured_output = {"exit_code" : 1, "message": no_jdbc_error_message}
      return db_connection_check_structured_output

    db_connection_url = config['commandParams']['db_connection_url']
    user_name = config['commandParams']['user_name']
    user_passwd = config['commandParams']['user_passwd']
    agent_cache_dir = os.path.abspath(config["agentLevelParams"]["agentCacheDir"])
    check_db_connection_url = CheckHost.build_url(jdk_location, check_db_connection_jar_name)
    jdbc_path = os.path.join(agent_cache_dir, jdbc_name)
    class_path_delimiter = ":"
    # SQLAnywhere ships as an archive: the jar and native libs live at
    # fixed paths inside the extracted tree.
    if db_name == DB_SQLA:
      jdbc_jar_path = agent_cache_dir + JDBC_DRIVER_SQLA_JAR_PATH_IN_ARCHIVE
      java_library_path = agent_cache_dir + JARS_PATH_IN_ARCHIVE_SQLA + class_path_delimiter + agent_cache_dir + \
                          LIBS_PATH_IN_ARCHIVE_SQLA
    else:
      jdbc_jar_path = jdbc_path
      java_library_path = agent_cache_dir

    check_db_connection_path = os.path.join(agent_cache_dir, check_db_connection_jar_name)

    # Windows uses a different java binary name and classpath delimiter.
    java_bin = "java"
    if OSCheck.is_windows_family():
      java_bin = "java.exe"
      class_path_delimiter = ";"

    java_exec = os.path.join(java_home, "bin",java_bin)

    # No custom JDK registered with the server and no java on this host:
    # nothing can run the verification jar, so fail early.
    if ('jdk_name' not in config['commandParams'] or config['commandParams']['jdk_name'] == None \
        or config['commandParams']['jdk_name'] == '') and not os.path.isfile(java_exec):
      message = "Custom java is not available on host. Please install it. Java home should be the same as on server. " \
                "\n"
      Logger.warning(message)
      db_connection_check_structured_output = {"exit_code" : 1, "message": message}
      return db_connection_check_structured_output

    # Bypass any proxy for the Ambari Server host when downloading.
    environment = { "no_proxy": format("{ambari_server_hostname}") }
    # download and install java if it doesn't exists
    if not os.path.isfile(java_exec):
      jdk_name = config['commandParams']['jdk_name']
      jdk_url = CheckHost.build_url(jdk_location, jdk_name)
      jdk_download_target = os.path.join(agent_cache_dir, jdk_name)
      java_dir = os.path.dirname(java_home)
      try:
        download_file(jdk_url, jdk_download_target)
      except Exception, e:
        message = "Error downloading JDK from Ambari Server resources. Check network access to " \
                  "Ambari Server.\n" + str(e)
        Logger.exception(message)
        db_connection_check_structured_output = {"exit_code" : 1, "message": message}
        return db_connection_check_structured_output

      # Windows JDK installers are .exe files and are run silently.
      if jdk_name.endswith(".exe"):
        install_cmd = "{0} /s INSTALLDIR={1} STATIC=1 WEB_JAVA=0 /L \\var\\log\\ambari-agent".format(
        os_utils.quote_path(jdk_download_target), os_utils.quote_path(java_home),
        )
        install_path = [java_dir]
        try:
          Execute(install_cmd, path = install_path)
        except Exception, e:
          message = "Error installing java.\n" + str(e)
          Logger.exception(message)
          db_connection_check_structured_output = {"exit_code" : 1, "message": message}
          return db_connection_check_structured_output
0
    def execute(self):
        """
    Sets up logging;
    Parses command parameters and executes method relevant to command type
    """
        logger, chout, cherr = Logger.initialize_logger()

        # parse arguments
        if len(sys.argv) < 7:
            logger.error("Script expects at least 6 arguments")
            print USAGE.format(os.path.basename(
                sys.argv[0]))  # print to stdout
            sys.exit(1)

        command_name = str.lower(sys.argv[1])
        self.command_data_file = sys.argv[2]
        self.basedir = sys.argv[3]
        self.stroutfile = sys.argv[4]
        self.load_structured_out()
        self.logging_level = sys.argv[5]
        Script.tmp_dir = sys.argv[6]

        logging_level_str = logging._levelNames[self.logging_level]
        chout.setLevel(logging_level_str)
        logger.setLevel(logging_level_str)

        # on windows we need to reload some of env variables manually because there is no default paths for configs(like
        # /etc/something/conf on linux. When this env vars created by one of the Script execution, they can not be updated
        # in agent, so other Script executions will not be able to access to new env variables
        if OSCheck.is_windows_family():
            reload_windows_env()

        try:
            with open(self.command_data_file) as f:
                pass
                Script.config = ConfigDictionary(json.load(f))
                # load passwords here(used on windows to impersonate different users)
                Script.passwords = {}
                for k, v in _PASSWORD_MAP.iteritems():
                    if get_path_from_configuration(
                            k, Script.config) and get_path_from_configuration(
                                v, Script.config):
                        Script.passwords[get_path_from_configuration(
                            k, Script.config)] = get_path_from_configuration(
                                v, Script.config)

        except IOError:
            logger.exception(
                "Can not read json file with command parameters: ")
            sys.exit(1)

        # Run class method depending on a command type
        try:
            method = self.choose_method_to_execute(command_name)
            with Environment(self.basedir, tmp_dir=Script.tmp_dir) as env:
                env.config.download_path = Script.tmp_dir
                method(env)
                if command_name == "install":
                    self.set_version()
        except ClientComponentHasNoStatus or ComponentIsNotRunning:
            # Support of component status checks.
            # Non-zero exit code is interpreted as an INSTALLED status of a component
            sys.exit(1)
        except Fail:
            logger.exception(
                "Error while executing command '{0}':".format(command_name))
            sys.exit(1)
        finally:
            if self.should_expose_component_version(command_name):
                self.save_component_version_to_structured_out()
Exemplo n.º 56
0
      # (continuation of the dfs_http_policy branch above)
      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
    elif dfs_http_policy == "HTTP_AND_HTTPS":
      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
    # Privileged (secure) DataNode ports require the DN to start as a
    # privileged user via jsvc.
    if secure_dn_ports_are_in_use:
      hadoop_secure_dn_user = hdfs_user
    else:
      # NOTE(review): '******' looks like a scrubbed/redacted placeholder
      # rather than a meaningful value — confirm against the original source.
      hadoop_secure_dn_user = '******'

#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']

# HDP 2.0.x (but not SUSE) shipped jsvc under the deprecated RHEL path.
if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.0') >= 0 and compare_versions(hdp_stack_version, '2.1') < 0 and not OSCheck.is_suse_family():
  # deprecated rhel jsvc_path
  jsvc_path = "/usr/libexec/bigtop-utils"
else:
  jsvc_path = "/usr/lib/bigtop-utils"

# JVM sizing options for the Hadoop daemons.
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")

# JobTracker defaults (fixed, not configuration-driven).
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize =  "1024m"
Exemplo n.º 57
0
def execute(configurations={}, parameters={}, host_name=None):
    """
    Check connectivity to the Oozie server and return an alert tuple.

    Returns a tuple containing the result code and a pre-formatted result
    label, e.g. (RESULT_CODE_OK, ["Successful connection to <url>"]).

    Keyword arguments:
    configurations (dictionary): a mapping of configuration key to value
    parameters (dictionary): a mapping of script parameter key to value
    host_name (string): the name of this host where the alert is running

    NOTE(review): the mutable default arguments ({}) are shared across
    calls; harmless here since they are only read, never mutated.
    """

    # Guard clauses: without configurations / the Oozie URL there is
    # nothing meaningful to check.
    if configurations is None:
        return (RESULT_CODE_UNKNOWN,
                ['There were no configurations supplied to the script.'])

    if not OOZIE_URL_KEY in configurations:
        return (RESULT_CODE_UNKNOWN,
                ['The Oozie URL is a required parameter.'])

    # use localhost on Windows, 0.0.0.0 on others; 0.0.0.0 means bind to all
    # interfaces, which doesn't work on Windows
    localhost_address = 'localhost' if OSCheck.get_os_family(
    ) == OSConst.WINSRV_FAMILY else '0.0.0.0'

    https_port = None
    # try to get https port form oozie-env content
    if OOZIE_ENV_CONTENT in configurations:
        for line in configurations[OOZIE_ENV_CONTENT].splitlines():
            result = re.match(OOZIE_ENV_HTTPS_RE, line)

            if result is not None:
                # last matching line wins if the env content sets it twice
                https_port = result.group(1)
    # or from oozie-site.xml
    if https_port is None and OOZIE_HTTPS_PORT in configurations:
        https_port = configurations[OOZIE_HTTPS_PORT]

    oozie_url = configurations[OOZIE_URL_KEY]

    # construct proper url for https
    if https_port is not None:
        parsed_url = urlparse(oozie_url)
        oozie_url = oozie_url.replace(parsed_url.scheme, "https")
        if parsed_url.port is None:
            # NOTE(review): str.replace returns a NEW string and this result
            # is discarded, so when the original URL has no explicit port the
            # https port is never actually appended. Looks like a bug — this
            # should read `oozie_url = oozie_url.replace(...)`. Confirm
            # against callers before fixing.
            oozie_url.replace(parsed_url.hostname,
                              ":".join([parsed_url.hostname,
                                        str(https_port)]))
        else:
            oozie_url = oozie_url.replace(str(parsed_url.port),
                                          str(https_port))

    # https will not work with localhost address, we need put fqdn
    if https_port is None:
        oozie_url = oozie_url.replace(
            urlparse(oozie_url).hostname, localhost_address)

    try:
        # get_check_command presumably builds the Oozie CLI status command
        # plus any kerberos environment needed — verify in its definition.
        command, env, oozie_user = get_check_command(oozie_url, host_name,
                                                     configurations)
        # execute the command
        Execute(
            command,
            environment=env,
            user=oozie_user,
        )

        return (RESULT_CODE_OK,
                ["Successful connection to {0}".format(oozie_url)])
    except KerberosPropertiesNotFound, ex:
        return (RESULT_CODE_UNKNOWN, [str(ex)])