Example #1
 def remove_package(self, name):
   if self._check_existence(name):
     cmd = REMOVE_CMD % (name)
     Logger.info("Removing package %s ('%s')" % (name, cmd))
     shell.checked_call(cmd)
   else:
     Logger.info("Skipping removing non-existent package %s" % (name))
 def install(self, env):
     Logger.info("install mysql cluster management")
     import params
     env.set_params(params)
     # remove_mysql_cmd = "yum remove mysql -y"
     # Execute(remove_mysql_cmd,
     #         user='******'
     #         )
     remove_mysql_lib_cmd = "rpm -e --nodeps mysql-libs-5.1.71-1.el6.x86_64 >/dev/null 2>&1"
     Execute(remove_mysql_lib_cmd,
             user='******'
             )
     install_server_cmd = "yum -y install MySQL-Cluster-server-gpl-7.4.11-1.el6.x86_64"
     Execute(install_server_cmd,
             user='******'
             )
     # create mgm config dir
     Directory(params.mgm_config_path,
               owner='root',
               group='root',
               recursive=True
               )
     # create cluster config
     Logger.info('create config.ini in /var/lib/mysql-cluster')
     File(format("{mgm_config_path}/config.ini"),
          owner = 'root',
          group = 'root',
          mode = 0644,
          content=InlineTemplate(params.config_ini_template)
          )
     Execute('ndb_mgmd -f /var/lib/mysql-cluster/config.ini --initial  >/dev/null 2>&1', logoutput = True)
     # create pid file
     pid_cmd = "pgrep -o -f ^ndb_mgmd.* > {0}".format(params.mgm_pid_file)
     Execute(pid_cmd,
             logoutput=True)
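
The remove_package()/install() helpers above rely on a _check_existence() probe and REMOVE_CMD/INSTALL_CMD templates that are not shown. Below is a minimal, self-contained sketch of that pattern for a yum-based host; the command templates and the rpm probe are illustrative assumptions, not the actual Ambari provider code.

import os
import subprocess

# Assumed command templates, mirroring the REMOVE_CMD % (name) / INSTALL_CMD % (name) usage above.
INSTALL_CMD = "/usr/bin/yum -y install %s"
REMOVE_CMD = "/usr/bin/yum -y erase %s"

def _check_existence(name):
    # 'rpm -q <name>' exits 0 only when the package is installed
    with open(os.devnull, "w") as devnull:
        return subprocess.call(["rpm", "-q", name], stdout=devnull, stderr=devnull) == 0

def remove_package(name):
    if _check_existence(name):
        subprocess.check_call(REMOVE_CMD % (name), shell=True)

def install_package(name):
    if not _check_existence(name):
        subprocess.check_call(INSTALL_CMD % (name), shell=True)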
Example #3
  def action_create(self):
    path = self.resource.path

    if os.path.isdir(path):
      raise Fail("Applying %s failed, directory with name %s exists" % (self.resource, path))

    dirname = os.path.dirname(path)
    if not os.path.isdir(dirname):
      raise Fail("Applying %s failed, parent directory %s doesn't exist" % (self.resource, dirname))

    write = False
    content = self._get_content()
    if not os.path.exists(path):
      write = True
      reason = "it doesn't exist"
    elif self.resource.replace:
      if content is not None:
        with open(path, "rb") as fp:
          old_content = fp.read()
        if content != old_content:
          write = True
          reason = "contents don't match"
          if self.resource.backup:
            self.resource.env.backup_file(path)

    if write:
      Logger.info("Writing %s because %s" % (self.resource, reason))
      with open(path, "wb") as fp:
        if content:
          fp.write(content)

    if self.resource.owner and self.resource.mode:
      _set_file_acl(self.resource.path, self.resource.owner, self.resource.mode)
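
The action_create() resource above implements an idempotent "write only if missing or changed" file update. A plain-Python sketch of the same idea, without the resource_management classes, might look like this; the backup suffix is an assumption, and the real resource delegates to env.backup_file().

import os

def write_if_changed(path, content, replace=True, backup_suffix=None):
    # Refuse to clobber a directory and require the parent directory to exist,
    # matching the guards in action_create() above.
    if os.path.isdir(path):
        raise RuntimeError("directory with name %s exists" % path)
    if not os.path.isdir(os.path.dirname(path)):
        raise RuntimeError("parent directory %s doesn't exist" % os.path.dirname(path))

    write = not os.path.exists(path)
    if not write and replace:
        with open(path, "rb") as fp:
            write = fp.read() != content
        if write and backup_suffix:
            os.rename(path, path + backup_suffix)  # crude stand-in for env.backup_file()

    if write:
        with open(path, "wb") as fp:
            fp.write(content)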
Example #4
 def check_service_status(self, service, keyword):
   cmd = "service {0} status | grep -E '{1}'".format(service, keyword)
   Logger.info("run service check on {0} : ".format(service))
   (status, output) = commands.getstatusoutput(cmd)
   if (output == ""):
     Logger.error("service {0} not running".format(service))
     raise ComponentIsNotRunning() 
Example #5
 def install_package(self, name):
   if not self._check_existence(name):
     cmd = INSTALL_CMD % (name)
     Logger.info("Installing package %s ('%s')" % (name, cmd))
     shell.checked_call(cmd)
   else:
     Logger.info("Skipping installing existent package %s" % (name))
Example #6
def service_check(cmd, user, label):
    """
    Executes a service check command that adheres to LSB-compliant
    return codes.  The return codes are interpreted as defined
    by the LSB.

    See http://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/iniscrptact.html
    for more information.

    :param cmd: The service check command to execute.
    :param user: The user to run the check as.
    :param label: The name of the service.
    """
    Logger.info("Performing service check; cmd={0}, user={1}, label={2}".format(cmd, user, label))
    rc, out, err = get_user_call_output(cmd, user, is_checked_call=False)

    if len(err) > 0:
      Logger.error(err)

    if rc in [1, 2, 3]:
      # if return code in [1, 2, 3], then 'program is not running' or 'program is dead'
      Logger.info("{0} is not running".format(label))
      raise ComponentIsNotRunning()

    elif rc == 0:
      # if return code = 0, then 'program is running or service is OK'
      Logger.info("{0} is running".format(label))

    else:
      # else service state is unknown
      err_msg = "{0} service check failed; cmd '{1}' returned {2}".format(label, cmd, rc)
      Logger.error(err_msg)
      raise ExecutionFailed(err_msg, rc, out, err)
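
A hypothetical call to service_check() above; 'crond' is only a placeholder service, and the snippet assumes the same Logger and ComponentIsNotRunning imports the surrounding examples use.

# LSB status exit codes: 0 = running, 1-3 = stopped or dead, anything else = unknown.
try:
    service_check(cmd="service crond status", user="root", label="crond")
except ComponentIsNotRunning:
    Logger.info("crond is not running; a start would be attempted here")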
Example #7
 def check_process(self, keyword):
   Logger.info("check process with: {0}".format(keyword))
   cmd = "ps aux | grep -E '" + keyword + "' | grep -v grep | cat"
   result = self.exe(cmd)
   if (result == ""):
     Logger.error("process {0} not exist".format(keyword))
     raise ComponentIsNotRunning()
def setup_solr_cloud():
    import params

    code, output = call(
            format(
                    '{zk_client_prefix} -cmd get {solr_cloud_zk_directory}{clusterstate_json}'
            ),
            env={'JAVA_HOME': params.java64_home},
            timeout=60
    )

    if not ("NoNodeException" in output):
        Logger.info(
                format(
                        "ZK node {solr_cloud_zk_directory}{clusterstate_json} already exists, skipping ..."
                )
        )
        return

    Execute(
            format(
                    '{zk_client_prefix} -cmd makepath {solr_cloud_zk_directory}'
            ),
            environment={'JAVA_HOME': params.java64_home},
            ignore_failures=True,
            user=params.solr_config_user
    )
def check_process_status(pid_file):
  """
  Function checks whether the process is running.
  The process is considered running if the pid file exists and the process
  with the pid mentioned in the pid file is running.
  If the process is not running, a ComponentIsNotRunning exception is raised.

  @param pid_file: path to service pid file
  """
  if not pid_file or not os.path.isfile(pid_file):
    raise ComponentIsNotRunning()
  
  try:
    pid = int(sudo.read_file(pid_file))
  except:
    Logger.debug("Pid file {0} does not exist".format(pid_file))
    raise ComponentIsNotRunning()

  code, out = shell.call(["ps","-p", str(pid)])
  
  if code:
    Logger.debug("Process with pid {0} is not running. Stale pid file"
              " at {1}".format(pid, pid_file))
    raise ComponentIsNotRunning()
  pass
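
For comparison, a self-contained sketch of the same pid-file check using only the standard library; it returns a boolean instead of raising ComponentIsNotRunning and does not use sudo, so it only works for pid files the current user can read.

import os
import subprocess

def is_pid_running(pid_file):
    if not pid_file or not os.path.isfile(pid_file):
        return False
    try:
        with open(pid_file) as fp:
            pid = int(fp.read().strip())
    except (IOError, ValueError):
        return False
    # 'ps -p <pid>' exits 0 only when a process with that pid exists
    with open(os.devnull, "w") as devnull:
        return subprocess.call(["ps", "-p", str(pid)], stdout=devnull, stderr=devnull) == 0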
Example #10
 def check_process(keyword):
   Logger.info("check process by: {0}".format(keyword))
   cmd = "ps aux | grep -E '" + keyword + "' | grep -v grep | cat"
   result = Toolkit.exe(cmd)
   if (result == ""):
     Logger.error("process checked by {0} not exist".format(keyword))
     raise ComponentIsNotRunning()
Example #11
def _get_directory_mappings_during_upgrade():
  """
  Gets a dictionary of directory to archive name that represents the
  directories that need to be backed up and their output tarball archive targets
  :return:  the dictionary of directory to tarball mappings
  """
  import params

  # Must be performing an Upgrade
  if params.upgrade_direction is None or params.upgrade_direction != Direction.UPGRADE or \
          params.upgrade_from_version is None or params.upgrade_from_version == "":
    Logger.error("Function _get_directory_mappings_during_upgrade() can only be called during a Stack Upgrade in direction UPGRADE.")
    return {}

  # By default, use this for all stacks.
  knox_data_dir = '/var/lib/knox/data'

  if params.stack_name and params.stack_name.upper() == "HDP" and \
          compare_versions(format_hdp_stack_version(params.upgrade_from_version), "2.3.0.0") > 0:
    # Use the version that is being upgraded from.
    knox_data_dir = format('/usr/hdp/{upgrade_from_version}/knox/data')

  # the trailing "/" is important here so as to not include the "conf" folder itself
  directories = {knox_data_dir: BACKUP_DATA_ARCHIVE, params.knox_conf_dir + "/": BACKUP_CONF_ARCHIVE}

  Logger.info(format("Knox directories to backup:\n{directories}"))
  return directories
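
A possible consumer of the directory-to-archive mapping returned above, sketched with the standard tarfile module; the backup directory path is an assumption, and the real upgrade code may archive the directories differently.

import os
import tarfile

def backup_directories(mappings, backup_dir="/tmp/knox-upgrade-backup"):
    if not os.path.isdir(backup_dir):
        os.makedirs(backup_dir)
    for directory, archive_name in mappings.items():
        archive_path = os.path.join(backup_dir, archive_name)
        with tarfile.open(archive_path, "w:gz") as tar:
            # store the directory contents under its own base name inside the archive
            tar.add(directory, arcname=os.path.basename(directory.rstrip("/")))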
Example #12
  def check_nifi_process_status(self, pid_file):
    """
    Function checks whether the process is running.
    The process is considered running if the pid file exists and the process
    with the pid mentioned in the pid file is running.
    If the process is not running, a ComponentIsNotRunning exception is raised.

    @param pid_file: path to service pid file
    """
    if not pid_file or not os.path.isfile(pid_file):
        raise ComponentIsNotRunning()

    try:
        lines = [line.rstrip('\n') for line in open(pid_file)]
        pid = int(lines[2].split('=')[1])
    except:
        Logger.warn("Pid file {0} does not exist".format(pid_file))
        raise ComponentIsNotRunning()

    code, out = shell.call(["ps","-p", str(pid)])

    if code:
        Logger.debug("Process with pid {0} is not running. Stale pid file"
                     " at {1}".format(pid, pid_file))
        raise ComponentIsNotRunning()
    pass
    def configure(self, env, upgrade_type=None, config_dir=None):
        from params import params
        env.set_params(params)

        Logger.info("Running profiler configure")
        File(format("{metron_config_path}/profiler.properties"),
             content=Template("profiler.properties.j2"),
             owner=params.metron_user,
             group=params.metron_group
             )

        if not metron_service.is_zk_configured(params):
            metron_service.init_zk_config(params)
            metron_service.set_zk_configured(params)
        metron_service.refresh_configs(params)

        commands = ProfilerCommands(params)
        if not commands.is_hbase_configured():
            commands.create_hbase_tables()
        if params.security_enabled and not commands.is_hbase_acl_configured():
            commands.set_hbase_acls()
        if params.security_enabled and not commands.is_acl_configured():
            commands.init_kafka_acls()
            commands.set_acl_configured()

        Logger.info("Calling security setup")
        storm_security_setup(params)
        if not commands.is_configured():
            commands.set_configured()
 def start(self, env):
     Logger.info("start sql node")
     import params
     env.set_params(params)
     Execute("mysqld_safe >/dev/null 2>&1 &",
             user='******'
             )
 def stop(self, env):
     Logger.info("stop sql node")
     import params
     env.set_params(params)
     Execute(params.stop_sql_node_cmd,
             user='******'
             )
def init_config():
    Logger.info('Loading config into ZooKeeper')
    Execute(ambari_format(
        "{metron_home}/bin/zk_load_configs.sh --mode PUSH -i {metron_zookeeper_config_path} -z {zookeeper_quorum}"
    ),
            path=ambari_format("{java_home}/bin"))


def load_global_config(params):
    Logger.info('Create Metron Local Config Directory')
    Logger.info("Configure Metron global.json")

    directories = [params.metron_zookeeper_config_path]
    Directory(directories,
              mode=0755,
              owner=params.metron_user,
              group=params.metron_group)

    File("{0}/global.json".format(params.metron_zookeeper_config_path),
         owner=params.metron_user,
Example #18
 def query_data(self):
     Logger.info("Querying data from table {0}".format(
         constants.smoke_check_table_name))
     sql_cmd = "select * from {0}".format(constants.smoke_check_table_name)
     exec_psql_cmd(sql_cmd, self.active_master_host)
Example #19
 def insert_data(self):
     Logger.info("Inserting data to table {0}".format(
         constants.smoke_check_table_name))
     sql_cmd = "insert into {0} select * from generate_series(1,10)".format(
         constants.smoke_check_table_name)
     exec_psql_cmd(sql_cmd, self.active_master_host)
Example #20
 def create_table(self):
     Logger.info("Creating table {0}".format(
         constants.smoke_check_table_name))
     sql_cmd = "create table {0} (col1 int) distributed randomly".format(
         constants.smoke_check_table_name)
     exec_psql_cmd(sql_cmd, self.active_master_host)
Example #21
 def drop_table(self):
     Logger.info("Dropping {0} table if exists".format(
         constants.smoke_check_table_name))
     sql_cmd = "drop table if exists {0}".format(
         constants.smoke_check_table_name)
     exec_psql_cmd(sql_cmd, self.active_master_host)
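
Examples #18-#21 all delegate to an exec_psql_cmd() helper that is not shown. A sketch of what such a helper might look like, assuming the shell.checked_call used elsewhere in these examples and a 'gpadmin' admin user; both are assumptions, not the actual implementation.

def exec_psql_cmd(sql_cmd, host, db="template1"):
    cmd = "psql -h {0} -d {1} -c \"{2}\"".format(host, db, sql_cmd)
    Logger.info("Executing SQL command: {0}".format(sql_cmd))
    return shell.checked_call(cmd, user="gpadmin")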
Example #22
    def compute_actual_version(self):
        """
    After packages are installed, determine what the new actual version is.
    """

        # If the repo contains a build number, optimistically assume it to be the actual_version. It will get changed
        # to correct value if it is not
        self.actual_version = None
        self.repo_version_with_build_number = None
        if self.repository_version:
            m = re.search(r"[\d\.]+-\d+", self.repository_version)
            if m:
                # Contains a build number
                self.repo_version_with_build_number = self.repository_version
                self.structured_output[
                    'actual_version'] = self.repo_version_with_build_number  # This is the best value known so far.
                self.put_structured_out(self.structured_output)

        Logger.info(
            "Attempting to determine actual version with build number.")
        Logger.info("Old versions: {0}".format(self.old_versions))

        new_versions = get_stack_versions(self.stack_root_folder)
        Logger.info("New versions: {0}".format(new_versions))

        deltas = set(new_versions) - set(self.old_versions)
        Logger.info("Deltas: {0}".format(deltas))

        # Get version without build number
        normalized_repo_version = self.repository_version.split('-')[0]

        if 1 == len(deltas):
            self.actual_version = next(iter(deltas)).strip()
            self.structured_output['actual_version'] = self.actual_version
            self.put_structured_out(self.structured_output)
            write_actual_version_to_history_file(normalized_repo_version,
                                                 self.actual_version)
            Logger.info(
                "Found actual version {0} by checking the delta between versions before and after installing packages"
                .format(self.actual_version))
        else:
            # If the first install attempt does a partial install and is unable to report this to the server,
            # then a subsequent attempt will report an empty delta. For this reason, we search for a best fit version for the repo version
            Logger.info(
                "Cannot determine actual version installed by checking the delta between versions "
                "before and after installing package")
            Logger.info(
                "Will try to find for the actual version by searching for best possible match in the list of versions installed"
            )
            self.actual_version = self.find_best_fit_version(
                new_versions, self.repository_version)
            if self.actual_version is not None:
                self.actual_version = self.actual_version.strip()
                self.structured_output['actual_version'] = self.actual_version
                self.put_structured_out(self.structured_output)
                Logger.info(
                    "Found actual version {0} by searching for best possible match"
                    .format(self.actual_version))
            else:
                msg = "Could not determine actual version installed. Try reinstalling packages again."
                raise Fail(msg)
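
The core decision in compute_actual_version() can be expressed as a small pure function: prefer the single version directory that appeared after the install, otherwise fall back to a best match against the repository version. The prefix match below is a simplification of find_best_fit_version(), whose real logic is not shown.

def determine_actual_version(old_versions, new_versions, repository_version):
    deltas = set(new_versions) - set(old_versions)
    if len(deltas) == 1:
        # exactly one new version directory appeared; that is the installed version
        return next(iter(deltas)).strip()
    # otherwise match on the repo version without its build number (simplified best-fit)
    normalized = repository_version.split('-')[0]
    candidates = [v for v in new_versions if v.startswith(normalized)]
    return candidates[0].strip() if candidates else None

# e.g. determine_actual_version(["2.6.3.0-235"], ["2.6.3.0-235", "2.6.4.0-91"], "2.6.4.0-91")
# returns "2.6.4.0-91"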
Example #23
    def service_check(self, env):
        import params
        env.set_params(params)

        if params.security_enabled:
            kinit_cmd = format(
                "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; "
            )
        else:
            kinit_cmd = ""

        # Check HiveServer
        Logger.info("Running Hive Server checks")
        Logger.info("--------------------------\n")
        self.check_hive_server(env, 'Hive Server', kinit_cmd,
                               params.hive_server_hosts,
                               int(format("{hive_server_port}")))

        if params.has_hive_interactive and params.hive_interactive_enabled:
            Logger.info("Running Hive Server2 checks")
            Logger.info("--------------------------\n")

            self.check_hive_server(
                env, 'Hive Server2', kinit_cmd, params.hive_interactive_hosts,
                int(format("{hive_server_interactive_port}")))

            Logger.info("Running LLAP checks")
            Logger.info("-------------------\n")
            self.check_llap(env, kinit_cmd, params.hive_interactive_hosts,
                            int(format("{hive_server_interactive_port}")),
                            params.hive_llap_principal,
                            params.hive_server2_authentication,
                            params.hive_transport_mode,
                            params.hive_http_endpoint)

        Logger.info("Running HCAT checks")
        Logger.info("-------------------\n")
        hcat_service_check()

        Logger.info("Running WEBHCAT checks")
        Logger.info("---------------------\n")
        webhcat_service_check()
Example #24
    def execute(self):
        """
    Sets up logging;
    Parses command parameters and executes method relevant to command type
    """
        parser = OptionParser()
        parser.add_option(
            "-o",
            "--out-files-logging",
            dest="log_out_files",
            action="store_true",
            help=
            "use this option to enable outputting *.out files of the service pre-start"
        )
        (self.options, args) = parser.parse_args()

        self.log_out_files = self.options.log_out_files

        # parse arguments
        if len(args) < 6:
            print "Script expects at least 6 arguments"
            print USAGE.format(os.path.basename(
                sys.argv[0]))  # print to stdout
            sys.exit(1)

        self.command_name = str.lower(sys.argv[1])
        self.command_data_file = sys.argv[2]
        self.basedir = sys.argv[3]
        self.stroutfile = sys.argv[4]
        self.load_structured_out()
        self.logging_level = sys.argv[5]
        Script.tmp_dir = sys.argv[6]
        # optional script arguments for forcing https protocol and ca_certs file
        if len(sys.argv) >= 8:
            Script.force_https_protocol = sys.argv[7]
        if len(sys.argv) >= 9:
            Script.ca_cert_file_path = sys.argv[8]

        logging_level_str = logging._levelNames[self.logging_level]
        Logger.initialize_logger(__name__, logging_level=logging_level_str)

        # on windows we need to reload some env variables manually because there are no default paths for configs (like
        # /etc/something/conf on linux). When these env vars are created by one Script execution, they cannot be updated
        # in the agent, so other Script executions will not be able to access the new env variables
        if OSCheck.is_windows_family():
            reload_windows_env()

        # !!! status commands re-use structured output files; if the status command doesn't update
        # the file (because it doesn't have to) then we must ensure that the file is reset to prevent
        # old, stale structured output from a prior status command from being used
        if self.command_name == "status":
            Script.structuredOut = {}
            self.put_structured_out({})

        # make sure that script has forced https protocol and ca_certs file passed from agent
        ensure_ssl_using_protocol(Script.get_force_https_protocol_name(),
                                  Script.get_ca_cert_file_path())

        try:
            with open(self.command_data_file) as f:
                pass
                Script.config = ConfigDictionary(json.load(f))
                # load passwords here(used on windows to impersonate different users)
                Script.passwords = {}
                for k, v in _PASSWORD_MAP.iteritems():
                    if get_path_from_configuration(
                            k, Script.config) and get_path_from_configuration(
                                v, Script.config):
                        Script.passwords[get_path_from_configuration(
                            k, Script.config)] = get_path_from_configuration(
                                v, Script.config)

        except IOError:
            Logger.logger.exception(
                "Can not read json file with command parameters: ")
            sys.exit(1)

        # Run class method depending on a command type
        try:
            method = self.choose_method_to_execute(self.command_name)
            with Environment(self.basedir, tmp_dir=Script.tmp_dir) as env:
                env.config.download_path = Script.tmp_dir

                if not self.is_hook():
                    self.execute_prefix_function(self.command_name, 'pre', env)

                method(env)

                if not self.is_hook():
                    self.execute_prefix_function(self.command_name, 'post',
                                                 env)

        except Fail as ex:
            ex.pre_raise()
            raise
        finally:
            if self.should_expose_component_version(self.command_name):
                self.save_component_version_to_structured_out()
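
For reference, the positional arguments consumed by execute() above correspond to the way the agent invokes a Script subclass; the command line below is a placeholder layout, not an actual agent path.

# /usr/bin/ambari-python-wrap my_script.py <COMMAND> <command.json> <basedir> <structured-out.json> <LOG_LEVEL> <tmp_dir> [force_https_protocol] [ca_cert_file]
#
#   sys.argv[1]  command name, e.g. "start" or "status"
#   sys.argv[2]  command data file written by the agent (JSON)
#   sys.argv[3]  base directory of the service scripts
#   sys.argv[4]  structured output file
#   sys.argv[5]  logging level, e.g. "INFO"
#   sys.argv[6]  temporary directory
#   sys.argv[7]  (optional) forced https protocol name
#   sys.argv[8]  (optional) CA certificates file path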
Example #25
 def __init__(self):
     super(HiveServiceCheckDefault, self).__init__()
     Logger.initialize_logger()
Example #26
    def actionexecute(self, env):
        num_errors = 0

        # Parse parameters
        config = Script.get_config()

        try:
            command_repository = CommandRepository(config['repositoryFile'])
        except KeyError:
            raise Fail(
                "The command repository indicated by 'repositoryFile' was not found"
            )

        repo_rhel_suse = config['configurations']['cluster-env'][
            'repo_suse_rhel_template']
        repo_ubuntu = config['configurations']['cluster-env'][
            'repo_ubuntu_template']
        template = repo_rhel_suse if OSCheck.is_redhat_family() or OSCheck.is_suse_family() \
            else repo_ubuntu

        # Handle a SIGTERM and SIGINT gracefully
        signal.signal(signal.SIGTERM, self.abort_handler)
        signal.signal(signal.SIGINT, self.abort_handler)

        self.repository_version = command_repository.version_string

        # Select dict that contains parameters
        try:
            package_list = json.loads(config['roleParams']['package_list'])
            stack_id = config['roleParams']['stack_id']
        except KeyError:
            pass

        self.stack_name = Script.get_stack_name()
        if self.stack_name is None:
            raise Fail("Cannot determine the stack name")

        self.stack_root_folder = Script.get_stack_root()
        if self.stack_root_folder is None:
            raise Fail("Cannot determine the stack's root directory")

        if self.repository_version is None:
            raise Fail("Cannot determine the repository version to install")

        self.repository_version = self.repository_version.strip()

        try:
            if not command_repository.items:
                Logger.warning(
                    "Repository list is empty. Ambari may not be managing the repositories for {0}."
                    .format(self.repository_version))
            else:
                Logger.info(
                    "Will install packages for repository version {0}".format(
                        self.repository_version))
                new_repo_files = create_repo_files(template,
                                                   command_repository)
                self.repo_files.update(new_repo_files)
        except Exception as err:
            Logger.logger.exception(
                "Cannot install repository files. Error: {0}".format(str(err)))
            num_errors += 1

        # Build structured output with initial values
        self.structured_output = {
            'package_installation_result': 'FAIL',
            'repository_version_id': command_repository.version_id
        }

        self.put_structured_out(self.structured_output)

        try:
            # check package manager non-completed transactions
            if self.pkg_provider.check_uncompleted_transactions():
                self.pkg_provider.print_uncompleted_transaction_hint()
                num_errors += 1
        except Exception as e:  # we need to ignore any exception
            Logger.warning(
                "Failed to check for uncompleted package manager transactions: "
                + str(e))

        if num_errors > 0:
            raise Fail("Failed to distribute repositories/install packages")

        # Initial list of versions, used to compute the new version installed
        self.old_versions = get_stack_versions(self.stack_root_folder)

        try:
            is_package_install_successful = False
            ret_code = self.install_packages(package_list)
            if ret_code == 0:
                self.structured_output[
                    'package_installation_result'] = 'SUCCESS'
                self.put_structured_out(self.structured_output)
                is_package_install_successful = True
            else:
                num_errors += 1
        except Exception as err:
            num_errors += 1
            Logger.logger.exception(
                "Could not install packages. Error: {0}".format(str(err)))

        # Provide correct exit code
        if num_errors > 0:
            raise Fail("Failed to distribute repositories/install packages")

        # if installing a version of HDP that needs some symlink love, then create them
        if is_package_install_successful and 'actual_version' in self.structured_output:
            self._relink_configurations_with_conf_select(
                stack_id, self.structured_output['actual_version'])
Example #27
 def abort_handler(self, signum, frame):
     Logger.error(
         "Caught signal {0}, will handle it gracefully. Compute the actual version if possible before exiting."
         .format(signum))
     self.check_partial_install()
def get_running_topologies(params):
    Logger.info('Getting Running Storm Topologies from Storm REST Server')

    Logger.info('Security enabled? ' + str(params.security_enabled))

    # Want to sudo to the metron user and kinit as them so we aren't polluting root with Metron's Kerberos tickets.
    # This is because we need to run a command, and capture its output, as the metron user.
    negotiate = '--negotiate -u : ' if params.security_enabled else ''
    cmd = ambari_format('curl --max-time 3 ' + negotiate +
                        '{storm_rest_addr}/api/v1/topology/summary')

    if params.security_enabled:
        kinit(params.kinit_path_local,
              params.metron_keytab_path,
              params.metron_principal_name,
              execute_user=params.metron_user)

    Logger.info('Running cmd: ' + cmd)
    return_code, stdout, stderr = get_user_call_output(cmd,
                                                       user=params.metron_user)

    try:
        stormjson = json.loads(stdout)
    except ValueError as e:
        Logger.info('Stdout: ' + str(stdout))
        Logger.info('Stderr: ' + str(stderr))
        Logger.exception(str(e))
        return {}

    topologiesDict = {}

    for topology in stormjson['topologies']:
        topologiesDict[topology['name']] = topology['status']

    Logger.info("Topologies: " + str(topologiesDict))
    return topologiesDict

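The dictionary returned by get_running_topologies() maps topology name to status, e.g. {'enrichment': 'ACTIVE'}. A hypothetical free-function consumer follows; the class-based examples use an is_topology_active() method whose real implementation is not shown.

def is_topology_active(params, topology_name):
    topologies = get_running_topologies(params)
    return topologies.get(topology_name, '').upper() == 'ACTIVE'
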
    def service_check(self, env):
        """
        Performs a service check for the Profiler.
        :param env: Environment
        """
        Logger.info('Checking Kafka topics for Profiler')
        metron_service.check_kafka_topics(self.__params,
                                          [self.__params.profiler_input_topic])

        Logger.info("Checking HBase table for profiler")
        metron_service.check_hbase_table(self.__params,
                                         self.__params.profiler_hbase_table)
        metron_service.check_hbase_column_family(
            self.__params, self.__params.profiler_hbase_table,
            self.__params.profiler_hbase_cf)

        if self.__params.security_enabled:

            Logger.info('Checking Kafka ACLs for Profiler')
            metron_service.check_kafka_acls(self.__params, self.__get_topics())
            metron_service.check_kafka_acl_groups(
                self.__params, self.__get_kafka_acl_groups())

            Logger.info('Checking HBase ACLs for Profiler')
            metron_service.check_hbase_acls(self.__params,
                                            self.__params.profiler_hbase_table)

        Logger.info("Checking for Profiler topology")
        if not self.is_topology_active(env):
            raise Fail("Profiler topology not running")

        Logger.info("Profiler service check completed successfully")
Example #30
 def restart(self, env):
     import params
     env.set_params(params)
     Logger.info('Restart Elasticsearch slave node')
     self.configure(env)
     Execute("service elasticsearch restart")
Example #31
    def service_check(self, env):
        import params
        env.set_params(params)
        streamline_api = format(
            "http://{params.hostname}:{params.streamline_port}/api/v1/catalog/streams/componentbundles"
        )
        Logger.info(streamline_api)
        max_retries = 3
        success = False

        if params.security_enabled:
            kinit_cmd = format(
                "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};"
            )
            return_code, out = shell.checked_call(
                kinit_cmd,
                path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                user=params.smokeuser,
            )

        for num in range(0, max_retries):
            try:
                Logger.info(format("Making http requests to {streamline_api}"))

                if params.security_enabled:
                    get_app_info_cmd = "curl --negotiate -u : -ks --location-trusted --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + streamline_api
                    return_code, stdout, _ = get_user_call_output(
                        get_app_info_cmd,
                        user=params.smokeuser,
                        path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                    )
                    try:
                        json_response = json.loads(stdout)
                        success = True
                        Logger.info(
                            format(
                                "Successfully made a API request to SAM. {stdout}"
                            ))
                        break
                    except Exception as e:
                        Logger.error(
                            format(
                                "Response from SAM API was not a valid JSON. Response: {stdout}"
                            ))
                else:
                    response = urllib2.urlopen(streamline_api)
                    api_response = response.read()
                    response_code = response.getcode()
                    Logger.info(format("SAM response http status {response}"))
                    if response.getcode() != 200:
                        Logger.error(
                            format(
                                "Failed to fetch response for {streamline_api}"
                            ))
                        show_logs(params.streamline_log_dir,
                                  params.streamline_user)
                        raise
                    else:
                        success = True
                        Logger.info(
                            format(
                                "Successfully made a API request to SAM. {api_response}"
                            ))
                        break
            except (urllib2.URLError, ExecutionFailed) as e:
                Logger.error(
                    format(
                        "Failed to make API request to SAM server at {streamline_api},retrying.. {num} out of {max_retries}"
                    ))
                time.sleep(num * 10)  # linear back-off between retries
                continue

        if not success:
            Logger.error(
                format(
                    "Failed to make API request to  SAM server at {streamline_api} after {max_retries}"
                ))
            raise
Example #32
 def configure(self, env, upgrade_type=None, config_dir=None):
     import params
     env.set_params(params)
     Logger.info('Configure Elasticsearch slave node')
     configure_slave()
 def init_kafka_acls(self):
     Logger.info('Creating Kafka ACls for profiler')
     metron_service.init_kafka_acls(self.__params, self.__get_topics())
     metron_service.init_kafka_acl_groups(self.__params,
                                          self.__get_kafka_acl_groups())
  def actionexecute(self, env):
    resolve_ambari_config()

    # Parse parameters from command json file.
    config = Script.get_config()

    host_name = socket.gethostname()
    version = default('/roleParams/version', None)

    # These 2 variables are optional
    service_package_folder = default('/commandParams/service_package_folder', None)
    if service_package_folder is None:
      service_package_folder = default('/serviceLevelParams/service_package_folder', None)
    hooks_folder = default('/commandParams/hooks_folder', None)

    tasks = json.loads(config['roleParams']['tasks'])
    if tasks:
      for t in tasks:
        task = ExecuteTask(t)
        Logger.info(str(task))

        # If a (script, function) exists, it overwrites the command.
        if task.script and task.function:
          file_cache = FileCache(agent_config)

          server_url_prefix = default('/ambariLevelParams/jdk_location', "")

          if service_package_folder and hooks_folder:
            command_paths = {
              "commandParams": {
                "service_package_folder": service_package_folder,
              },
              "clusterLevelParams": {
                   "hooks_folder": hooks_folder
              }
            } 

            base_dir = file_cache.get_service_base_dir(command_paths, server_url_prefix)
          else:
            base_dir = file_cache.get_custom_actions_base_dir(server_url_prefix)

          script_path = os.path.join(base_dir, task.script)
          if not os.path.exists(script_path):
            message = "Script %s does not exist" % str(script_path)
            raise Fail(message)

          # Notice that the script_path is now the fully qualified path, and the
          # same command-#.json file is used.
          # Also, the python wrapper is used, since it sets up the correct environment variables
          command_params = ["/usr/bin/ambari-python-wrap",
                            script_path,
                            task.function,
                            self.command_data_file,
                            self.basedir,
                            self.stroutfile,
                            self.logging_level,
                            Script.get_tmp_dir()]

          task.command = "source /var/lib/ambari-agent/ambari-env.sh ; " + " ".join(command_params)
          # Replace redundant whitespace to make the unit tests easier to validate
          task.command = re.sub(r"\s+", " ", task.command).strip()

        if task.command:
          task.command = replace_variables(task.command, host_name, version)
          shell.checked_call(task.command, logoutput=True, quiet=True)
Example #35
 def stop(self, env, upgrade_type=None):
     import params
     env.set_params(params)
     Logger.info('Stop Elasticsearch slave node')
     Execute("service elasticsearch stop")
Example #36
def enable_kms_plugin():

  import params

  if params.has_ranger_admin:

    ranger_flag = False

    if params.stack_supports_ranger_kerberos and params.security_enabled:
      if not is_empty(params.rangerkms_principal) and params.rangerkms_principal != '':
        ranger_flag = check_ranger_service_support_kerberos(params.kms_user, params.rangerkms_keytab, params.rangerkms_principal)
      else:
        ranger_flag = check_ranger_service_support_kerberos(params.kms_user, params.spengo_keytab, params.spnego_principal)
    else:
      ranger_flag = check_ranger_service()

    if not ranger_flag:
      Logger.error('Error in Get/Create service for Ranger Kms.')

    current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    File(format('{kms_conf_dir}/ranger-security.xml'),
      owner = params.kms_user,
      group = params.kms_group,
      mode = 0644,
      content = format('<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>')
    )

    Directory([os.path.join('/etc', 'ranger', params.repo_name), os.path.join('/etc', 'ranger', params.repo_name, 'policycache')],
      owner = params.kms_user,
      group = params.kms_group,
      mode=0775,
      create_parents = True
    )
    
    File(os.path.join('/etc', 'ranger', params.repo_name, 'policycache',format('kms_{repo_name}.json')),
      owner = params.kms_user,
      group = params.kms_group,
      mode = 0644        
    )

    XmlConfig("ranger-kms-audit.xml",
      conf_dir=params.kms_conf_dir,
      configurations=params.config['configurations']['ranger-kms-audit'],
      configuration_attributes=params.config['configuration_attributes']['ranger-kms-audit'],
      owner=params.kms_user,
      group=params.kms_group,
      mode=0744)

    XmlConfig("ranger-kms-security.xml",
      conf_dir=params.kms_conf_dir,
      configurations=params.config['configurations']['ranger-kms-security'],
      configuration_attributes=params.config['configuration_attributes']['ranger-kms-security'],
      owner=params.kms_user,
      group=params.kms_group,
      mode=0744)

    XmlConfig("ranger-policymgr-ssl.xml",
      conf_dir=params.kms_conf_dir,
      configurations=params.config['configurations']['ranger-kms-policymgr-ssl'],
      configuration_attributes=params.config['configuration_attributes']['ranger-kms-policymgr-ssl'],
      owner=params.kms_user,
      group=params.kms_group,
      mode=0744)

    if params.xa_audit_db_is_enabled:
      cred_setup = params.cred_setup_prefix + ('-f', params.credential_file, '-k', 'auditDBCred', '-v', PasswordString(params.xa_audit_db_password), '-c', '1')
      Execute(cred_setup, environment={'JAVA_HOME': params.java_home}, logoutput=True, sudo=True)

    cred_setup = params.cred_setup_prefix + ('-f', params.credential_file, '-k', 'sslKeyStore', '-v', PasswordString(params.ssl_keystore_password), '-c', '1')
    Execute(cred_setup, environment={'JAVA_HOME': params.java_home}, logoutput=True, sudo=True)

    cred_setup = params.cred_setup_prefix + ('-f', params.credential_file, '-k', 'sslTrustStore', '-v', PasswordString(params.ssl_truststore_password), '-c', '1')
    Execute(cred_setup, environment={'JAVA_HOME': params.java_home}, logoutput=True, sudo=True)

    File(params.credential_file,
      owner = params.kms_user,
      group = params.kms_group,
      mode = 0640
      )
Example #37
 def install(self, env):
     import params
     env.set_params(params)
     Logger.info('Install Elasticsearch slave node')
     self.install_packages(env)
Example #38
def kms(upgrade_type=None):
  import params

  if params.has_ranger_admin:

    Directory(params.kms_conf_dir,
      owner = params.kms_user,
      group = params.kms_group,
      create_parents = True
    )

    copy_jdbc_connector()

    File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
      content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
      mode = 0644,
    )

    cp = format("{check_db_connection_jar}")
    if params.db_flavor.lower() == 'sqla':
      cp = cp + os.pathsep + format("{kms_home}/ews/webapp/lib/sajdbc4.jar")
    else:
      path_to_jdbc = format("{kms_home}/ews/webapp/lib/{jdbc_jar_name}")
      if not os.path.isfile(path_to_jdbc):
        path_to_jdbc = format("{kms_home}/ews/webapp/lib/") + \
                       params.default_connectors_map[params.db_flavor.lower()] if params.db_flavor.lower() in params.default_connectors_map else None
        if not os.path.isfile(path_to_jdbc):
          path_to_jdbc = format("{kms_home}/ews/webapp/lib/") + "*"
          error_message = "Error! Sorry, but we can't find jdbc driver with default name " + params.default_connectors_map[params.db_flavor] + \
                " in ranger kms lib dir. So, db connection check can fail. Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc} on server host.'"
          Logger.error(error_message)

      cp = cp + os.pathsep + path_to_jdbc

    db_connection_check_command = format(
      "{java_home}/bin/java -cp {cp} org.apache.ambari.server.DBConnectionVerification '{ranger_kms_jdbc_connection_url}' {db_user} {db_password!p} {ranger_kms_jdbc_driver}")
    
    env_dict = {}
    if params.db_flavor.lower() == 'sqla':
      env_dict = {'LD_LIBRARY_PATH':params.ld_library_path}

    Execute(db_connection_check_command, path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10, environment=env_dict)

    if params.xa_audit_db_is_enabled and params.driver_source is not None and not params.driver_source.endswith("/None"):
      if params.xa_previous_jdbc_jar and os.path.isfile(params.xa_previous_jdbc_jar):
        File(params.xa_previous_jdbc_jar, action='delete')

      File(params.downloaded_connector_path,
        content = DownloadSource(params.driver_source),
        mode = 0644
      )

      Execute(('cp', '--remove-destination', params.downloaded_connector_path, params.driver_target),
          path=["/bin", "/usr/bin/"],
          sudo=True)

      File(params.driver_target, mode=0644)

    Directory(os.path.join(params.kms_home, 'ews', 'webapp', 'WEB-INF', 'classes', 'lib'),
        mode=0755,
        owner=params.kms_user,
        group=params.kms_group        
      )

    Execute(('cp', format('{kms_home}/ranger-kms-initd'), '/etc/init.d/ranger-kms'),
      not_if=format('ls /etc/init.d/ranger-kms'),
      only_if=format('ls {kms_home}/ranger-kms-initd'),
      sudo=True)

    File('/etc/init.d/ranger-kms',
      mode = 0755
    )

    Directory(format('{kms_home}/'),
              owner = params.kms_user,
              group = params.kms_group,
              recursive_ownership = True,
    )

    Directory(params.ranger_kms_pid_dir,
      mode=0755,
      owner = params.kms_user,
      group = params.user_group,
      cd_access = "a",
      create_parents=True
    )

    if params.stack_supports_pid:
      File(format('{kms_conf_dir}/ranger-kms-env-piddir.sh'),
        content = format("export RANGER_KMS_PID_DIR_PATH={ranger_kms_pid_dir}\nexport KMS_USER={kms_user}"),
        owner = params.kms_user,
        group = params.kms_group,
        mode=0755
      )

    Directory(params.kms_log_dir,
      owner = params.kms_user,
      group = params.kms_group,
      cd_access = 'a',
      create_parents=True,
      mode=0755
    )

    File(format('{kms_conf_dir}/ranger-kms-env-logdir.sh'),
      content = format("export RANGER_KMS_LOG_DIR={kms_log_dir}"),
      owner = params.kms_user,
      group = params.kms_group,
      mode=0755
    )

    Execute(('ln','-sf', format('{kms_home}/ranger-kms'),'/usr/bin/ranger-kms'),
      not_if=format('ls /usr/bin/ranger-kms'),
      only_if=format('ls {kms_home}/ranger-kms'),
      sudo=True)

    File('/usr/bin/ranger-kms', mode = 0755)

    Execute(('ln','-sf', format('{kms_home}/ranger-kms'),'/usr/bin/ranger-kms-services.sh'),
      not_if=format('ls /usr/bin/ranger-kms-services.sh'),
      only_if=format('ls {kms_home}/ranger-kms'),
      sudo=True)

    File('/usr/bin/ranger-kms-services.sh', mode = 0755)

    Execute(('ln','-sf', format('{kms_home}/ranger-kms-initd'),format('{kms_home}/ranger-kms-services.sh')),
      not_if=format('ls {kms_home}/ranger-kms-services.sh'),
      only_if=format('ls {kms_home}/ranger-kms-initd'),
      sudo=True)

    File(format('{kms_home}/ranger-kms-services.sh'), mode = 0755)

    Directory(params.kms_log_dir,
      owner = params.kms_user,
      group = params.kms_group,
      mode = 0775
    )

    do_keystore_setup(params.credential_provider_path, params.jdbc_alias, params.db_password)
    do_keystore_setup(params.credential_provider_path, params.masterkey_alias, params.kms_master_key_password)
    if params.stack_support_kms_hsm and params.enable_kms_hsm:
      do_keystore_setup(params.credential_provider_path, params.hms_partition_alias, unicode(params.hms_partition_passwd))

    XmlConfig("dbks-site.xml",
      conf_dir=params.kms_conf_dir,
      configurations=params.config['configurations']['dbks-site'],
      configuration_attributes=params.config['configuration_attributes']['dbks-site'],
      owner=params.kms_user,
      group=params.kms_group,
      mode=0644
    )

    XmlConfig("ranger-kms-site.xml",
      conf_dir=params.kms_conf_dir,
      configurations=params.config['configurations']['ranger-kms-site'],
      configuration_attributes=params.config['configuration_attributes']['ranger-kms-site'],
      owner=params.kms_user,
      group=params.kms_group,
      mode=0644
    )

    XmlConfig("kms-site.xml",
      conf_dir=params.kms_conf_dir,
      configurations=params.config['configurations']['kms-site'],
      configuration_attributes=params.config['configuration_attributes']['kms-site'],
      owner=params.kms_user,
      group=params.kms_group,
      mode=0644
    )

    File(os.path.join(params.kms_conf_dir, "kms-log4j.properties"),
      owner=params.kms_user,
      group=params.kms_group,
      content=params.kms_log4j,
      mode=0644
    )
    if params.security_enabled:
      # core-site.xml linking required by setup for HDFS encryption
      XmlConfig("core-site.xml",
        conf_dir=params.kms_conf_dir,
        configurations=params.config['configurations']['core-site'],
        configuration_attributes=params.config['configuration_attributes']['core-site'],
        owner=params.kms_user,
        group=params.kms_group,
        mode=0644
      )
Example #39
  def actionexecute(self, env):
    num_errors = 0

    # Parse parameters
    config = Script.get_config()

    repo_rhel_suse = config['configurations']['cluster-env']['repo_suse_rhel_template']
    repo_ubuntu = config['configurations']['cluster-env']['repo_ubuntu_template']
    template = repo_rhel_suse if OSCheck.is_redhat_family() or OSCheck.is_suse_family() else repo_ubuntu

    # Handle a SIGTERM and SIGINT gracefully
    signal.signal(signal.SIGTERM, self.abort_handler)
    signal.signal(signal.SIGINT, self.abort_handler)

    self.repository_version_id = None

    base_urls = []
    # Select dict that contains parameters
    try:
      if 'base_urls' in config['roleParams']:
        base_urls = json.loads(config['roleParams']['base_urls'])

      self.repository_version = config['roleParams']['repository_version']
      package_list = json.loads(config['roleParams']['package_list'])
      stack_id = config['roleParams']['stack_id']

      if 'repository_version_id' in config['roleParams']:
        self.repository_version_id = config['roleParams']['repository_version_id']
    except KeyError:
      pass

    # current stack information
    self.current_stack_version_formatted = None
    if 'stack_version' in config['hostLevelParams']:
      current_stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
      self.current_stack_version_formatted = format_stack_version(current_stack_version_unformatted)


    self.stack_name = Script.get_stack_name()
    if self.stack_name is None:
      raise Fail("Cannot determine the stack name")

    self.stack_root_folder = Script.get_stack_root()
    if self.stack_root_folder is None:
      raise Fail("Cannot determine the stack's root directory")

    if self.repository_version is None:
      raise Fail("Cannot determine the repository version to install")

    self.repository_version = self.repository_version.strip()

    # Install/update repositories
    self.current_repositories = []
    self.current_repo_files = set()

    # Enable base system repositories
    # We don't need that for RHEL family, because we leave all repos enabled
    # except disabled HDP* ones
    if OSCheck.is_suse_family():
      self.current_repositories.append('base')
    elif OSCheck.is_ubuntu_family():
      self.current_repo_files.add('base')

    Logger.info("Will install packages for repository version {0}".format(self.repository_version))

    if 0 == len(base_urls):
      Logger.warning("Repository list is empty. Ambari may not be managing the repositories for {0}.".format(self.repository_version))

    try:
      if 'repositoryFile' in config:
        create_repo_files(template, CommandRepository(config['repositoryFile']))
      else:
        append_to_file = False
        for url_info in base_urls:
          repo_name, repo_file = self.install_repository(url_info, append_to_file, template)
          self.current_repositories.append(repo_name)
          self.current_repo_files.add(repo_file)
          append_to_file = True

    except Exception as err:
      Logger.logger.exception("Cannot install repository files. Error: {0}".format(str(err)))
      num_errors += 1
 def local_repo():
     Logger.info("Setting up local repo")
     Execute("yum -y install createrepo")
     Execute("createrepo /localrepo")
     Execute("chmod -R o-w+r /localrepo")
Example #41
def copy_jdbc_connector(stack_version=None):
  import params

  if params.jdbc_jar_name is None and params.driver_curl_source.endswith("/None"):
    error_message = "Error! Sorry, but we can't find jdbc driver related to {0} database to download from {1}. \
    Please run 'ambari-server setup --jdbc-db={db_name} --jdbc-driver={path_to_jdbc} on server host.'".format(params.db_flavor, params.jdk_location)
    Logger.error(error_message)

  if params.driver_curl_source and not params.driver_curl_source.endswith("/None"):
    if params.previous_jdbc_jar and os.path.isfile(params.previous_jdbc_jar):
      File(params.previous_jdbc_jar, action='delete')

  kms_home = params.kms_home
  if stack_version is not None:
    kms_home = format("{stack_root}/{stack_version}/ranger-kms")

  driver_curl_target = format("{kms_home}/ews/webapp/lib/{jdbc_jar_name}")

  File(params.downloaded_custom_connector,
    content = DownloadSource(params.driver_curl_source),
    mode = 0644
  )

  Directory(os.path.join(kms_home, 'ews', 'lib'),
    mode=0755
  )

  if params.db_flavor.lower() == 'sqla':
    Execute(('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir), sudo = True)

    Execute(('cp', '--remove-destination', params.jar_path_in_archive, os.path.join(kms_home, 'ews', 'webapp', 'lib')),
      path=["/bin", "/usr/bin/"],
      sudo=True)

    Directory(params.jdbc_libs_dir,
      cd_access="a",
      create_parents=True)

    Execute(as_sudo(['yes', '|', 'cp', params.libs_path_in_archive, params.jdbc_libs_dir], auto_escape=False),
      path=["/bin", "/usr/bin/"])

    File(os.path.join(kms_home, 'ews', 'webapp', 'lib', 'sajdbc4.jar'), mode=0644)
  else:
    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, os.path.join(kms_home, 'ews', 'webapp', 'lib')),
      path=["/bin", "/usr/bin/"],
      sudo=True)

    File(os.path.join(kms_home, 'ews', 'webapp', 'lib', params.jdbc_jar_name), mode=0644)

  ModifyPropertiesFile(format("{kms_home}/install.properties"),
    properties = params.config['configurations']['kms-properties'],
    owner = params.kms_user
  )

  if params.db_flavor.lower() == 'sqla':
    ModifyPropertiesFile(format("{kms_home}/install.properties"),
      properties = {'SQL_CONNECTOR_JAR': format('{kms_home}/ews/webapp/lib/sajdbc4.jar')},
      owner = params.kms_user,
    )
  else:
    ModifyPropertiesFile(format("{kms_home}/install.properties"),
      properties = {'SQL_CONNECTOR_JAR': format('{driver_curl_target}')},
      owner = params.kms_user,
    )
Example #42
    def install_packages(self, env):
        """
    List of packages that are required< by service is received from the server
    as a command parameter. The method installs all packages
    from this list
    
    exclude_packages - list of regexes (possibly raw strings as well), the
    packages which match the regex won't be installed.
    NOTE: regexes don't have Python syntax, but simple package regexes which support only * and .* and ?
    """
        config = self.get_config()

        if 'host_sys_prepped' in config['hostLevelParams']:
            # do not install anything on sys-prepped host
            if config['hostLevelParams']['host_sys_prepped'] == True:
                Logger.info("Node has all packages pre-installed. Skipping.")
                return
            pass
        try:
            package_list_str = config['hostLevelParams']['package_list']
            agent_stack_retry_on_unavailability = bool(
                config['hostLevelParams']
                ['agent_stack_retry_on_unavailability'])
            agent_stack_retry_count = int(
                config['hostLevelParams']['agent_stack_retry_count'])
            try:
                available_packages_in_repos = packages_analyzer.get_available_packages_in_repos(
                    config['repositoryFile']['repositories'])
            except Exception as err:
                available_packages_in_repos = []
            if isinstance(package_list_str,
                          basestring) and len(package_list_str) > 0:
                package_list = json.loads(package_list_str)
                for package in package_list:
                    if self.check_package_condition(package):
                        name = self.get_package_from_available(
                            package['name'], available_packages_in_repos)
                        # HACK: On Windows, only install ambari-metrics packages using Choco Package Installer
                        # TODO: Update this once choco packages for hadoop are created. This is because, service metainfo.xml support
                        # <osFamily>any</osFamily>, which would cause installation failure on Windows.
                        if OSCheck.is_windows_family():
                            if "ambari-metrics" in name:
                                Package(name)
                        else:
                            Package(name,
                                    retry_on_repo_unavailability=
                                    agent_stack_retry_on_unavailability,
                                    retry_count=agent_stack_retry_count)
        except KeyError:
            pass  # No reason to worry

        if OSCheck.is_windows_family():
            #TODO hacky install of windows msi, remove it or move to old(2.1) stack definition when component based install will be implemented
            hadoop_user = config["configurations"]["cluster-env"][
                "hadoop.user.name"]
            install_windows_msi(
                config['hostLevelParams']['jdk_location'],
                config["hostLevelParams"]["agentCacheDir"], [
                    "hdp-2.3.0.0.winpkg.msi", "hdp-2.3.0.0.cab",
                    "hdp-2.3.0.0-01.cab"
                ], hadoop_user, self.get_password(hadoop_user),
                str(config['hostLevelParams']['stack_version']))
            reload_windows_env()
 def remote_repo():
     Logger.info('Using remote repo')
Example #44
    def restart(self, env):
        """
    Default implementation of restart command is to call stop and start methods
    Feel free to override restart() method with your implementation.
    For client components we call install
    """
        config = self.get_config()
        componentCategory = None
        try:
            componentCategory = config['roleParams']['component_category']
        except KeyError:
            pass

        upgrade_type_command_param = ""
        direction = None
        if config is not None:
            command_params = config[
                "commandParams"] if "commandParams" in config else None
            if command_params is not None:
                upgrade_type_command_param = command_params[
                    "upgrade_type"] if "upgrade_type" in command_params else ""
                direction = command_params[
                    "upgrade_direction"] if "upgrade_direction" in command_params else None

        upgrade_type = Script.get_upgrade_type(upgrade_type_command_param)
        is_stack_upgrade = upgrade_type is not None

        # need this before actually executing so that failures still report upgrade info
        if is_stack_upgrade:
            upgrade_info = {"upgrade_type": upgrade_type_command_param}
            if direction is not None:
                upgrade_info["direction"] = direction.upper()

            Script.structuredOut.update(upgrade_info)

        if componentCategory and componentCategory.strip().lower() == 'client':
            if is_stack_upgrade:
                # Remain backward compatible with the rest of the services that haven't switched to using
                # the pre_upgrade_restart method. Once done, remove the else-block.
                if "pre_upgrade_restart" in dir(self):
                    self.pre_upgrade_restart(env, upgrade_type=upgrade_type)
                else:
                    self.pre_rolling_restart(env)

            self.install(env)
        else:
            # To remain backward compatible with older stacks, only pass upgrade_type if available.
            # TODO, remove checking the argspec for "upgrade_type" once all of the services support that optional param.
            if "upgrade_type" in inspect.getargspec(self.stop).args:
                self.stop(env, upgrade_type=upgrade_type)
            else:
                if is_stack_upgrade:
                    self.stop(
                        env,
                        rolling_restart=(upgrade_type == UPGRADE_TYPE_ROLLING))
                else:
                    self.stop(env)

            if is_stack_upgrade:
                # Remain backward compatible with the rest of the services that haven't switched to using
                # the pre_upgrade_restart method. Once done, remove the else-block.
                if "pre_upgrade_restart" in dir(self):
                    self.pre_upgrade_restart(env, upgrade_type=upgrade_type)
                else:
                    self.pre_rolling_restart(env)

            service_name = config[
                'serviceName'] if config is not None and 'serviceName' in config else None
            try:
                #TODO Once the logic for pid is available from Ranger and Ranger KMS code, will remove the below if block.
                services_to_skip = ['RANGER', 'RANGER_KMS']
                if service_name in services_to_skip:
                    Logger.info(
                        'Temporarily skipping status check for {0} service only.'
                        .format(service_name))
                elif is_stack_upgrade:
                    Logger.info(
                        'Skipping status check for {0} service during upgrade'.
                        format(service_name))
                else:
                    self.status(env)
                    raise Fail(
                        "Stop command finished but process keep running.")
            except ComponentIsNotRunning as e:
                pass  # expected
            except ClientComponentHasNoStatus as e:
                pass  # expected

            # To remain backward compatible with older stacks, only pass upgrade_type if available.
            # TODO, remove checking the argspec for "upgrade_type" once all of the services support that optional param.
            self.pre_start(env)
            if "upgrade_type" in inspect.getargspec(self.start).args:
                self.start(env, upgrade_type=upgrade_type)
            else:
                if is_stack_upgrade:
                    self.start(
                        env,
                        rolling_restart=(upgrade_type == UPGRADE_TYPE_ROLLING))
                else:
                    self.start(env)
            self.post_start(env)

            if is_stack_upgrade:
                # Remain backward compatible with the rest of the services that haven't switched to using
                # the post_upgrade_restart method. Once done, remove the else-block.
                if "post_upgrade_restart" in dir(self):
                    self.post_upgrade_restart(env, upgrade_type=upgrade_type)
                else:
                    self.post_rolling_restart(env)

        if self.should_expose_component_version("restart"):
            self.save_component_version_to_structured_out()
 def stop_enrichment_topology(self):
     Logger.info('Stopping ' + self.__enrichment_topology)
     stop_cmd = 'storm kill ' + self.__enrichment_topology
     Execute(stop_cmd)
     Logger.info('Done stopping enrichment topologies')
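
A self-contained sketch of the same 'storm kill' step using the standard library instead of the Execute resource; the topology name is a placeholder.

import subprocess

def stop_topology(topology_name):
    # equivalent of Execute('storm kill <topology>') above
    subprocess.check_call(["storm", "kill", topology_name])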