Example #1
    def execute(self):
        """
    Sets up logging;
    Parses command parameters and executes method relevant to command type
    """
        logger, chout, cherr = Logger.initialize_logger(__name__)

        # parse arguments
        if len(sys.argv) < 7:
            logger.error("Script expects at least 6 arguments")
            print USAGE.format(os.path.basename(
                sys.argv[0]))  # print to stdout
            sys.exit(1)

        command_name = str.lower(sys.argv[1])
        self.command_data_file = sys.argv[2]
        self.basedir = sys.argv[3]
        self.stroutfile = sys.argv[4]
        self.load_structured_out()
        self.logging_level = sys.argv[5]
        Script.tmp_dir = sys.argv[6]

        logging_level_str = logging._levelNames[self.logging_level]
        chout.setLevel(logging_level_str)
        logger.setLevel(logging_level_str)

        # On Windows we need to reload some environment variables manually because there are no default
        # paths for configs (like /etc/something/conf on Linux). When these env vars are created by one
        # Script execution, they cannot be updated in the agent, so other Script executions cannot see the new values.
        if OSCheck.is_windows_family():
            reload_windows_env()

        try:
            with open(self.command_data_file) as f:
                Script.config = ConfigDictionary(json.load(f))
                # load passwords here (used on Windows to impersonate different users)
                Script.passwords = {}
                for k, v in _PASSWORD_MAP.iteritems():
                    if get_path_from_configuration(
                            k, Script.config) and get_path_from_configuration(
                                v, Script.config):
                        Script.passwords[get_path_from_configuration(
                            k, Script.config)] = get_path_from_configuration(
                                v, Script.config)

        except IOError:
            logger.exception(
                "Can not read json file with command parameters: ")
            sys.exit(1)

        # Run class method depending on a command type
        try:
            method = self.choose_method_to_execute(command_name)
            with Environment(self.basedir, tmp_dir=Script.tmp_dir) as env:
                env.config.download_path = Script.tmp_dir
                method(env)
        finally:
            if self.should_expose_component_version(command_name):
                self.save_component_version_to_structured_out()
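A minimal sketch of the same positional-argument handling outside the Script class (the helper name parse_script_args and the USAGE text are placeholders, not part of the example above):

import os
import sys

USAGE = "Usage: {0} <command> <command-json> <basedir> <structured-out-json> <log-level> <tmp-dir>"

def parse_script_args(argv):
    """Unpack the six positional arguments the execute() method above expects."""
    if len(argv) < 7:
        print(USAGE.format(os.path.basename(argv[0])))
        sys.exit(1)
    command_name, command_data_file, basedir, stroutfile, logging_level, tmp_dir = argv[1:7]
    return command_name.lower(), command_data_file, basedir, stroutfile, logging_level, tmp_dir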
def get_live_status(pid_file, flume_conf_directory):
  """
  Gets the status information of a flume agent, including source, sink, and
  channel counts.
  :param pid_file: the PID file of the agent to check
  :param flume_conf_directory: the configuration directory (i.e. /etc/flume/conf)
  :return: a dictionary of information about the flume agent
  """
  pid_file_part = pid_file.split(os.sep).pop()

  res = {}
  res['name'] = pid_file_part

  if pid_file_part.endswith(".pid"):
    res['name'] = pid_file_part[:-4]

  res['status'] = 'RUNNING' if is_flume_process_live(pid_file) else 'NOT_RUNNING'
  res['sources_count'] = 0
  res['sinks_count'] = 0
  res['channels_count'] = 0

  flume_agent_conf_dir = flume_conf_directory + os.sep + res['name']
  flume_agent_meta_file = flume_agent_conf_dir + os.sep + 'ambari-meta.json'

  try:
    with open(flume_agent_meta_file) as fp:
      meta = json.load(fp)
      res['sources_count'] = meta['sources_count']
      res['sinks_count'] = meta['sinks_count']
      res['channels_count'] = meta['channels_count']
  except:
    Logger.logger.exception(format("Error reading {flume_agent_meta_file}: "))

  return res
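The pattern above (fill in defaults, then a best-effort json.load with the failure logged) works with only the standard library. The helper below is a standalone sketch and not part of the Ambari modules used in the example:

import json
import logging

def read_counts(meta_file):
    """Best-effort read of a JSON metadata file, keeping defaults on any failure."""
    counts = {'sources_count': 0, 'sinks_count': 0, 'channels_count': 0}
    try:
        with open(meta_file) as fp:
            counts.update(json.load(fp))
    except (IOError, OSError, ValueError):
        logging.getLogger(__name__).exception("Error reading %s", meta_file)
    return counts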
  def delete_storm_local_data(self, env):
    """
    Deletes Storm data from local directories. This will create a marker file
    with JSON data representing the upgrade stack and request/stage ID. This
    will prevent multiple Storm components on the same host from removing
    the local directories more than once.
    :return:
    """
    import params

    Logger.info('Clearing Storm data from local directories...')

    storm_local_directory = params.local_dir
    if storm_local_directory is None:
      raise Fail("The storm local directory specified by storm-site/storm.local.dir must be specified")

    request_id = default("/requestId", None)
    stage_id = default("/stageId", None)
    stack_version = params.version
    stack_name = params.stack_name

    json_map = {}
    json_map["requestId"] = request_id
    json_map["stageId"] = stage_id
    json_map["stackVersion"] = stack_version
    json_map["stackName"] = stack_name

    temp_directory = params.tmp_dir
    upgrade_file = os.path.join(temp_directory, "storm-upgrade-{0}.json".format(stack_version))

    if os.path.exists(upgrade_file):
      try:
        with open(upgrade_file) as file_pointer:
          existing_json_map = json.load(file_pointer)

        if cmp(json_map, existing_json_map) == 0:
          Logger.info("The storm upgrade has already removed the local directories for {0}-{1} for request {2} and stage {3}".format(
            stack_name, stack_version, request_id, stage_id))

          # nothing else to do here for this as it appears to have already been
          # removed by another component being upgraded
          return

      except:
        Logger.error("The upgrade file {0} appears to be corrupt; removing...".format(upgrade_file))
        File(upgrade_file, action="delete")
    else:
      # delete the upgrade file since it does not match
      File(upgrade_file, action="delete")

    # delete from local directory
    Directory(storm_local_directory, action="delete", recursive=True)

    # recreate storm local directory
    Directory(storm_local_directory, mode=0755, owner = params.storm_user,
      group = params.user_group, recursive = True)

    # the file doesn't exist, so create it
    with open(upgrade_file, 'w') as file_pointer:
      json.dump(json_map, file_pointer, indent=2)
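The marker-file technique above boils down to: skip the destructive step when an identical JSON marker already exists, otherwise do the work and record the marker. A self-contained sketch of that idea (run_once and its arguments are illustrative, not Ambari API):

import json
import os

def run_once(marker_file, payload, action):
    """Run `action` unless `marker_file` already holds an identical JSON payload."""
    if os.path.exists(marker_file):
        try:
            with open(marker_file) as fp:
                if json.load(fp) == payload:
                    return False  # identical marker: the work was already done
        except ValueError:
            os.remove(marker_file)  # corrupt marker: discard it and redo the work
    action()
    with open(marker_file, 'w') as fp:
        json.dump(payload, fp, indent=2)
    return True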
Example #4
def loadJson(path):
  try:
    with open(path, 'r') as f:
      return json.load(f)
  except Exception as err:
    traceback.print_exc()
    raise StackAdvisorException("Error loading file at: {0}".format(path))
  def __load_definitions(self):
    """
    Loads all alert definitions from a file. All clusters are stored in
    a single file.
    :return:
    """
    definitions = []

    all_commands = None
    alerts_definitions_path = os.path.join(self.cachedir, self.FILENAME)
    try:
      with open(alerts_definitions_path) as fp:
        all_commands = json.load(fp)
    except:
      logger.warning('[AlertScheduler] {0} not found or invalid. No alerts will be scheduled until registration occurs.'.format(alerts_definitions_path))
      return definitions

    for command_json in all_commands:
      clusterName = '' if not 'clusterName' in command_json else command_json['clusterName']
      hostName = '' if not 'hostName' in command_json else command_json['hostName']

      for definition in command_json['alertDefinitions']:
        alert = self.__json_to_callable(clusterName, hostName, definition)

        if alert is None:
          continue

        alert.set_helpers(self._collector, self._cluster_configuration)

        definitions.append(alert)

    return definitions
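Judging from the fields the loader reads, the alert definitions file has roughly this layout (field names come from the code above; the values are made up):

# Shape of the alert definitions file read by __load_definitions above
example_commands = [
    {
        "clusterName": "c1",
        "hostName": "host1.example.com",
        "alertDefinitions": [
            # each entry is handed to self.__json_to_callable(...) unchanged
            {"name": "example_alert"}
        ]
    }
]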
Example #6
def get_live_status(pid_file, flume_conf_directory):
  """
  Gets the status information of a flume agent, including source, sink, and
  channel counts.
  :param pid_file: the PID file of the agent to check
  :param flume_conf_directory: the configuration directory (i.e. /etc/flume/conf)
  :return: a dictionary of information about the flume agent
  """
  pid_file_part = pid_file.split(os.sep).pop()

  res = {}
  res['name'] = pid_file_part

  if pid_file_part.endswith(".pid"):
    res['name'] = pid_file_part[:-4]

  res['status'] = 'RUNNING' if is_flume_process_live(pid_file) else 'NOT_RUNNING'
  res['sources_count'] = 0
  res['sinks_count'] = 0
  res['channels_count'] = 0

  flume_agent_conf_dir = flume_conf_directory + os.sep + res['name']
  flume_agent_meta_file = flume_agent_conf_dir + os.sep + 'ambari-meta.json'

  try:
    with open(flume_agent_meta_file) as fp:
      meta = json.load(fp)
      res['sources_count'] = meta['sources_count']
      res['sinks_count'] = meta['sinks_count']
      res['channels_count'] = meta['channels_count']
  except:
    pass

  return res
Example #7
  def __init__(self, cluster_config_cache_dir):
    """
    Initializes the configuration cache.
    :param cluster_config_cache_dir:
    :return:
    """
    self.cluster_config_cache_dir = cluster_config_cache_dir

    # keys are cluster names, values are configurations
    self.__configurations = {}

    self.__file_lock = threading.RLock()
    self.__cache_lock = threading.RLock()
    self.__config_json_file = os.path.join(self.cluster_config_cache_dir, self.FILENAME)

    # ensure that our cache directory exists
    if not os.path.exists(cluster_config_cache_dir):
      try:
        os.makedirs(cluster_config_cache_dir)
      except:
        logger.critical("Could not create the cluster configuration cache directory {0}".format(cluster_config_cache_dir))

    # if the file exists, then load it
    try:
      if os.path.isfile(self.__config_json_file):
        with open(self.__config_json_file, 'r') as fp:
          self.__configurations = json.load(fp)
    except Exception, exception:
      logger.warning("Unable to load configurations from {0}. This file will be regenerated on registration".format(self.__config_json_file))
def get_policycache_service_name(service_name, repo_name, cache_service_list):
    service_name_exist_flag = False
    policycache_path = os.path.join('/etc', 'ranger', repo_name, 'policycache')
    try:
        for cache_service in cache_service_list:
            policycache_json_file = format(
                '{policycache_path}/{cache_service}_{repo_name}.json')
            if os.path.isfile(policycache_json_file
                              ) and os.path.getsize(policycache_json_file) > 0:
                with open(policycache_json_file) as json_file:
                    json_data = json.load(json_file)
                    if 'serviceName' in json_data and json_data[
                            'serviceName'] == repo_name:
                        Logger.info(
                            "Skipping Ranger API calls, as policy cache file exists for {0}"
                            .format(service_name))
                        Logger.warning(
                            "If service name for {0} is not created on Ranger Admin, then to re-create it delete policy cache file: {1}"
                            .format(service_name, policycache_json_file))
                        service_name_exist_flag = True
                        break
    except Exception, err:
        Logger.error(
            "Error occurred while fetching service name from policy cache file.\nError: {0}"
            .format(err))
    return service_name_exist_flag
Example #9
def read_mapping():
  if os.path.isfile(Options.MR_MAPPING_FILE):
    if Options.MR_MAPPING is not None:
      return Options.MR_MAPPING
    else:
      Options.MR_MAPPING = json.load(open(Options.MR_MAPPING_FILE))
      return Options.MR_MAPPING
  else:
    raise FatalException(-1, "MAPREDUCE host mapping file, mr_mapping, is not available or badly formatted. Execute "
                             "action save-mr-mapping. Ensure the file is present in the directory where you are "
                             "executing this command.")
Example #10
def load_version(struct_out_file):
    """
  Load version from file.  Made a separate method for testing
  """
    try:
        with open(struct_out_file, 'r') as fp:
            json_info = json.load(fp)

        return json_info['version']
    except (IOError, KeyError, TypeError):
        return None
Example #11
  def execute(self):
    """
    Sets up logging;
    Parses command parameters and executes method relevant to command type
    """
    # parse arguments
    if len(sys.argv) < 7:
     print "Script expects at least 6 arguments"
     print USAGE.format(os.path.basename(sys.argv[0])) # print to stdout
     sys.exit(1)

    self.command_name = str.lower(sys.argv[1])
    self.command_data_file = sys.argv[2]
    self.basedir = sys.argv[3]
    self.stroutfile = sys.argv[4]
    self.load_structured_out()
    self.logging_level = sys.argv[5]
    Script.tmp_dir = sys.argv[6]

    logging_level_str = logging._levelNames[self.logging_level]
    Logger.initialize_logger(__name__, logging_level=logging_level_str)

    # On Windows we need to reload some environment variables manually because there are no default
    # paths for configs (like /etc/something/conf on Linux). When these env vars are created by one
    # Script execution, they cannot be updated in the agent, so other Script executions cannot see the new values.
    if OSCheck.is_windows_family():
      reload_windows_env()

    try:
      with open(self.command_data_file) as f:
        Script.config = ConfigDictionary(json.load(f))
        # load passwords here (used on Windows to impersonate different users)
        Script.passwords = {}
        for k, v in _PASSWORD_MAP.iteritems():
          if get_path_from_configuration(k, Script.config) and get_path_from_configuration(v, Script.config):
            Script.passwords[get_path_from_configuration(k, Script.config)] = get_path_from_configuration(v, Script.config)

    except IOError:
      Logger.logger.exception("Can not read json file with command parameters: ")
      sys.exit(1)

    # Run class method depending on a command type
    try:
      method = self.choose_method_to_execute(self.command_name)
      with Environment(self.basedir, tmp_dir=Script.tmp_dir) as env:
        env.config.download_path = Script.tmp_dir
        method(env)
    finally:
      if self.should_expose_component_version(self.command_name):
        self.save_component_version_to_structured_out()
Example #12
  def delete_storm_local_data(self, env):
    """
    Deletes Storm data from local directories. This will create a marker file
    with JSON data representing the upgrade stack and request/stage ID. This
    will prevent multiple Storm components on the same host from removing
    the local directories more than once.
    :return:
    """
    import params

    Logger.info('Clearing Storm data from local directories...')

    storm_local_directory = params.local_dir
    if storm_local_directory is None:
      raise Fail("The storm local directory specified by storm-site/storm.local.dir must be specified")

    request_id = default("/requestId", None)

    stack_name = params.stack_name
    stack_version = params.version
    upgrade_direction = params.upgrade_direction

    json_map = {}
    json_map["requestId"] = request_id
    json_map["stackName"] = stack_name
    json_map["stackVersion"] = stack_version
    json_map["direction"] = upgrade_direction

    temp_directory = params.tmp_dir
    marker_file = os.path.join(temp_directory, "storm-upgrade-{0}.json".format(stack_version))
    Logger.info("Marker file for upgrade/downgrade of Storm, {0}".format(marker_file))

    if os.path.exists(marker_file):
      Logger.info("The marker file exists.")
      try:
        with open(marker_file) as file_pointer:
          existing_json_map = json.load(file_pointer)

        if cmp(json_map, existing_json_map) == 0:
          Logger.info("The storm upgrade has already removed the local directories for {0}-{1} for "
                      "request {2} and direction {3}. Nothing else to do.".format(stack_name, stack_version, request_id, upgrade_direction))

          # Nothing else to do here for this as it appears to have already been
          # removed by another component being upgraded
          return
        else:
          Logger.info("The marker file differs from the new value. Will proceed to delete Storm local dir, "
                      "and generate new file. Current marker file: {0}".format(str(existing_json_map)))
      except Exception, e:
        Logger.error("The marker file {0} appears to be corrupt; removing it. Error: {1}".format(marker_file, str(e)))
        File(marker_file, action="delete")
Example #13
    def load_structured_out(self):
        Script.structuredOut = {}
        if os.path.exists(self.stroutfile):
            with open(self.stroutfile, 'r') as fp:
                Script.structuredOut = json.load(fp)

        # version is only set in a specific way and should not be carried
        if "version" in Script.structuredOut:
            del Script.structuredOut["version"]
        # reset security issues and errors found on previous runs
        if "securityIssuesFound" in Script.structuredOut:
            del Script.structuredOut["securityIssuesFound"]
        if "securityStateErrorInfo" in Script.structuredOut:
            del Script.structuredOut["securityStateErrorInfo"]
def load_version(struct_out_file):
  """
  Load version from file.  Made a separate method for testing
  """
  json_version = None
  try:
    if os.path.exists(struct_out_file):
      with open(struct_out_file, 'r') as fp:
        json_info = json.load(fp)
        json_version = json_info['version']
  except:
    pass

  return json_version
def load_version(struct_out_file):
    """
  Load version from file.  Made a separate method for testing
  """
    json_version = None
    try:
        if os.path.exists(struct_out_file):
            with open(struct_out_file, 'r') as fp:
                json_info = json.load(fp)
                json_version = json_info['version']
    except:
        pass

    return json_version
Example #16
 def read_result_from_files(self, out_path, err_path, structured_out_path):
     out = open(out_path, 'r').read()
     error = open(err_path, 'r').read()
     try:
         with open(structured_out_path, 'r') as fp:
             structured_out = json.load(fp)
     except Exception:
         if os.path.exists(structured_out_path):
             errMsg = 'Unable to read structured output from ' + structured_out_path
             structured_out = {'msg': errMsg}
             logger.warn(structured_out)
         else:
             structured_out = {}
     return out, error, structured_out
Example #17
  def load_structured_out(self):
    Script.structuredOut = {}
    if os.path.exists(self.stroutfile):
      if os.path.getsize(self.stroutfile) > 0:
        with open(self.stroutfile, 'r') as fp:
          Script.structuredOut = json.load(fp)

    # version is only set in a specific way and should not be carried
    if "version" in Script.structuredOut:
      del Script.structuredOut["version"]
    # reset security issues and errors found on previous runs
    if "securityIssuesFound" in Script.structuredOut:
      del Script.structuredOut["securityIssuesFound"]
    if "securityStateErrorInfo" in Script.structuredOut:
      del Script.structuredOut["securityStateErrorInfo"]
Example #18
 def read_result_from_files(self, out_path, err_path, structured_out_path):
     out = open(out_path, 'r').read()
     error = open(err_path, 'r').read()
     try:
         with open(structured_out_path, 'r') as fp:
             structured_out = json.load(fp)
     except (TypeError, ValueError):
         structured_out = {
             "msg":
             "Unable to read structured output from " + structured_out_path
         }
         self.logger.warn(structured_out)
     except (OSError, IOError):
         structured_out = {}
     return out, error, structured_out
 def read_result_from_files(self, out_path, err_path, structured_out_path):
   out = open(out_path, 'r').read()
   error = open(err_path, 'r').read()
   try:
     with open(structured_out_path, 'r') as fp:
       structured_out = json.load(fp)
   except Exception:
     if os.path.exists(structured_out_path):
       errMsg = 'Unable to read structured output from ' + structured_out_path
       structured_out = {
         'msg' : errMsg
       }
       logger.warn(structured_out)
     else:
       structured_out = {}
   return out, error, structured_out
    def read_file(self, filename):
        runDir = self.findRunDir()
        fullname = os.path.join(runDir, filename)
        if os.path.isfile(fullname):
            res = None
            conf_file = open(os.path.join(runDir, filename), 'r')
            try:
                res = json.load(conf_file)
                if (0 == len(res)):
                    res = None
            except Exception, e:
                logger.error("Error parsing " + filename + ": " + repr(e))
                res = None
                pass
            conf_file.close()

            return res
def cached_kinit_executor(kinit_path,
                          exec_user,
                          keytab_file,
                          principal,
                          hostname,
                          temp_dir,
                          expiration_time=5):
    """
  Main cached kinit executor - Uses a temporary file on the FS to cache executions. Each command
  will have its own file and only one entry (last successful execution) will be stored
  """
    key = str(hash("%s|%s" % (principal, keytab_file)))
    filename = key + "_tmp.txt"
    file_path = temp_dir + os.sep + "kinit_executor_cache"
    output = None

    # First execution scenario dir file existence check
    if not os.path.exists(file_path):
        os.makedirs(file_path)

    file_path += os.sep + filename

    # If the file does not exist create before read
    if not os.path.isfile(file_path):
        with open(file_path, 'w+') as new_file:
            new_file.write("{}")
    try:
        with open(file_path, 'r') as cache_file:
            output = json.load(cache_file)
    except:
        # In the extraordinary case the temporary file gets corrupted the cache should be reset to avoid error loop
        with open(file_path, 'w+') as cache_file:
            cache_file.write("{}")

    if (not output) or (key not in output) or ("last_successful_execution"
                                               not in output[key]):
        new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user,
                        keytab_file, principal, hostname)
    else:
        last_run_time = output[key]["last_successful_execution"]
        now = datetime.now()
        if (now - datetime.strptime(last_run_time, "%Y-%m-%d %H:%M:%S.%f") >
                timedelta(minutes=expiration_time)):
            new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user,
                            keytab_file, principal, hostname)
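The cache file read above is keyed by hash("principal|keytab") and stores only the last successful run per key. new_cached_exec is not shown in the example, so the sketch below covers only the read/expiry side of the logic:

import json
import os
from datetime import datetime, timedelta

def needs_new_kinit(cache_path, key, expiration_minutes=5):
    """Return True when the cached entry for `key` is missing, unreadable, or expired."""
    if not os.path.isfile(cache_path):
        return True
    try:
        with open(cache_path) as fp:
            cache = json.load(fp)
    except ValueError:
        return True  # corrupt cache: force a fresh kinit
    last = cache.get(key, {}).get("last_successful_execution")
    if last is None:
        return True
    last_run = datetime.strptime(last, "%Y-%m-%d %H:%M:%S.%f")
    return datetime.now() - last_run > timedelta(minutes=expiration_minutes)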
Example #22
    def __load_definitions(self):
        """
    Loads all alert definitions from a file. All clusters are stored in
    a single file. This wil also populate the cluster-to-hash dictionary.
    :return:
    """
        definitions = []

        alerts_definitions_path = os.path.join(self.cachedir, self.FILENAME)
        try:
            with open(alerts_definitions_path) as fp:
                all_commands = json.load(fp)
        except:
            logger.warning(
                '[AlertScheduler] {0} not found or invalid. No alerts will be scheduled until registration occurs.'
                .format(alerts_definitions_path))
            return definitions

        for command_json in all_commands:
            clusterName = '' if not 'clusterName' in command_json else command_json[
                'clusterName']
            hostName = '' if not 'hostName' in command_json else command_json[
                'hostName']
            clusterHash = None if not 'hash' in command_json else command_json[
                'hash']

            # cache the cluster and cluster hash after loading the JSON
            if clusterName != '' and clusterHash is not None:
                logger.info(
                    '[AlertScheduler] Caching cluster {0} with alert hash {1}'.
                    format(clusterName, clusterHash))
                self._cluster_hashes[clusterName] = clusterHash

            for definition in command_json['alertDefinitions']:
                alert = self.__json_to_callable(clusterName, hostName,
                                                definition)

                if alert is None:
                    continue

                alert.set_helpers(self._collector, self._cluster_configuration)

                definitions.append(alert)

        return definitions
Example #23
    def load_structured_out(self):
        Script.structuredOut = {}
        if os.path.exists(self.stroutfile):
            if os.path.getsize(self.stroutfile) > 0:
                with open(self.stroutfile, 'r') as fp:
                    try:
                        Script.structuredOut = json.load(fp)
                    except Exception:
                        errMsg = 'Unable to read structured output from ' + self.stroutfile
                        Logger.logger.exception(errMsg)
                        pass

        # version is only set in a specific way and should not be carried
        if "version" in Script.structuredOut:
            del Script.structuredOut["version"]
        # reset security issues and errors found on previous runs
        if "securityIssuesFound" in Script.structuredOut:
            del Script.structuredOut["securityIssuesFound"]
        if "securityStateErrorInfo" in Script.structuredOut:
            del Script.structuredOut["securityStateErrorInfo"]
Example #24
  def load_structured_out(self):
    Script.structuredOut = {}
    if os.path.exists(self.stroutfile):
      if os.path.getsize(self.stroutfile) > 0:
        with open(self.stroutfile, 'r') as fp:
          try:
            Script.structuredOut = json.load(fp)
          except Exception:
            errMsg = 'Unable to read structured output from ' + self.stroutfile
            Logger.logger.exception(errMsg)
            pass

    # version is only set in a specific way and should not be carried
    if "version" in Script.structuredOut:
      del Script.structuredOut["version"]
    # reset security issues and errors found on previous runs
    if "securityIssuesFound" in Script.structuredOut:
      del Script.structuredOut["securityIssuesFound"]
    if "securityStateErrorInfo" in Script.structuredOut:
      del Script.structuredOut["securityStateErrorInfo"]
  def __init__(self, cluster_cache_dir):
    """
    Initializes the cache.
    :param cluster_cache_dir:
    :return:
    """

    self.cluster_cache_dir = cluster_cache_dir

    self.__current_cache_json_file = os.path.join(self.cluster_cache_dir, self.get_cache_name()+'.json')
    self.__current_cache_hash_file = os.path.join(self.cluster_cache_dir, '.'+self.get_cache_name()+'.hash')

    self._cache_lock = threading.RLock()
    self.__file_lock = ClusterCache.file_locks[self.__current_cache_json_file]

    self.hash = None
    cache_dict = {}

    try:
      with self.__file_lock:
        if os.path.isfile(self.__current_cache_json_file):
          with open(self.__current_cache_json_file, 'r') as fp:
            cache_dict = json.load(fp)

        if os.path.isfile(self.__current_cache_hash_file):
          with open(self.__current_cache_hash_file, 'r') as fp:
            self.hash = fp.read()
    except (IOError,ValueError):
      logger.exception("Cannot load data from {0} and {1}".format(self.__current_cache_json_file, self.__current_cache_hash_file))
      self.hash = None
      cache_dict = {}

    try:
      self.rewrite_cache(cache_dict, self.hash)
    except:
      # Example: hostname change and restart causes old topology loading to fail with exception
      logger.exception("Loading saved cache for {0} failed".format(self.__class__.__name__))
      self.rewrite_cache({}, None)
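rewrite_cache itself is not part of the example; a plausible counterpart that keeps the JSON cache and its companion hash file in step might look roughly like this (a sketch under that assumption, not the actual Ambari implementation):

import json

def write_cache_pair(json_path, hash_path, cache_dict, cache_hash):
    """Persist a cache dictionary together with the hash it was built from."""
    with open(json_path, 'w') as fp:
        json.dump(cache_dict, fp)
    if cache_hash is not None:
        with open(hash_path, 'w') as fp:
            fp.write(cache_hash)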
  def __load_definitions(self):
    """
    Loads all alert definitions from a file. All clusters are stored in
    a single file. This will also populate the cluster-to-hash dictionary.
    :return:
    """
    definitions = []

    alerts_definitions_path = os.path.join(self.cachedir, self.FILENAME)
    try:
      with open(alerts_definitions_path) as fp:
        all_commands = json.load(fp)
    except:
      logger.warning('[AlertScheduler] {0} not found or invalid. No alerts will be scheduled until registration occurs.'.format(alerts_definitions_path))
      return definitions

    for command_json in all_commands:
      clusterName = '' if not 'clusterName' in command_json else command_json['clusterName']
      hostName = '' if not 'hostName' in command_json else command_json['hostName']
      clusterHash = None if not 'hash' in command_json else command_json['hash']

      # cache the cluster and cluster hash after loading the JSON
      if clusterName != '' and clusterHash is not None:
        logger.info('[AlertScheduler] Caching cluster {0} with alert hash {1}'.format(clusterName, clusterHash))
        self._cluster_hashes[clusterName] = clusterHash

      for definition in command_json['alertDefinitions']:
        alert = self.__json_to_callable(clusterName, hostName, definition)

        if alert is None:
          continue

        alert.set_helpers(self._collector, self._cluster_configuration)

        definitions.append(alert)

    return definitions
def cached_kinit_executor(kinit_path, exec_user, keytab_file, principal, hostname, temp_dir,
                          expiration_time=5):
  """
  Main cached kinit executor - Uses a temporary file on the FS to cache executions. Each command
  will have its own file and only one entry (last successful execution) will be stored
  """
  key = str(hash("%s|%s" % (principal, keytab_file)))
  filename = key + "_tmp.txt"
  file_path = temp_dir + os.sep + "kinit_executor_cache"
  output = None

  # First execution scenario dir file existence check
  if not os.path.exists(file_path):
    os.makedirs(file_path)

  file_path += os.sep + filename

  # If the file does not exist create before read
  if not os.path.isfile(file_path):
    with open(file_path, 'w+') as new_file:
      new_file.write("{}")
  try:
    with open(file_path, 'r') as cache_file:
      output = json.load(cache_file)
  except:
    # In the extraordinary case the temporary file gets corrupted the cache should be reset to avoid error loop
    with open(file_path, 'w+') as cache_file:
      cache_file.write("{}")

  if (not output) or (key not in output) or ("last_successful_execution" not in output[key]):
    new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user, keytab_file, principal, hostname)
  else:
    last_run_time = output[key]["last_successful_execution"]
    now = datetime.now()
    if (now - datetime.strptime(last_run_time, "%Y-%m-%d %H:%M:%S.%f") > timedelta(minutes=expiration_time)):
      new_cached_exec(key, file_path, kinit_path, temp_dir, exec_user, keytab_file, principal, hostname)
def create_ams_dashboards():
  """
  Create dashboards in grafana from the json files
  """
  import params
  server = Server(protocol = params.ams_grafana_protocol.strip(),
                  host = params.ams_grafana_host.strip(),
                  port = params.ams_grafana_port,
                  user = params.ams_grafana_admin_user,
                  password = params.ams_grafana_admin_pwd)

  dashboard_files = params.get_grafana_dashboard_defs()
  version = params.get_ambari_version()
  Logger.info("Checking dashboards to update for Ambari version : %s" % version)
  # Friendly representation of dashboard
  Dashboard = namedtuple('Dashboard', ['uri', 'id', 'title', 'tags'])

  existing_dashboards = []
  response = perform_grafana_get_call(GRAFANA_SEARCH_BULTIN_DASHBOARDS, server)
  if response and response.status == 200:
    data = response.read()
    try:
      dashboards = json.loads(data)
    except:
      Logger.error("Unable to parse JSON response from grafana request: %s" %
                   GRAFANA_SEARCH_BULTIN_DASHBOARDS)
      Logger.info(data)
      return

    for dashboard in dashboards:
      if dashboard['title'] == 'HBase - Performance':
        perform_grafana_delete_call("/api/dashboards/" + dashboard['uri'], server)
      else:
        existing_dashboards.append(
            Dashboard(uri = dashboard['uri'], id = dashboard['id'],
                    title = dashboard['title'], tags = dashboard['tags'])
          )
    pass
  else:
    Logger.error("Failed to execute search query on Grafana dashboards. "
                 "query = %s\n statuscode = %s\n reason = %s\n data = %s\n" %
                 (GRAFANA_SEARCH_BULTIN_DASHBOARDS, response.status, response.reason, response.read()))
    return

  Logger.debug('Dashboard definitions found = %s' % str(dashboard_files))

  if dashboard_files:
    for dashboard_file in dashboard_files:
      try:
        with open(dashboard_file, 'r') as file:
          dashboard_def = json.load(file)
      except Exception, e:
        Logger.error('Unable to load dashboard json file %s' % dashboard_file)
        Logger.error(str(e))
        continue

      if dashboard_def:
        update_def = True
        # Make sure static json does not have id
        if "id" in dashboard_def:
          dashboard_def['id'] = None
        # Set correct tags
        if 'tags' in dashboard_def:
          dashboard_def['tags'].append('builtin')
          dashboard_def['tags'].append(version)
        else:
          dashboard_def['tags'] = [ 'builtin', version ]
        
        for dashboard in existing_dashboards:
          if dashboard.title == dashboard_def['title']:
            if version not in dashboard.tags:
              # Found existing dashboard with wrong version - update dashboard
              update_def = True
            else:
              update_def = False # Skip update
        pass

        if update_def:
          Logger.info("Updating dashboard definition for %s with tags: %s" %
                      (dashboard_def['title'], dashboard_def['tags']))

          # Discrepancy in grafana export vs import format
          dashboard_def_payload = { "dashboard" : dashboard_def, 'overwrite': True }
          payload = json.dumps(dashboard_def_payload).strip()

          (response, data) = perform_grafana_post_call(GRAFANA_DASHBOARDS_URL, payload, server)

          if response and response.status == 200:
            Logger.info("Dashboard created successfully.\n %s" % str(data))
          else:
            Logger.error("Failed creating dashboard: %s" % dashboard_def['title'])
          pass
        else:
          Logger.info('No update needed for dashboard = %s' % dashboard_def['title'])
      pass
    pass
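The id/tag handling and the import payload built above can be condensed into one helper. Only the payload shape visible in the code is reproduced; the Grafana HTTP call itself is left out:

import json

def build_dashboard_payload(dashboard_def, version):
    """Prepare a dashboard definition for import: drop the id, tag it, wrap it."""
    dashboard_def = dict(dashboard_def)
    dashboard_def['id'] = None  # static JSON must not carry an id
    dashboard_def['tags'] = list(dashboard_def.get('tags', [])) + ['builtin', version]
    return json.dumps({"dashboard": dashboard_def, "overwrite": True}).strip()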
Example #29
  def execute(self):
    """
    Sets up logging;
    Parses command parameters and executes method relevant to command type
    """
    parser = OptionParser()
    parser.add_option("-o", "--out-files-logging", dest="log_out_files", action="store_true",
                      help="use this option to enable outputting *.out files of the service pre-start")
    (self.options, args) = parser.parse_args()

    self.log_out_files = self.options.log_out_files

    # parse arguments
    if len(args) < 6:
     print "Script expects at least 6 arguments"
     print USAGE.format(os.path.basename(sys.argv[0])) # print to stdout
     sys.exit(1)

    self.command_name = str.lower(sys.argv[1])
    self.command_data_file = sys.argv[2]
    self.basedir = sys.argv[3]
    self.stroutfile = sys.argv[4]
    self.load_structured_out()
    self.logging_level = sys.argv[5]
    Script.tmp_dir = sys.argv[6]
    # optional script arguments for forcing https protocol and ca_certs file
    if len(sys.argv) >= 8:
      Script.force_https_protocol = sys.argv[7]
    if len(sys.argv) >= 9:
      Script.ca_cert_file_path = sys.argv[8]

    logging_level_str = logging._levelNames[self.logging_level]
    Logger.initialize_logger(__name__, logging_level=logging_level_str)

    # On Windows we need to reload some environment variables manually because there are no default
    # paths for configs (like /etc/something/conf on Linux). When these env vars are created by one
    # Script execution, they cannot be updated in the agent, so other Script executions cannot see the new values.
    if OSCheck.is_windows_family():
      reload_windows_env()

    # !!! status commands re-use structured output files; if the status command doesn't update
    # the file (because it doesn't have to), then we must ensure that the file is reset to prevent
    # old, stale structured output from a prior status command from being used
    if self.command_name == "status":
      Script.structuredOut = {}
      self.put_structured_out({})

    # make sure that script has forced https protocol and ca_certs file passed from agent
    ensure_ssl_using_protocol(Script.get_force_https_protocol_name(), Script.get_ca_cert_file_path())

    try:
      with open(self.command_data_file) as f:
        Script.config = ConfigDictionary(json.load(f))
        # load passwords here (used on Windows to impersonate different users)
        Script.passwords = {}
        for k, v in _PASSWORD_MAP.iteritems():
          if get_path_from_configuration(k, Script.config) and get_path_from_configuration(v, Script.config):
            Script.passwords[get_path_from_configuration(k, Script.config)] = get_path_from_configuration(v, Script.config)

    except IOError:
      Logger.logger.exception("Can not read json file with command parameters: ")
      sys.exit(1)

    from resource_management.libraries.functions import lzo_utils

    repo_tags_to_skip = set()
    if not lzo_utils.is_gpl_license_accepted():
      repo_tags_to_skip.add("GPL")

    Script.repository_util = RepositoryUtil(Script.config, repo_tags_to_skip)

    # Run class method depending on a command type
    try:
      method = self.choose_method_to_execute(self.command_name)
      with Environment(self.basedir, tmp_dir=Script.tmp_dir) as env:
        env.config.download_path = Script.tmp_dir

        if not self.is_hook():
          self.execute_prefix_function(self.command_name, 'pre', env)

        method(env)

        if not self.is_hook():
          self.execute_prefix_function(self.command_name, 'post', env)

    except Fail as ex:
      ex.pre_raise()
      raise
    finally:
      if self.should_expose_component_version(self.command_name):
        self.save_component_version_to_structured_out(self.command_name)
Example #30
    def execute(self):
        """
    Sets up logging;
    Parses command parameters and executes method relevant to command type
    """
        parser = OptionParser()
        parser.add_option(
            "-o",
            "--out-files-logging",
            dest="log_out_files",
            action="store_true",
            help=
            "use this option to enable outputting *.out files of the service pre-start"
        )
        (self.options, args) = parser.parse_args()

        self.log_out_files = self.options.log_out_files

        # parse arguments
        if len(args) < 6:
            print "Script expects at least 6 arguments"
            print USAGE.format(os.path.basename(
                sys.argv[0]))  # print to stdout
            sys.exit(1)

        self.command_name = str.lower(sys.argv[1])
        self.command_data_file = sys.argv[2]
        self.basedir = sys.argv[3]
        self.stroutfile = sys.argv[4]
        self.load_structured_out()
        self.logging_level = sys.argv[5]
        Script.tmp_dir = sys.argv[6]
        # optional script argument for forcing https protocol
        if len(sys.argv) >= 8:
            Script.force_https_protocol = sys.argv[7]

        logging_level_str = logging._levelNames[self.logging_level]
        Logger.initialize_logger(__name__, logging_level=logging_level_str)

        # On Windows we need to reload some environment variables manually because there are no default
        # paths for configs (like /etc/something/conf on Linux). When these env vars are created by one
        # Script execution, they cannot be updated in the agent, so other Script executions cannot see the new values.
        if OSCheck.is_windows_family():
            reload_windows_env()

        try:
            with open(self.command_data_file) as f:
                Script.config = ConfigDictionary(json.load(f))
                # load passwords here (used on Windows to impersonate different users)
                Script.passwords = {}
                for k, v in _PASSWORD_MAP.iteritems():
                    if get_path_from_configuration(
                            k, Script.config) and get_path_from_configuration(
                                v, Script.config):
                        Script.passwords[get_path_from_configuration(
                            k, Script.config)] = get_path_from_configuration(
                                v, Script.config)

        except IOError:
            Logger.logger.exception(
                "Can not read json file with command parameters: ")
            sys.exit(1)

        # Run class method depending on a command type
        try:
            method = self.choose_method_to_execute(self.command_name)
            with Environment(self.basedir, tmp_dir=Script.tmp_dir) as env:
                env.config.download_path = Script.tmp_dir

                if self.command_name == "start" and not self.is_hook():
                    self.pre_start()

                method(env)

                if self.command_name == "start" and not self.is_hook():
                    self.post_start()
        except Fail as ex:
            ex.pre_raise()
            raise
        finally:
            if self.should_expose_component_version(self.command_name):
                self.save_component_version_to_structured_out()
Example #31
    def delete_storm_local_data(self, env):
        """
    Deletes Storm data from local directories. This will create a marker file
    with JSON data representing the upgrade stack and request/stage ID. This
    will prevent multiple Storm components on the same host from removing
    the local directories more than once.
    :return:
    """
        import params

        Logger.info('Clearing Storm data from local directories...')

        storm_local_directory = params.local_dir
        if storm_local_directory is None:
            raise Fail(
                "The storm local directory specified by storm-site/storm.local.dir must be specified"
            )

        request_id = default("/requestId", None)
        stage_id = default("/stageId", None)
        stack_version = params.version
        stack_name = params.stack_name

        json_map = {}
        json_map["requestId"] = request_id
        json_map["stageId"] = stage_id
        json_map["stackVersion"] = stack_version
        json_map["stackName"] = stack_name

        temp_directory = params.tmp_dir
        upgrade_file = os.path.join(
            temp_directory, "storm-upgrade-{0}.json".format(stack_version))

        if os.path.exists(upgrade_file):
            try:
                with open(upgrade_file) as file_pointer:
                    existing_json_map = json.load(file_pointer)

                if cmp(json_map, existing_json_map) == 0:
                    Logger.info(
                        "The storm upgrade has already removed the local directories for {0}-{1} for request {2} and stage {3}"
                        .format(stack_name, stack_version, request_id,
                                stage_id))

                    # nothing else to do here for this as it appears to have already been
                    # removed by another component being upgraded
                    return

            except:
                Logger.error(
                    "The upgrade file {0} appears to be corrupt; removing...".
                    format(upgrade_file))
                File(upgrade_file, action="delete")
        else:
            # delete the upgrade file since it does not match
            File(upgrade_file, action="delete")

        # delete from local directory
        Directory(storm_local_directory, action="delete", recursive=True)

        # recreate storm local directory
        Directory(storm_local_directory,
                  mode=0755,
                  owner=params.storm_user,
                  group=params.user_group,
                  recursive=True)

        # the file doesn't exist, so create it
        with open(upgrade_file, 'w') as file_pointer:
            json.dump(json_map, file_pointer, indent=2)
Example #32
def setup_ranger_plugin(component_select_name,
                        service_name,
                        previous_jdbc_jar,
                        component_downloaded_custom_connector,
                        component_driver_curl_source,
                        component_driver_curl_target,
                        java_home,
                        repo_name,
                        plugin_repo_dict,
                        ranger_env_properties,
                        plugin_properties,
                        policy_user,
                        policymgr_mgr_url,
                        plugin_enabled,
                        conf_dict,
                        component_user,
                        component_group,
                        cache_service_list,
                        plugin_audit_properties,
                        plugin_audit_attributes,
                        plugin_security_properties,
                        plugin_security_attributes,
                        plugin_policymgr_ssl_properties,
                        plugin_policymgr_ssl_attributes,
                        component_list,
                        audit_db_is_enabled,
                        credential_file,
                        xa_audit_db_password,
                        ssl_truststore_password,
                        ssl_keystore_password,
                        api_version=None,
                        stack_version_override=None,
                        skip_if_rangeradmin_down=True,
                        is_security_enabled=False,
                        is_stack_supports_ranger_kerberos=False,
                        component_user_principal=None,
                        component_user_keytab=None,
                        cred_lib_path_override=None,
                        cred_setup_prefix_override=None):

    if audit_db_is_enabled and component_driver_curl_source is not None and not component_driver_curl_source.endswith(
            "/None"):
        if previous_jdbc_jar and os.path.isfile(previous_jdbc_jar):
            File(previous_jdbc_jar, action='delete')

        File(component_downloaded_custom_connector,
             content=DownloadSource(component_driver_curl_source),
             mode=0644)

        Execute(('cp', '--remove-destination',
                 component_downloaded_custom_connector,
                 component_driver_curl_target),
                path=["/bin", "/usr/bin/"],
                sudo=True)

        File(component_driver_curl_target, mode=0644)

    if policymgr_mgr_url.endswith('/'):
        policymgr_mgr_url = policymgr_mgr_url.rstrip('/')

    if stack_version_override is None:
        stack_version = get_stack_version(component_select_name)
    else:
        stack_version = stack_version_override

    component_conf_dir = conf_dict

    if plugin_enabled:

        service_name_exist = False
        policycache_path = os.path.join('/etc', 'ranger', repo_name,
                                        'policycache')
        try:
            for cache_service in cache_service_list:
                policycache_json_file = format(
                    '{policycache_path}/{cache_service}_{repo_name}.json')
                if os.path.isfile(policycache_json_file) and os.path.getsize(
                        policycache_json_file) > 0:
                    with open(policycache_json_file) as json_file:
                        json_data = json.load(json_file)
                        if 'serviceName' in json_data and json_data[
                                'serviceName'] == repo_name:
                            service_name_exist = True
                            Logger.info(
                                "Skipping Ranger API calls, as policy cache file exists for {0}"
                                .format(service_name))
                            Logger.warning(
                                "If service name for {0} is not created on Ranger Admin UI, then to re-create it delete policy cache file: {1}"
                                .format(service_name, policycache_json_file))
                            break
        except Exception, err:
            Logger.error(
                "Error occurred while fetching service name from policy cache file.\nError: {0}"
                .format(err))

        if not service_name_exist:
            if api_version is not None and api_version == 'v2':
                ranger_adm_obj = RangeradminV2(
                    url=policymgr_mgr_url,
                    skip_if_rangeradmin_down=skip_if_rangeradmin_down)
                ranger_adm_obj.create_ranger_repository(
                    service_name, repo_name, plugin_repo_dict,
                    ranger_env_properties['ranger_admin_username'],
                    ranger_env_properties['ranger_admin_password'],
                    ranger_env_properties['admin_username'],
                    ranger_env_properties['admin_password'], policy_user,
                    is_security_enabled, is_stack_supports_ranger_kerberos,
                    component_user, component_user_principal,
                    component_user_keytab)
            else:
                ranger_adm_obj = Rangeradmin(
                    url=policymgr_mgr_url,
                    skip_if_rangeradmin_down=skip_if_rangeradmin_down)
                ranger_adm_obj.create_ranger_repository(
                    service_name, repo_name, plugin_repo_dict,
                    ranger_env_properties['ranger_admin_username'],
                    ranger_env_properties['ranger_admin_password'],
                    ranger_env_properties['admin_username'],
                    ranger_env_properties['admin_password'], policy_user)

        current_datetime = datetime.now()

        File(
            format('{component_conf_dir}/ranger-security.xml'),
            owner=component_user,
            group=component_group,
            mode=0644,
            content=InlineTemplate(
                format(
                    '<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'
                )))

        Directory([
            os.path.join('/etc', 'ranger', repo_name),
            os.path.join('/etc', 'ranger', repo_name, 'policycache')
        ],
                  owner=component_user,
                  group=component_group,
                  mode=0775,
                  create_parents=True,
                  cd_access='a')

        for cache_service in cache_service_list:
            File(os.path.join('/etc', 'ranger', repo_name, 'policycache',
                              format('{cache_service}_{repo_name}.json')),
                 owner=component_user,
                 group=component_group,
                 mode=0644)

        # remove plain-text password from xml configs
        plugin_audit_password_property = 'xasecure.audit.destination.db.password'
        plugin_audit_properties_copy = {}
        plugin_audit_properties_copy.update(plugin_audit_properties)

        if plugin_audit_password_property in plugin_audit_properties_copy:
            plugin_audit_properties_copy[
                plugin_audit_password_property] = "crypted"

        XmlConfig(format('ranger-{service_name}-audit.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_audit_properties_copy,
                  configuration_attributes=plugin_audit_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        XmlConfig(format('ranger-{service_name}-security.xml'),
                  conf_dir=component_conf_dir,
                  configurations=plugin_security_properties,
                  configuration_attributes=plugin_security_attributes,
                  owner=component_user,
                  group=component_group,
                  mode=0744)

        # remove plain-text password from xml configs
        plugin_password_properties = [
            'xasecure.policymgr.clientssl.keystore.password',
            'xasecure.policymgr.clientssl.truststore.password'
        ]
        plugin_policymgr_ssl_properties_copy = {}
        plugin_policymgr_ssl_properties_copy.update(
            plugin_policymgr_ssl_properties)

        for prop in plugin_password_properties:
            if prop in plugin_policymgr_ssl_properties_copy:
                plugin_policymgr_ssl_properties_copy[prop] = "crypted"

        if str(service_name).lower() == 'yarn':
            XmlConfig("ranger-policymgr-ssl-yarn.xml",
                      conf_dir=component_conf_dir,
                      configurations=plugin_policymgr_ssl_properties_copy,
                      configuration_attributes=plugin_policymgr_ssl_attributes,
                      owner=component_user,
                      group=component_group,
                      mode=0744)
        else:
            XmlConfig("ranger-policymgr-ssl.xml",
                      conf_dir=component_conf_dir,
                      configurations=plugin_policymgr_ssl_properties_copy,
                      configuration_attributes=plugin_policymgr_ssl_attributes,
                      owner=component_user,
                      group=component_group,
                      mode=0744)

        # creating the symlink should be done by the rpm package
        # setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)

        setup_ranger_plugin_keystore(
            service_name, audit_db_is_enabled, stack_version, credential_file,
            xa_audit_db_password, ssl_truststore_password,
            ssl_keystore_password, component_user, component_group, java_home,
            cred_lib_path_override, cred_setup_prefix_override)