示例#1
0
  def format(self, format_string, *args, **kwargs):
    """Format a string twice — with protected and unprotected field
    conversion — and register the pair in Logger.sensitive_strings when
    they differ, so sensitive values get masked in log output.

    Keyword arguments override parameters taken from the Environment.
    Returns the unprotected (plain) rendering.
    """
    if Environment.has_instance():
      # Copy the env parameters instead of using checked_unite: checked_unite
      # would interfere with reload(module) for params/status_params. Locally
      # passed kwargs override existing env parameters.
      merged = Environment.get_instance().config.params.copy()
    else:
      merged = {}
    merged.update(kwargs)

    self.convert_field = self.convert_field_protected
    masked = self.vformat(format_string, args, merged)

    self.convert_field = self.convert_field_unprotected
    plain = self.vformat(format_string, args, merged)

    if masked != plain:
      Logger.sensitive_strings[plain] = masked

    return plain
  def action_remove(self):
    """Delete this resource's .repo file from the repository directory."""
    with Environment.get_instance_copy() as env:
      File(format("{repo_dir}/{repo_file_name}.repo",
                  repo_dir=get_repo_dir(),
                  repo_file_name=self.resource.repo_file_name),
           action="delete")
  def action_create(self):
    """Create (or append to) the yum/zypper .repo file for this resource.

    If the file already exists with different content and we are on a SUSE
    family OS, the package manager cache is flushed before replacing it.
    """
    with Environment.get_instance_copy() as env:
      repo_file_name = self.resource.repo_file_name
      repo_dir = get_repo_dir()
      new_content = InlineTemplate(self.resource.repo_template, repo_id=self.resource.repo_id, repo_file_name=self.resource.repo_file_name,
                             base_url=self.resource.base_url, mirror_list=self.resource.mirror_list)
      repo_file_path = format("{repo_dir}/{repo_file_name}.repo")

      if os.path.isfile(repo_file_path):
        existing_content_str = sudo.read_file(repo_file_path)
        new_content_str = new_content.get_content()
        if existing_content_str != new_content_str and OSCheck.is_suse_family():
          # We need to reset package manager's cache when we replace base urls
          # at existing repo. That is a case at least under SLES
          Logger.info("Flushing package manager cache since repo file content is about to change")
          checked_call(self.update_cmd, sudo=True)
        if self.resource.append_to_file:
          content = existing_content_str + '\n' + new_content_str
        else:
          content = new_content_str
      else: # If repo file does not exist yet
        # content stays a template object here; File renders it on write
        content = new_content

      File(repo_file_path,
           content=content
      )
示例#4
0
    def action_create(self):
        """Create or append to this resource's apt sources .list file.

        The target content is staged in a temp file first and only copied
        over the real file when it differs, so the expensive apt index
        update and GPG key import run only on an actual change.
        """
        with Environment.get_instance_copy() as env:
            with tempfile.NamedTemporaryFile() as tmpf:
                repo_file_name = format("{repo_file_name}.list", repo_file_name=self.resource.repo_file_name)
                repo_file_path = format("{repo_dir}/{repo_file_name}", repo_dir=self.repo_dir)

                new_content = Template(
                    self.resource.repo_template,
                    package_type=self.package_type,
                    base_url=self.resource.base_url,
                    components=" ".join(self.resource.components),
                ).get_content()
                old_content = ""
                if self.resource.append_to_file and os.path.isfile(repo_file_path):
                    with open(repo_file_path) as repo_file:
                        old_content = repo_file.read() + "\n"

                # stage the would-be file content for comparison below
                File(tmpf.name, content=old_content + new_content)

                if not os.path.isfile(repo_file_path) or not filecmp.cmp(tmpf.name, repo_file_path):
                    File(repo_file_path, content=StaticFile(tmpf.name))

                    # NOTE: format(x) resolves placeholders from caller locals
                    # (e.g. repo_file_name above), so these local names matter
                    update_cmd_formatted = [format(x) for x in self.update_cmd]
                    # this is time expensive
                    retcode, out = checked_call(update_cmd_formatted, sudo=True)

                    # add public keys for new repos
                    missing_pkeys = set(re.findall(self.missing_pkey_regex, out))
                    for pkey in missing_pkeys:
                        Execute(
                            format(self.add_pkey_cmd),
                            timeout=15,  # in case we are on the host w/o internet (using localrepo), we should ignore hanging
                            ignore_failures=True,
                        )
  def action_create(self):
    """Render the resource's configurations dict into a Hadoop-style XML
    configuration file at conf_dir/filename.

    Each value is itself rendered through InlineTemplate (so {{...}} inside
    configuration values is expanded) and HTML-escaped; entries from
    configuration_attributes are emitted as extra child elements of the
    matching <property>.
    """
    filename = self.resource.filename
    xml_config_provider_config_dir = self.resource.conf_dir

    # |e - for html-like escaping of <,>,',"
    config_content = InlineTemplate('''  <configuration>
    {% for key, value in configurations_dict|dictsort %}
    <property>
      <name>{{ key|e }}</name>
      <value>{{ resource_management.core.source.InlineTemplate(str(value)).get_content() |e }}</value>
      {%- if not configuration_attrs is none -%}
      {%- for attrib_name, attrib_occurances in  configuration_attrs.items() -%}
      {%- for property_name, attrib_value in  attrib_occurances.items() -%}
      {% if property_name == key and attrib_name %}
      <{{attrib_name|e}}>{{attrib_value|e}}</{{attrib_name|e}}>
      {%- endif -%}
      {%- endfor -%}
      {%- endfor -%}
      {%- endif %}
    </property>
    {% endfor %}
  </configuration>''', extra_imports=[time, resource_management, resource_management.core, resource_management.core.source], configurations_dict=self.resource.configurations,
                                    configuration_attrs=self.resource.configuration_attributes)

    xml_config_dest_file_path = os.path.join(xml_config_provider_config_dir, filename)
    Logger.info("Generating config: {0}".format(xml_config_dest_file_path))

    with Environment.get_instance_copy() as env:
      File (xml_config_dest_file_path,
        content = config_content,
        owner = self.resource.owner,
        group = self.resource.group,
        mode = self.resource.mode,
        encoding = self.resource.encoding
      )
示例#6
0
def falcon(type, action = None):
  """Configure and/or control the Falcon component (Windows variant).

  Args:
    type: component type; 'server' enables Windows-service operations.
      NOTE: shadows the builtin `type`; kept as-is for caller compatibility.
    action: 'config' writes Falcon config files; 'start'/'stop' control the
      Windows service (only honored when type == 'server').
  """
  import params

  if action == 'config':
    env = Environment.get_instance()
    # These 2 parameters are used in ../templates/client.properties.j2
    env.config.params["falcon_host"] = params.falcon_host
    env.config.params["falcon_port"] = params.falcon_port
    File(os.path.join(params.falcon_conf_dir, 'falcon-env.sh'),
      content = InlineTemplate(params.falcon_env_sh_template))

    File(os.path.join(params.falcon_conf_dir, 'client.properties'),
      content = Template('client.properties.j2'))

    PropertiesFile(os.path.join(params.falcon_conf_dir, 'runtime.properties'),
      properties = params.falcon_runtime_properties)

    PropertiesFile(os.path.join(params.falcon_conf_dir, 'startup.properties'),
      properties = params.falcon_startup_properties)

  if type == 'server':
    # run the Windows service as the falcon user
    ServiceConfig(params.falcon_win_service_name,
      action = "change_user",
      username = params.falcon_user,
      password = Script.get_password(params.falcon_user))

    if action == 'start':
      Service(params.falcon_win_service_name, action = "start")

    if action == 'stop':
      Service(params.falcon_win_service_name, action = "stop")
def get_check_command(oozie_url, host_name, configurations):
  """Build the shell command used to check Oozie server status.

  Returns a (command, kerberos_env, oozie_user) tuple. kerberos_env is None
  unless security is enabled; with security enabled, a kinit is performed
  first if the credential cache holds no valid ticket.

  Raises:
    Exception: when the Oozie user is missing from configurations.
    KerberosPropertiesNotFound: when security is enabled but the Oozie
      keytab/principal are not configured.
  """
  if OOZIE_USER in configurations:
    oozie_user = configurations[OOZIE_USER]
  else:
    raise Exception("Oozie user is required")
    
  security_enabled = False
  if SECURITY_ENABLED in configurations:
    security_enabled = str(configurations[SECURITY_ENABLED]).upper() == 'TRUE'
  kerberos_env = None
  if security_enabled:
    if OOZIE_KEYTAB in configurations and OOZIE_PRINCIPAL in configurations:
      oozie_keytab = configurations[OOZIE_KEYTAB]
      oozie_principal = configurations[OOZIE_PRINCIPAL]

      # substitute _HOST in kerberos principal with actual fqdn
      oozie_principal = oozie_principal.replace('_HOST', host_name)
    else:
      raise KerberosPropertiesNotFound('The Oozie keytab and principal are required configurations when security is enabled.')

    # Create the kerberos credentials cache (ccache) file and set it in the environment to use
    # when executing curl
    env = Environment.get_instance()
    ccache_file = "{0}{1}oozie_alert_cc_{2}".format(env.tmp_dir, os.sep, os.getpid())
    kerberos_env = {'KRB5CCNAME': ccache_file}

    # Get the configured Kerberos executable search paths, if any
    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
    else:
      kerberos_executable_search_paths = None

    klist_path_local = get_klist_path(kerberos_executable_search_paths)
    klist_command = format("{klist_path_local} -s {ccache_file}")

    # Determine if we need to kinit by testing to see if the relevant cache exists and has
    # non-expired tickets.  Tickets are marked to expire after 5 minutes to help reduce the number
    # it kinits we do but recover quickly when keytabs are regenerated
    return_code, _ = call(klist_command, user=oozie_user)
    if return_code != 0:
      kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
      kinit_command = format("{kinit_path_local} -l 5m -kt {oozie_keytab} {oozie_principal}; ")

      # kinit
      Execute(kinit_command, 
              environment=kerberos_env,
              user=oozie_user,
      )

  # oozie configuration directory uses a symlink when > HDP 2.2
  oozie_config_directory = OOZIE_CONF_DIR_LEGACY
  if os.path.exists(OOZIE_CONF_DIR):
    oozie_config_directory = OOZIE_CONF_DIR

  command = "source {0}/oozie-env.sh ; oozie admin -oozie {1} -status".format(
    oozie_config_directory, oozie_url)

  return (command, kerberos_env, oozie_user)
示例#8
0
    def action_remove(self):
        """Delete this resource's apt sources .list file and, when a file was
        actually removed, refresh the package index so apt forgets the repo."""
        with Environment.get_instance_copy() as env:
            # NOTE: update_cmd entries may reference {repo_file_name} via
            # format()'s caller-locals lookup, so keep this local name.
            repo_file_name = format("{repo_file_name}.list",
                                    repo_file_name=self.resource.repo_file_name)
            repo_file_path = format("{repo_dir}/{repo_file_name}",
                                    repo_dir=self.repo_dir)

            if not os.path.isfile(repo_file_path):
                return

            File(repo_file_path, action="delete")

            # refreshing the package index is time expensive — only on change
            Execute([format(x) for x in self.update_cmd])
示例#9
0
  def test_attribute_creates(self, popen_mock, exists_mock):
    """When 'creates' points at an existing path, the command must not run."""
    exists_mock.return_value = True

    process = MagicMock()
    process.returncode = 0
    process.stdout.readline = MagicMock(side_effect = ['OK'])
    process.communicate.side_effect = [["1"]]
    popen_mock.return_value = process

    with Environment("/") as env:
      Execute('echo "1"', creates="/must/be/created")

    exists_mock.assert_called_with("/must/be/created")
    self.assertEqual(process.call_count, 0)
示例#10
0
    def test_attribute_group(self, getpwnam_mock, getgrnam_mock):
        """An unresolvable group raises KeyError, surfaced to Execute as Fail."""
        def raise_key_error(argument):
            self.assertEqual(argument, "test_group")
            raise KeyError("fail")

        getpwnam_mock.side_effect = 1
        getgrnam_mock.side_effect = raise_key_error
        try:
            with Environment("/") as env:
                Execute('echo "1"', group="test_group")
        except Fail as e:
            # the Fail itself is expected; the assertion above is the check
            pass
示例#11
0
 def test_attribute_path(self, popen_mock, select_mock, os_read_mock):
   """Execute must invoke the command through a login bash shell
   ('/bin/bash --login --noprofile -c <cmd>') regardless of path attr."""
   subproc_mock = MagicMock()
   subproc_mock.wait.return_value = MagicMock()
   subproc_mock.stdout = MagicMock()
   subproc_mock.returncode = 0
   popen_mock.return_value = subproc_mock
   select_mock.return_value = ([subproc_mock.stdout], None, None)
   os_read_mock.return_value = None
   
   with Environment("/") as env:
     execute_resource = Execute('echo "1"',
                                path=["/test/one", "test/two"]
     )
   expected_command = ['/bin/bash', '--login', '--noprofile', '-c', 'echo "1"']
   self.assertEqual(popen_mock.call_args_list[0][0][0], expected_command)
示例#12
0
  def test_attribute_command_one_line(self, popen_mock, select_mock, os_read_mock):
    """A one-line command string is handed to the shell wrapper unchanged
    (as argv[4] of the '/bin/bash ... -c <cmd>' invocation)."""
    expected_command = "rm -rf /somedir"

    process = MagicMock()
    process.wait.return_value = MagicMock()
    process.stdout = MagicMock()
    process.returncode = 0
    popen_mock.return_value = process
    select_mock.return_value = ([process.stdout], None, None)
    os_read_mock.return_value = None

    with Environment("/") as env:
      Execute(expected_command)

    self.assertEqual(popen_mock.call_args_list[0][0][0][4], expected_command)
示例#13
0
    def test_create_repo_ubuntu_gpg_key_wrong_output(self, file_mock,
                                                     execute_mock,
                                                     tempfile_mock, call_mock):
        """Checks that the missing GPG key id is extracted from apt-get
        output even when the line ends with a carriage return (\\r)."""
        # NOTE: `file` is the Python 2 builtin file type
        tempfile_mock.return_value = MagicMock(spec=file)
        tempfile_mock.return_value.__enter__.return_value.name = "/tmp/1.txt"
        call_mock.return_value = 0, "The following signatures couldn't be verified because the public key is not available: NO_PUBKEY 123ABCD\r\n"

        with Environment('/') as env:
            with patch.object(repository,
                              "__file__",
                              new='/ambari/test/repo/dummy/path/file'):
                Repository('HDP',
                           base_url='http://download.base_url.org/rpm/',
                           repo_file_name='HDP',
                           repo_template=DEBIAN_DEFAUTL_TEMPLATE,
                           components=['a', 'b', 'c'])

        # first File() call stages the rendered template in the temp file
        call_content = file_mock.call_args_list[0]
        template_name = call_content[0][0]
        template_content = call_content[1]['content']

        self.assertEquals(template_name, '/tmp/1.txt')
        self.assertEquals(template_content,
                          'deb http://download.base_url.org/rpm/ a b c')

        # subsequent File() calls copy temp file <-> real sources.list.d entry
        copy_item0 = str(file_mock.call_args_list[1])
        copy_item1 = str(file_mock.call_args_list[2])
        self.assertEqual(
            copy_item0,
            "call('/tmp/1.txt', content=StaticFile('/etc/apt/sources.list.d/HDP.list'))"
        )
        self.assertEqual(
            copy_item1,
            "call('/etc/apt/sources.list.d/HDP.list', content=StaticFile('/tmp/1.txt'))"
        )
        execute_command_item = execute_mock.call_args_list[0][0][0]

        self.assertEqual(call_mock.call_args_list[0][0][0], [
            'apt-get', 'update', '-qq', '-o',
            'Dir::Etc::sourcelist=sources.list.d/HDP.list', '-o',
            'Dir::Etc::sourceparts=-', '-o', 'APT::Get::List-Cleanup=0'
        ])
        # the key id must be '123ABCD' — stripped of the trailing \r
        self.assertEqual(execute_command_item,
                         ('apt-key', 'adv', '--recv-keys', '--keyserver',
                          'keyserver.ubuntu.com', '123ABCD'))
示例#14
0
    def test_recreate_repo_suse(self, filecmp_mock, read_file_mock,
                                checked_call_mock, file_mock, is_redhat_family,
                                is_ubuntu_family, is_suse_family):
        """On SUSE, recreating a repo file with changed content must flush the
        zypper cache; with unchanged content it must not."""
        filecmp_mock.return_value = False
        is_redhat_family.return_value = False
        is_ubuntu_family.return_value = False
        is_suse_family.return_value = True
        # pretend an old repo file with different content already exists
        read_file_mock.return_value = "Dummy repo file contents"
        checked_call_mock.return_value = 0, "Flushing zypper cache"
        with Environment('/') as env:
            with patch.object(repository,
                              "__file__",
                              new='/ambari/test/repo/dummy/path/file'):
                # Check that zypper cache is flushed
                Repository(
                    'hadoop',
                    base_url='http://download.base_url.org/rpm/',
                    mirror_list=
                    'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                    repo_template=RHEL_SUSE_DEFAULT_TEMPLATE,
                    repo_file_name='Repository')

                Repository(None, action="create")

                self.assertTrue(checked_call_mock.called)

                expected_repo_file_content = "[hadoop]\nname=hadoop\nmirrorlist=https://mirrors.base_url.org/?repo=Repository&arch=$basearch\n\npath=/\nenabled=1\ngpgcheck=0"
                template = file_mock.call_args_list[0][1]['content']
                self.assertEqual(expected_repo_file_content, template)

                # Check that if content is equal, zypper cache is not flushed
                checked_call_mock.reset_mock()
                filecmp_mock.return_value = True

                Repository(
                    'hadoop',
                    base_url='http://download.base_url.org/rpm/',
                    mirror_list=
                    'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                    repo_template=RHEL_SUSE_DEFAULT_TEMPLATE,
                    repo_file_name='Repository')
                Repository(None, action="create")

                self.assertFalse(checked_call_mock.called)

                expected_repo_file_content = "[hadoop]\nname=hadoop\nmirrorlist=https://mirrors.base_url.org/?repo=Repository&arch=$basearch\n\npath=/\nenabled=1\ngpgcheck=0"
                template = file_mock.call_args_list[0][1]['content']
                self.assertEqual(expected_repo_file_content, template)
示例#15
0
 def action_create(self):
     """Render the .repo file for this resource from its template; append to
     an existing file when append_to_file is set, otherwise (re)write it."""
     with Environment.get_instance_copy() as env:
         repo_file_name = self.resource.repo_file_name
         repo_dir = get_repo_dir()
         repo_template = self.resource.repo_template
         new_content = Template(repo_template,
                                repo_id=self.resource.repo_id,
                                repo_file_name=self.resource.repo_file_name,
                                base_url=self.resource.base_url,
                                mirror_list=self.resource.mirror_list)
         # format() resolves {repo_dir}/{repo_file_name} from the locals above
         repo_file_path = format("{repo_dir}/{repo_file_name}.repo")
         if self.resource.append_to_file and os.path.isfile(repo_file_path):
             with open(repo_file_path, 'a') as repo_file:
                 repo_file.write('\n' + new_content.get_content())
         else:
             File(repo_file_path, content=new_content)
示例#16
0
文件: format.py 项目: indoos/ambari
    def format(self, format_string, *args, **kwargs):
        """Render format_string twice — once with protected (masked) field
        conversion, once unprotected — and register the pair with Logger so
        sensitive values are masked in log output. Returns the plain form."""
        params = Environment.get_instance().config.params
        # checked_unite raises when a kwarg conflicts with an env parameter
        all_params = checked_unite(kwargs, params)

        self.convert_field = self.convert_field_protected
        masked = self.vformat(format_string, args, all_params)

        self.convert_field = self.convert_field_unprotected
        plain = self.vformat(format_string, args, all_params)

        if masked != plain:
            Logger.sensitive_strings[plain] = masked

        return plain
示例#17
0
  def __get_delegation_token(self, user, keytab, principal, kinit_path):
    """
    Gets the kerberos delegation token from the name node via WebHDFS.

    Returns the token's urlString on success. On failure, logs the error,
    increments self.checks_failed and implicitly returns None.
    """
    import params
    url = params.namenode_path + "/webhdfs/v1/?op=GETDELEGATIONTOKEN"
    Logger.info("Getting delegation token from {0}".format(url))
    response, _, _  = curl_krb_request(Environment.get_instance().tmp_dir, keytab, principal,
        url, "get_delegation_token", kinit_path, False, "Delegation Token", user)
    json_response = json.loads(response)
    if json_response['Token'] and json_response['Token']['urlString']:
      return json_response['Token']['urlString']

    error_msg = "Get Token: Unable to get kerberos delegation token from webhdfs: \nurl = {0}, user = {1}, keytab = {2}, principal = {3}, kinit-path = {4} \nresponse = {5}".format(url, user, keytab, principal, kinit_path, json_response)
    Logger.error(error_msg)
    self.checks_failed += 1
示例#18
0
文件: format.py 项目: jghoman/slider
    def format(self, format_string, *args, **kwargs):
        """Format the string in both protected and unprotected field-conversion
        modes; when they differ, record the masked form in
        Logger.sensitive_strings. Returns the unprotected result."""
        variables = kwargs
        params = Environment.get_instance().config.params
        all_params = checked_unite(variables, params)

        rendered = {}
        # run the same vformat under each field-conversion mode
        for mode in ('protected', 'unprotected'):
            self.convert_field = getattr(self, 'convert_field_' + mode)
            rendered[mode] = self.vformat(format_string, args, all_params)

        if rendered['protected'] != rendered['unprotected']:
            Logger.sensitive_strings[rendered['unprotected']] = rendered['protected']

        return rendered['unprotected']
示例#19
0
    def test_attribute_logoutput(self, popen_mock, info_mock, select_mock,
                                 os_read_mock):
        """logoutput=True must log the command output; logoutput=False must not."""
        process = MagicMock()
        process.wait.return_value = MagicMock()
        process.stdout = MagicMock()
        process.returncode = 0
        popen_mock.return_value = process
        select_mock.return_value = ([process.stdout], None, None)
        os_read_mock.return_value = None

        with Environment("/") as env:
            Execute('echo "1"', logoutput=True)
            Execute('echo "2"', logoutput=False)

        info_mock.assert_called('1')
        self.assertTrue("call('2')" not in str(info_mock.mock_calls))
示例#20
0
    def test_attribute_path(self, popen_mock):
        """The path attribute is joined into PATH with the platform separator."""
        process = MagicMock()
        process.returncode = 0
        process.communicate.side_effect = [["1"]]
        popen_mock.return_value = process

        with Environment("/") as env:
            execute_resource = Execute('echo "1"',
                                       path=["/test/one", "test/two"])

        separator = ';' if IS_WINDOWS else ':'
        self.assertEqual(execute_resource.environment["PATH"],
                         '/test/one' + separator + 'test/two')
示例#21
0
    def test_add_block_to_filestream(self, file_contents, data, start_sentinel,
                                     end_sentinel, expected_result):
        """add_block_to_file must write exactly expected_result for the given
        starting contents, data block and sentinels."""
        written = []

        mocked_file = self.__mock_file(file_contents)
        # capture everything the code writes back to the file
        mocked_file.return_value.write = Mock(side_effect=written.append)

        with self.__with_mocked_file(mocked_file), Environment('/'):
            utilities.add_block_to_file('/test', data, None, start_sentinel,
                                        end_sentinel)

        self.assertEqual("".join(written), expected_result)
示例#22
0
文件: script.py 项目: indoos/ambari
 def execute(self):
     """
     Sets up logging, then parses command parameters and executes the
     method matching the requested command type.

     Expects at least 4 CLI arguments: command name, path to the json file
     with command parameters, base directory, structured-output file path.
     Exits with code 1 on argument/parse errors, on command failure, and
     (deliberately) for component-status checks reporting INSTALLED.
     """
     # set up logging (two separate loggers for stderr and stdout with different loglevels)
     logger = logging.getLogger('resource_management')
     logger.setLevel(logging.DEBUG)
     formatter = logging.Formatter('%(asctime)s - %(message)s')
     chout = logging.StreamHandler(sys.stdout)
     chout.setLevel(logging.INFO)
     chout.setFormatter(formatter)
     cherr = logging.StreamHandler(sys.stderr)
     cherr.setLevel(logging.ERROR)
     cherr.setFormatter(formatter)
     logger.addHandler(cherr)
     logger.addHandler(chout)
     # parse arguments
     if len(sys.argv) < 5:
         logger.error("Script expects at least 4 arguments")
         sys.exit(1)
     command_name = str.lower(sys.argv[1])
     # parse command parameters
     command_data_file = sys.argv[2]
     basedir = sys.argv[3]
     self.stroutfile = sys.argv[4]
     try:
         with open(command_data_file, "r") as f:
             Script.config = ConfigDictionary(json.load(f))
     except IOError:
         logger.exception(
             "Can not read json file with command parameters: ")
         sys.exit(1)
     # Run class method depending on a command type
     try:
         method = self.choose_method_to_execute(command_name)
         with Environment(basedir) as env:
             method(env)
     # BUG FIX: "except A or B" only ever catches A ("or" on classes yields
     # the first class); a tuple is required to catch both exception types.
     except (ClientComponentHasNoStatus, ComponentIsNotRunning):
         # Support of component status checks.
         # Non-zero exit code is interpreted as an INSTALLED status of a component
         sys.exit(1)
     except Fail:
         logger.exception(
             "Error while executing command '{0}':".format(command_name))
         sys.exit(1)
示例#23
0
  def test_attribute_cwd(self, popen_mock, select_mock, os_read_mock):
    """The cwd attribute must be forwarded to subprocess.Popen as cwd."""
    expected_cwd = "/test/work/directory"

    process = MagicMock()
    process.wait.return_value = MagicMock()
    process.stdout = MagicMock()
    process.returncode = 0
    popen_mock.return_value = process
    select_mock.return_value = ([process.stdout], None, None)
    os_read_mock.return_value = None

    with Environment("/") as env:
      Execute('echo "1"', cwd=expected_cwd)

    self.assertEqual(popen_mock.call_args_list[0][1]["cwd"], expected_cwd)
示例#24
0
    def test_attribute_environment(self, popen_mock, select_mock,
                                   os_read_mock):
        """The environment attribute must be passed to Popen as env."""
        expected_dict = {"JAVA_HOME": "/test/java/home"}

        process = MagicMock()
        process.wait.return_value = MagicMock()
        process.stdout = MagicMock()
        process.returncode = 0
        popen_mock.return_value = process
        select_mock.return_value = ([process.stdout], None, None)
        os_read_mock.return_value = None

        with Environment("/") as env:
            Execute('echo "1"', environment=expected_dict)

        self.assertEqual(popen_mock.call_args_list[0][1]["env"], expected_dict)
示例#25
0
  def action_delayed(self, action_name, main_resource):
    """
    Queue an HDFS resource operation for later batched execution.

    Converts main_resource's fields into a JSON-ready dict (mapping per
    RESOURCE_TO_JSON_FIELDS) and appends it to env.config['hdfs_files'],
    which action_execute later flushes in a single hadoop jar run.
    """
    resource = {}
    env = Environment.get_instance()
    if not 'hdfs_files' in env.config:
      env.config['hdfs_files'] = []

    # Put values in dictionary-resource
    for field_name, json_field_name in RESOURCE_TO_JSON_FIELDS.iteritems():  # NOTE: iteritems() is Python 2 only
      if field_name == 'action':
        resource[json_field_name] = action_name
      elif field_name == 'mode' and main_resource.resource.mode:
        # oct() gives e.g. '0755' on Python 2; [1:] strips the leading '0'
        resource[json_field_name] = oct(main_resource.resource.mode)[1:]
      elif getattr(main_resource.resource, field_name):
        resource[json_field_name] = getattr(main_resource.resource, field_name)

    # Add resource to create
    env.config['hdfs_files'].append(resource)
示例#26
0
    def action_create(self):
        """Write self.resource.name from its Jinja2 template, applying the
        owner/group/mode declared on the resource. A template_tag selects the
        '<name>-<tag>.j2' variant instead of '<name>.j2'."""
        template_tag = self.resource.template_tag
        qualified_file_name = self.resource.name
        file_name = os.path.basename(qualified_file_name)

        # format() resolves {file_name}/{template_tag} from the locals above
        template_name = (format("{file_name}-{template_tag}.j2")
                         if template_tag else format("{file_name}.j2"))

        with Environment.get_instance_copy() as env:
            File(qualified_file_name,
                 owner=self.resource.owner,
                 group=self.resource.group,
                 mode=self.resource.mode,
                 content=Template(template_name,
                                  extra_imports=self.resource.extra_imports))
  def action_create(self):
    """Materialize the resource file from its Jinja2 template; a tagged
    resource renders from '<name>-<tag>.j2' instead of '<name>.j2'."""
    template_tag = self.resource.template_tag
    qualified_file_name = self.resource.name
    file_name = os.path.basename(qualified_file_name)

    if template_tag:
      template_name = format("{file_name}-{template_tag}.j2")
    else:
      template_name = format("{file_name}.j2")

    with Environment.get_instance_copy() as env:
      rendered = Template(template_name, extra_imports=self.resource.extra_imports)
      File(qualified_file_name,
           owner=self.resource.owner,
           group=self.resource.group,
           mode=self.resource.mode,
           content=rendered)
示例#28
0
    def test_create_repo_redhat(self, file_mock, is_redhat_family,
                                is_ubuntu_family, is_suse_family):
        """On RedHat, Repository must register itself in env.resources and
        write a template-rendered file to /etc/yum.repos.d/<name>.repo."""
        is_redhat_family.return_value = True
        is_ubuntu_family.return_value = False
        is_suse_family.return_value = False
        with Environment('/') as env:
            with patch.object(repository,
                              "__file__",
                              new='/ambari/test/repo/dummy/path/file'):
                Repository(
                    'hadoop',
                    base_url='http://download.base_url.org/rpm/',
                    mirror_list=
                    'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                    repo_file_name='Repository',
                    repo_template=RHEL_SUSE_DEFAULT_TEMPLATE)

                # the resource registers itself under its class name and id
                self.assertTrue('hadoop' in env.resources['Repository'])
                defined_arguments = env.resources['Repository'][
                    'hadoop'].arguments
                expected_arguments = {
                    'repo_template': RHEL_SUSE_DEFAULT_TEMPLATE,
                    'base_url': 'http://download.base_url.org/rpm/',
                    'mirror_list':
                    'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                    'repo_file_name': 'Repository'
                }
                expected_template_arguments = {
                    'base_url': 'http://download.base_url.org/rpm/',
                    'mirror_list':
                    'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                    'repo_file_name': 'Repository'
                }

                self.assertEqual(defined_arguments, expected_arguments)
                self.assertEqual(file_mock.call_args[0][0],
                                 '/etc/yum.repos.d/Repository.repo')

                # the File content is a Template carrying the repo context
                template_item = file_mock.call_args[1]['content']
                template = str(template_item.name)
                expected_template_arguments.update({'repo_id': 'hadoop'})

                self.assertEqual(expected_template_arguments,
                                 template_item.context._dict)
                self.assertEqual(RHEL_SUSE_DEFAULT_TEMPLATE, template)
示例#29
0
    def action_execute(self, main_resource, sudo=False):
        """
        Flush queued HDFS file operations by running the hadoop jar once.

        Reads the queued dict list from env.config['hdfs_files'] (or
        'hdfs_files_sudo' when sudo=True), writes it to a json file and
        passes that file to the jar. No-op when the queue is empty; the
        queue is cleared on success.
        """
        env = Environment.get_instance()
        env_dict_key = 'hdfs_files_sudo' if sudo else 'hdfs_files'

        if not env_dict_key in env.config or not env.config[env_dict_key]:
            return

        # Check required parameters
        if not sudo:
            main_resource.assert_parameter_is_set('user')
            user = main_resource.resource.user
        else:
            user = None

        # NOTE(review): several of the locals below look unused, but
        # format(JSON_PATH) resolves placeholders from caller locals
        # (e.g. {timestamp}) — verify before removing any of them.
        hadoop_bin_dir = main_resource.resource.hadoop_bin_dir
        hadoop_conf_dir = main_resource.resource.hadoop_conf_dir
        security_enabled = main_resource.resource.security_enabled
        keytab_file = main_resource.resource.keytab
        kinit_path = main_resource.resource.kinit_path_local
        logoutput = main_resource.resource.logoutput
        principal_name = main_resource.resource.principal_name
        jar_path = JAR_PATH
        timestamp = time.time()
        json_path = format(JSON_PATH)

        if security_enabled:
            main_resource.kinit()

        # Write json file to disk
        File(json_path,
             owner=user,
             content=json.dumps(env.config[env_dict_key]))

        # Execute jar to create/delete resources in hadoop
        Execute(
            ('hadoop', '--config', hadoop_conf_dir, 'jar', jar_path,
             json_path),
            user=user,
            path=[hadoop_bin_dir],
            logoutput=logoutput,
            sudo=sudo,
        )

        # Clean
        env.config[env_dict_key] = []
示例#30
0
    def action_execute(self, main_resource):
        """
        Flush queued HDFS file operations by running the hadoop jar once.

        Reads the queued dict list from env.config['hdfs_files'], writes it
        to a json file and passes that file to the jar. Logs and returns
        when nothing was queued; the queue is cleared afterwards.
        """
        env = Environment.get_instance()

        # Check required parameters
        if main_resource.has_core_configs:
            main_resource.assert_parameter_is_set('user')

        if not 'hdfs_files' in env.config or not env.config['hdfs_files']:
            Logger.info(
                "No resources to create. 'create_on_execute' or 'delete_on_execute' or 'download_on_execute' wasn't triggered before this 'execute' action."
            )
            return

        # NOTE(review): some locals below look unused, but format(JSON_PATH)
        # and the hadoop command format() resolve placeholders from caller
        # locals (e.g. {timestamp}) — verify before removing any of them.
        hadoop_bin_dir = main_resource.resource.hadoop_bin_dir
        hadoop_conf_dir = main_resource.resource.hadoop_conf_dir
        user = main_resource.resource.user if main_resource.has_core_configs else None
        security_enabled = main_resource.resource.security_enabled
        keytab_file = main_resource.resource.keytab
        kinit_path = main_resource.resource.kinit_path_local
        logoutput = main_resource.resource.logoutput
        principal_name = main_resource.resource.principal_name
        jar_path = JAR_PATH
        timestamp = time.time()
        json_path = format(JSON_PATH)

        if security_enabled:
            main_resource.kinit()

        # Write json file to disk
        File(json_path,
             owner=user,
             content=json.dumps(env.config['hdfs_files']))

        # Execute jar to create/delete resources in hadoop
        Execute(
            format(
                "hadoop --config {hadoop_conf_dir} jar {jar_path} {json_path}"
            ),
            user=user,
            path=[hadoop_bin_dir],
            logoutput=logoutput,
        )

        # Clean
        env.config['hdfs_files'] = []
示例#31
0
文件: base.py 项目: screeley44/ambari
    def __new__(cls, name, env=None, provider=None, **kwargs):
        """Create a resource and register it with the environment.

        A list of names fans out into one resource per name; this call then
        builds and returns the instance for the final name. The resource is
        recorded both in env.resources[<class name>] and env.resource_list.
        """
        if isinstance(name, list):
            # instantiate every name but the last, then fall through
            while len(name) != 1:
                cls(name.pop(0), env, provider, **kwargs)
            name = name[0]

        env = env or Environment.get_instance()
        provider = provider or getattr(cls, 'provider', None)

        registry = env.resources.setdefault(cls.__name__, {})

        obj = super(Resource, cls).__new__(cls)
        registry[name] = obj
        env.resource_list.append(obj)
        return obj
示例#32
0
    def action_create(self):
        """Write the .repo file for this resource, appending to an existing
        file when append_to_file is set instead of replacing it."""
        with Environment.get_instance_copy() as env:
            # these local names feed format("{repo_dir}/{repo_file_name}...")
            repo_file_name = self.resource.repo_file_name
            repo_dir = get_repo_dir()
            new_content = InlineTemplate(
                self.resource.repo_template,
                repo_id=self.resource.repo_id,
                repo_file_name=self.resource.repo_file_name,
                base_url=self.resource.base_url,
                mirror_list=self.resource.mirror_list)
            repo_file_path = format("{repo_dir}/{repo_file_name}.repo")

            if self.resource.append_to_file and os.path.isfile(repo_file_path):
                existing = sudo.read_file(repo_file_path)
                content = existing + '\n' + new_content.get_content()
            else:
                # File renders the template object itself on write
                content = new_content

            File(repo_file_path, content=content)
示例#33
0
def _get_delegation_token(namenode_address, user, keytab, principal,
                          kinit_path):
    """Get a Kerberos delegation token for PXF from the NameNode.

    Issues a kerberized WebHDFS GETDELEGATIONTOKEN request and returns the
    token's ``urlString``. Raises Exception when no usable token is present
    in the response (previously a response missing the 'Token' key leaked a
    bare KeyError instead of the informative Exception below).
    """
    url = namenode_address + "/webhdfs/v1/?op=GETDELEGATIONTOKEN"
    logger.info("Getting delegation token from {0} for PXF".format(url))
    response, _, _ = curl_krb_request(Environment.get_instance().tmp_dir,
                                      keytab, principal, url,
                                      "get_delegation_token", kinit_path,
                                      False, "Delegation Token", user)
    json_response = json.loads(response)
    # Use .get() so a malformed/denied response falls through to the
    # explicit error path rather than raising an opaque KeyError.
    token = json_response.get('Token')
    if token and token.get('urlString'):
        return token['urlString']

    msg = "Unable to get delegation token for PXF"
    logger.error(msg)
    raise Exception(msg)
示例#34
0
File: base.py  Project: duxia/ambari
  def __new__(cls, name, env=None, provider=None, **kwargs):
    # Side effect: every Resource constructed is registered with the given
    # (or current) Environment so it can be executed later.
    #
    # A list ``name`` means "one resource per entry": extras are created
    # recursively and this object keeps the final name.
    if isinstance(name, list):
      while len(name) != 1:
        cls(name.pop(0), env, provider, **kwargs)

      name = name[0]

    env = env or Environment.get_instance()
    provider = provider or getattr(cls, 'provider', None)

    # Bucket by class name, e.g. env.resources['File'][name].
    r_type = cls.__name__
    if r_type not in env.resources:
      env.resources[r_type] = {}

    obj = super(Resource, cls).__new__(cls)
    env.resources[r_type][name] = obj
    # resource_list keeps creation order.
    env.resource_list.append(obj)
    return obj
示例#35
0
    def test_set_kernel_parameter(self, execute_mock):
        """set_kernel_parameter must both apply the value via sysctl -w and
        persist it to the sysctl config file."""
        sysctl_contents = ""
        mocked_file = self.__mock_file(sysctl_contents)

        with self.__with_mocked_file(mocked_file), Environment('/'):
            utilities.set_kernel_parameter('pname', 'pvalue')

        # Runtime value is applied through an Execute of sysctl -w ...
        self.assertTrue(execute_mock.called)

        execute_mock_called_command = execute_mock.call_args[0][0].command
        self.assertTrue(
            'sysctl -w pname="pvalue"' in execute_mock_called_command,
            "Invalid sysctl command called. %s" %
            pformat(execute_mock_called_command))

        handle = mocked_file()

        # ... and the setting is written to the (mocked) config file.
        handle.write.assert_called_once_with("pname = pvalue\n")
示例#36
0
 def action_create(self):
     """Write this repository's .repo file from its template.

     With ``append_to_file`` and an existing file, the rendered content is
     appended in place; otherwise a File resource writes it.
     """
     with Environment.get_instance_copy() as env:
         repo_file_name = self.resource.repo_file_name
         # Target directory depends on the detected OS family.
         repo_dir = repos_dirs[env.system.os_family]
         repo_template = self.resource.repo_template
         new_content = Template(
             repo_template,
             repo_id=self.resource.repo_id,
             repo_file_name=self.resource.repo_file_name,
             base_url=self.resource.base_url,
             mirror_list=self.resource.mirror_list,
         )
         repo_file_path = format("{repo_dir}/{repo_file_name}.repo")
         if self.resource.append_to_file and os.path.isfile(repo_file_path):
             with open(repo_file_path, "a") as repo_file:
                 repo_file.write("\n" + new_content.get_content())
         else:
             File(repo_file_path, content=new_content)
示例#37
0
    def action_create(self):
        """Create/refresh an apt .list repo file and update the apt index.

        The candidate content is rendered to a temp file first; the real
        file is only replaced — and the expensive index update run — when
        the contents actually changed.
        """
        with Environment.get_instance_copy() as env:
            with tempfile.NamedTemporaryFile() as tmpf:
                repo_file_name = format(
                    "{repo_file_name}.list",
                    repo_file_name=self.resource.repo_file_name)
                repo_file_path = format("{repo_dir}/{repo_file_name}",
                                        repo_dir=self.repo_dir)
                # Template file lives next to this provider's package.
                repo_template = os.path.join(
                    os.path.dirname(os.path.realpath(__file__)), '..',
                    REPO_TEMPLATE_FOLDER, self.resource.repo_template)

                new_content = Template(
                    repo_template,
                    package_type=self.package_type,
                    base_url=self.resource.base_url,
                    components=' '.join(
                        self.resource.components)).get_content()
                old_content = ''
                if self.resource.append_to_file and os.path.isfile(
                        repo_file_path):
                    with open(repo_file_path) as repo_file:
                        old_content = repo_file.read() + '\n'

                File(tmpf.name, content=old_content + new_content)

                # Only rewrite + refresh when the file is new or changed.
                if not os.path.isfile(repo_file_path) or not filecmp.cmp(
                        tmpf.name, repo_file_path):
                    File(repo_file_path, content=StaticFile(tmpf.name))

                    update_cmd_formatted = [format(x) for x in self.update_cmd]
                    # this is time expensive
                    retcode, out = checked_call(update_cmd_formatted,
                                                sudo=True)

                    # add public keys for new repos
                    missing_pkeys = set(
                        re.findall(self.missing_pkey_regex, out))
                    for pkey in missing_pkeys:
                        Execute(
                            format(self.add_pkey_cmd),
                            timeout=
                            15,  # in case we are on the host w/o internet (using localrepo), we should ignore hanging
                            ignore_failures=True)
示例#38
0
  def call_curl_request(self,user,keytab,principal, url, flag_http_response, request_method='GET',request_body='',header=''):
    """
    Make a kerberized REST call to the Ranger Admin API.

    :param user: service user for which call is to be made
    :param keytab: keytab of service user
    :param principal: principal of service user
    :param url: url with which call is to be made
    :param flag_http_response: flag to get only response-code or response string
    :param request_method: http method (GET / POST / PUT / DELETE)
    :param request_body: data to be send along with the request
    :param header: http header required for the call
    :return: Returns the response, error_msg, time_millis
    """
    # The original pre-assignments (response = None, etc.) were dead code:
    # curl_krb_request unconditionally rebinds all three names.
    response, error_msg, time_millis = curl_krb_request(
        Environment.get_instance().tmp_dir, keytab, principal, url,
        'ranger_admin_calls', None, flag_http_response,
        "Ranger-Admin API calls", user, kinit_timer_ms=0,
        method=request_method, body=request_body, header=header)

    return response, error_msg, time_millis
示例#39
0
    def test_remove_repo_redhat(self, file_mock,
                              is_redhat_family, is_ubuntu_family, is_suse_family):
        """Repository(action='remove') on a redhat-family OS should register
        the resource with exactly the arguments passed in."""
        # Force OS-family detection to redhat.
        is_redhat_family.return_value = True
        is_ubuntu_family.return_value = False
        is_suse_family.return_value = False
        with Environment('/') as env:
            Repository('hadoop',
                       action='remove',
                       base_url='http://download.base_url.org/rpm/',
                       mirror_list='https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                       repo_file_name='Repository')

            # Creating the resource registers it in the environment.
            self.assertTrue('hadoop' in env.resources['Repository'])
            defined_arguments = env.resources['Repository']['hadoop'].arguments
            expected_arguments = {'action': ['remove'],
                                  'base_url': 'http://download.base_url.org/rpm/',
                                  'mirror_list': 'https://mirrors.base_url.org/?repo=Repository&arch=$basearch',
                                  'repo_file_name': 'Repository'}
            self.assertEqual(defined_arguments, expected_arguments)
示例#40
0
  def call_curl_request(self,user,keytab,principal, url, flag_http_response, request_method='GET',request_body='',header=''):
    """
    Make a kerberized REST call to the Ranger Admin API.

    :param user: service user for which call is to be made
    :param keytab: keytab of service user
    :param principal: principal of service user
    :param url: url with which call is to be made
    :param flag_http_response: flag to get only response-code or response string
    :param request_method: http method (GET / POST / PUT / DELETE)
    :param request_body: data to be send along with the request
    :param header: http header required for the call
    :return: Returns the response, error_msg, time_millis
    """
    # The original pre-assignments (response = None, etc.) were dead code:
    # curl_krb_request unconditionally rebinds all three names.
    response, error_msg, time_millis = curl_krb_request(
        Environment.get_instance().tmp_dir, keytab, principal, url,
        'ranger_admin_calls', None, flag_http_response,
        "Ranger-Admin API calls", user, kinit_timer_ms=0,
        method=request_method, body=request_body, header=header)

    return response, error_msg, time_millis
示例#41
0
def get_check_command(oozie_url, host_name, configurations):
  """Build the shell command used to check Oozie admin status.

  Returns (command, kerberos_env). kerberos_env is None unless security is
  enabled, in which case it carries KRB5CCNAME pointing at a per-process
  credential cache. Raises KerberosPropertiesNotFound when security is on
  but the Oozie keytab/principal configurations are missing.
  """
  security_enabled = False
  if SECURITY_ENABLED in configurations:
    security_enabled = str(configurations[SECURITY_ENABLED]).upper() == 'TRUE'
  kerberos_env = None
  if security_enabled:
    if OOZIE_KEYTAB in configurations and OOZIE_PRINCIPAL in configurations:
      oozie_keytab = configurations[OOZIE_KEYTAB]
      oozie_principal = configurations[OOZIE_PRINCIPAL]

      # substitute _HOST in kerberos principal with actual fqdn
      oozie_principal = oozie_principal.replace('_HOST', host_name)
    else:
      raise KerberosPropertiesNotFound('The Oozie keytab and principal are required configurations when security is enabled.')

    # Create the kerberos credentials cache (ccache) file and set it in the environment to use
    # when executing curl
    env = Environment.get_instance()
    ccache_file = "{0}{1}oozie_alert_cc_{2}".format(env.tmp_dir, os.sep, os.getpid())
    kerberos_env = {'KRB5CCNAME': ccache_file}

    # Get the configured Kerberos executable search paths, if any
    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
    else:
      kerberos_executable_search_paths = None

    klist_path_local = get_klist_path(kerberos_executable_search_paths)
    # klist -s exits non-zero when the cache is missing or expired.
    klist_command = format("{klist_path_local} -s {ccache_file}")

    # Determine if we need to kinit by testing to see if the relevant cache exists and has
    # non-expired tickets.  Tickets are marked to expire after 5 minutes to help reduce the number
    # it kinits we do but recover quickly when keytabs are regenerated
    return_code, _ = call(klist_command)
    if return_code != 0:
      kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
      kinit_command = format("{kinit_path_local} -l 5m -kt {oozie_keytab} {oozie_principal}; ")

      # kinit (KRB5CCNAME in kerberos_env directs it at ccache_file)
      Execute(kinit_command, environment=kerberos_env)
  command = format("source /etc/oozie/conf/oozie-env.sh ; oozie admin -oozie {oozie_url} -status")
  return (command, kerberos_env)
示例#42
0
    def action_create(self):
        """Render the resource's configurations into a Hadoop-style XML
        config file and write it with the requested owner/group/mode."""
        filename = self.resource.filename
        xml_config_provider_config_dir = self.resource.conf_dir

        # |e - for html-like escaping of <,>,',"
        config_content = InlineTemplate(
            '''<!--{{time.asctime(time.localtime())}}-->
    <configuration>
    {% for key, value in configurations_dict|dictsort %}
    <property>
      <name>{{ key|e }}</name>
      <value>{{ resource_management.core.source.InlineTemplate(str(value)).get_content().strip() |e }}</value>
      {%- if not configuration_attrs is none -%}
      {%- for attrib_name, attrib_occurances in  configuration_attrs.items() -%}
      {%- for property_name, attrib_value in  attrib_occurances.items() -%}
      {% if property_name == key and attrib_name %}
      <{{attrib_name|e}}>{{attrib_value|e}}</{{attrib_name|e}}>
      {%- endif -%}
      {%- endfor -%}
      {%- endfor -%}
      {%- endif %}
    </property>
    {% endfor %}
  </configuration>''',
            extra_imports=[
                time, resource_management, resource_management.core,
                resource_management.core.source
            ],
            configurations_dict=self.resource.configurations,
            configuration_attrs=self.resource.configuration_attributes)

        xml_config_dest_file_path = os.path.join(
            xml_config_provider_config_dir, filename)
        Logger.info("Generating config: {0}".format(xml_config_dest_file_path))

        with Environment.get_instance_copy() as env:
            File(xml_config_dest_file_path,
                 content=config_content,
                 owner=self.resource.owner,
                 group=self.resource.group,
                 mode=self.resource.mode,
                 encoding=self.resource.encoding)
  def action_create(self):
    """Create or update a key=value properties file in place.

    Existing non-comment lines whose key matches a managed property are
    rewritten with the new (template-rendered) value; any properties not
    found in the file are appended at the end.
    """
    filename = self.resource.filename
    comment_symbols = self.resource.comment_symbols
    delimiter = self.resource.key_value_delimiter
    properties = self.resource.properties
    # Python 2: keys() is a list, mutated via remove() below.
    unsaved_values = properties.keys()
    new_content_lines = []

    if sudo.path_isfile(filename):
      file_content = sudo.read_file(filename, encoding=self.resource.encoding)
      new_content_lines += file_content.split('\n')

      Logger.info(format("Modifying existing properties file: (unknown)"))

      for line_num in range(len(new_content_lines)):
        line = new_content_lines[line_num]

        # Skip blank lines and comments; only touch key=value lines.
        if line.lstrip() and not line.lstrip()[0] in comment_symbols and delimiter in line:
          in_var_name = line.split(delimiter)[0].strip()
          in_var_value = line.split(delimiter)[1].strip()

          if in_var_name in properties:
            # Values may themselves be templates; render before writing.
            value = InlineTemplate(unicode(properties[in_var_name])).get_content()
            new_content_lines[line_num] = u"{0}{1}{2}".format(unicode(in_var_name), delimiter, value)
            unsaved_values.remove(in_var_name)
    else:
      Logger.info(format("Creating new properties file as (unknown) doesn't exist"))

    # Append any managed properties that were not already in the file.
    for property_name in unsaved_values:
      value = InlineTemplate(unicode(properties[property_name])).get_content()
      line = u"{0}{1}{2}".format(unicode(property_name), delimiter, value)
      new_content_lines.append(line)

    with Environment.get_instance_copy() as env:
      File (filename,
            content = u"\n".join(new_content_lines) + "\n",
            owner = self.resource.owner,
            group = self.resource.group,
            mode = self.resource.mode,
            encoding = self.resource.encoding,
      )
示例#44
0
 def action_create(self):
   """Create/refresh an apt .list repo file; on change, run the package
   index update and import any missing repository public keys."""
   with Environment.get_instance_copy() as env:
     with tempfile.NamedTemporaryFile() as tmpf:
       with tempfile.NamedTemporaryFile() as old_repo_tmpf:
         repo_file_name = format("{repo_file_name}.list",repo_file_name=self.resource.repo_file_name)
         repo_file_path = format("{repo_dir}/{repo_file_name}", repo_dir=self.repo_dir)
 
         new_content = InlineTemplate(self.resource.repo_template, package_type=self.package_type,
                                       base_url=self.resource.base_url,
                                       components=' '.join(self.resource.components)).get_content()
         old_content = ''
         if self.resource.append_to_file and os.path.isfile(repo_file_path):
             old_content = sudo.read_file(repo_file_path) + '\n'
 
         File(tmpf.name, 
              content=old_content+new_content
         )
         
         if os.path.isfile(repo_file_path):
           # a copy of old repo file, which will be readable by current user
           File(old_repo_tmpf.name, 
                content=StaticFile(repo_file_path),
           )
 
         # Only replace + refresh when the file is new or its content changed.
         if not os.path.isfile(repo_file_path) or not filecmp.cmp(tmpf.name, old_repo_tmpf.name):
           File(repo_file_path,
                content = StaticFile(tmpf.name)
           )
           
           update_cmd_formatted = [format(x) for x in self.update_cmd]
           # this is time expensive
           retcode, out = checked_call(update_cmd_formatted, sudo=True, quiet=False)
           
           # add public keys for new repos
           missing_pkeys = set(re.findall(self.missing_pkey_regex, out))
           for pkey in missing_pkeys:
             Execute(self.app_pkey_cmd_prefix + (pkey,),
                     timeout = 15, # in case we are on the host w/o internet (using localrepo), we should ignore hanging
                     ignore_failures = True,
                     sudo = True,
             )
示例#45
0
  def configure(self, env):
    """Write the component's config dir; for a secured monitor, also
    generate a fresh keystore/truststore/cert pair with random passwords."""
    import params
    env.set_params(params)

    if params.monitor_security_enabled and self.component == 'monitor':
      import os
      import random
      import string

      basedir = Environment.get_instance().config.basedir
      keystore_file = os.path.join(basedir, "files", "keystore.jks")
      truststore_file = os.path.join(basedir, "files", "cacerts.jks")
      cert_file = os.path.join(basedir, "files", "server.cer")

      # Refuse to overwrite previously generated credentials.
      if os.path.exists(keystore_file) or os.path.exists(truststore_file) or os.path.exists(cert_file):
        self.fail_with_error("trying to create monitor certs but they already existed")

      # Characters safe to embed in the keytool command line below.
      # (string.lowercase/uppercase are Python 2 names.)
      goodchars = string.lowercase + string.uppercase + string.digits + '#%+,-./:=?@^_'
      keypass = ''.join(random.choice(goodchars) for x in range(20))
      storepass = ''.join(random.choice(goodchars) for x in range(20))

      https_params = {}
      https_params[params.keystore_property] = params.keystore_path
      https_params[params.truststore_property] = params.truststore_path
      https_params[params.keystore_password_property] = keypass
      https_params[params.truststore_password_property] = storepass

      setup_conf_dir(name=self.component, extra_params=https_params)

      # Generate key pair, export the cert, then import it into the truststore.
      Execute( format("{java64_home}/bin/keytool -genkey -alias \"default\" -keyalg RSA -keypass {keypass} -storepass {storepass} -keystore {keystore_file} -dname \"CN=Unknown, OU=Unknown, O=Unknown, L=Unknown, ST=Unknown, C=Unknown\""),
               user=params.accumulo_user)
      Execute( format("{java64_home}/bin/keytool -export -alias \"default\" -storepass {storepass} -file {cert_file} -keystore {keystore_file}"),
               user=params.accumulo_user)
      Execute( format("echo \"yes\" | {java64_home}/bin/keytool -import -v -trustcacerts -alias \"default\" -file {cert_file} -keystore {truststore_file} -keypass {keypass} -storepass {storepass}"),
               user=params.accumulo_user)

      accumulo_StaticFile("keystore.jks")
      accumulo_StaticFile("cacerts.jks")

    else:
      setup_conf_dir(name=self.component)
示例#46
0
  def _collect(self):
    """Load the alert script module, gather its configuration tokens, and
    execute it, returning the (result_code, [text]) tuple it produces."""
    cmd_module = self._load_source()

    if cmd_module is not None:
      configurations = {}

      try:
        tokens = cmd_module.get_tokens()
        if tokens is not None:
          # for each token, if there is a value, store in; otherwise don't store
          # a key with a value of None
          for token in tokens:
            value = self._get_configuration_value(token)
            if value is not None:
              configurations[token] = value
      except AttributeError:
        # it's OK if the module doesn't have get_tokens() ; no tokens will
        # be passed in so hopefully the script doesn't need any
        logger.debug("The script {0} does not have a get_tokens() function".format(str(cmd_module)))

      # try to get basedir for scripts
      # it's needed for server side scripts to properly use resource management
      matchObj = re.match( r'((.*)services(.*)package)', self.path_to_script)
      if matchObj:
        basedir = matchObj.group(1)
        # Wrap execution in an Environment so resource_management works.
        with Environment(basedir, tmp_dir=AGENT_TMP_DIR, logger=logging.getLogger('ambari_alerts')) as env:
          result = cmd_module.execute(configurations, self.parameters, self.host_name)
      else:
        result = cmd_module.execute(configurations, self.parameters, self.host_name)

      loggerMsg = "[Alert][{0}] Failed with result {2}: {3}".format(
        self.get_name(), self.path_to_script, result[0], result[1])

      if result[0] == self.RESULT_CRITICAL:
        logger.error(loggerMsg)
      elif result[0] == self.RESULT_WARNING or result[0] == self.RESULT_UNKNOWN:
        logger.debug(loggerMsg)

      return result
    else:
      return (self.RESULT_UNKNOWN, ["Unable to execute script {0}".format(self.path)])
  def action_execute(self, main_resource):
    """Apply all queued HDFS file operations in one batch.

    Serializes env.config['hdfs_files'] to a JSON file and runs the helper
    jar against it, then clears the queue.
    """
    env = Environment.get_instance()

    # Check required parameters
    main_resource.assert_parameter_is_set('user')

    if not 'hdfs_files' in env.config or not env.config['hdfs_files']:
      Logger.info("No resources to create. 'create_on_execute' or 'delete_on_execute' wasn't triggered before this 'execute' action.")
      return

    hadoop_bin_dir = main_resource.resource.hadoop_bin_dir
    hadoop_conf_dir = main_resource.resource.hadoop_conf_dir
    user = main_resource.resource.user
    security_enabled = main_resource.resource.security_enabled
    keytab_file = main_resource.resource.keytab
    kinit_path = main_resource.resource.kinit_path_local
    logoutput = main_resource.resource.logoutput
    principal_name = main_resource.resource.principal_name
    jar_path=JAR_PATH
    # timestamp feeds the JSON_PATH template via format() below.
    timestamp = time.time()
    json_path=format(JSON_PATH)

    if security_enabled:
      main_resource.kinit()

    # Write json file to disk
    File(json_path,
         owner = user,
         content = json.dumps(env.config['hdfs_files'])
    )

    # Execute jar to create/delete resources in hadoop
    Execute(format("hadoop --config {hadoop_conf_dir} jar {jar_path} {json_path}"),
            user=user,
            path=[hadoop_bin_dir],
            logoutput=logoutput,
    )

    # Clean
    env.config['hdfs_files'] = []
  def action_create(self):
    """Generate a properties file from the resource's properties dict,
    rendering each value as an inline template."""
    filename = self.resource.filename
    dir = self.resource.dir
    if dir == None:
      filepath = filename
    else:
      filepath = os.path.join(dir, filename)

    config_content = InlineTemplate('''# Generated by Apache Ambari. {{time.asctime(time.localtime())}}
    {% for key, value in properties_dict|dictsort %}
{{key}}{{key_value_delimiter}}{{ resource_management.core.source.InlineTemplate(str(value)).get_content() }}{% endfor %}
    ''', extra_imports=[time, resource_management, resource_management.core, resource_management.core.source], properties_dict=self.resource.properties, key_value_delimiter=self.resource.key_value_delimiter)

    Logger.info(format("Generating properties file: {filepath}"))

    with Environment.get_instance_copy() as env:
      File (format("{filepath}"),
            content = config_content,
            owner = self.resource.owner,
            group = self.resource.group,
            mode = self.resource.mode
      )
示例#49
0
    def action_run(self):
        """Run a hadoop CLI command as the configured user, performing a
        kinit first when security is enabled (unless overridden)."""
        kinit__path_local = self.resource.kinit_path_local
        keytab = self.resource.keytab
        conf_dir = self.resource.conf_dir
        command = self.resource.command
        principal = self.resource.principal

        # A list/tuple command is shell-quoted element-wise and joined.
        if isinstance(command, (list, tuple)):
            command = " ".join(quote_bash_args(x) for x in command)

        with Environment.get_instance_copy() as env:
            if self.resource.security_enabled and not self.resource.kinit_override:
                Execute(format("{kinit__path_local} -kt {keytab} {principal}"), path=["/bin"], user=self.resource.user)

            Execute(
                format("hadoop --config {conf_dir} {command}"),
                user=self.resource.user,
                tries=self.resource.tries,
                try_sleep=self.resource.try_sleep,
                logoutput=self.resource.logoutput,
                path=self.resource.bin_dir,
                environment=self.resource.environment,
            )
示例#50
0
File: base.py  Project: duxia/ambari
  def __init__(self, name, env=None, provider=None, **kwargs):
    # __new__ already expanded list names; keep only the last entry here.
    if isinstance(name, list):
      name = name.pop(0)

    # __init__ can run again on an already-initialized instance returned
    # by __new__; bail out if so.
    if hasattr(self, 'name'):
      return

    self.env = env or Environment.get_instance()
    self.name = name

    self.provider = provider or getattr(self, 'provider', None)

    # Validate every keyword against the class's declared argument schema.
    self.arguments = {}
    for key, value in kwargs.items():
      try:
        arg = self._arguments[key]
      except KeyError:
        raise Fail("%s received unsupported argument %s" % (self, key))
      else:
        try:
          self.arguments[key] = arg.validate(value)
        except InvalidArgument, exc:
          # Python 2 except syntax; re-raise with the resource named.
          raise InvalidArgument("%s %s" % (self, exc))
def execute(configurations={}, parameters={}, host_name=None):
  """
  Returns a tuple containing the result code and a pre-formatted result label

  Keyword arguments:
  configurations (dictionary): a mapping of configuration key to value
  parameters (dictionary): a mapping of script parameter key to value
  host_name (string): the name of this host where the alert is running

  NOTE(review): this pasted variant ends after the secured branch — with
  security disabled it falls through and implicitly returns None.
  """

  result_code = RESULT_CODE_UNKNOWN

  if configurations is None:
    return (result_code, ['There were no configurations supplied to the script.'])

  webhcat_port = WEBHCAT_PORT_DEFAULT
  if TEMPLETON_PORT_KEY in configurations:
    webhcat_port = int(configurations[TEMPLETON_PORT_KEY])

  security_enabled = False
  if SECURITY_ENABLED_KEY in configurations:
    security_enabled = configurations[SECURITY_ENABLED_KEY].lower() == 'true'

  # parse script arguments
  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
  curl_connection_timeout = CURL_CONNECTION_TIMEOUT_DEFAULT
  if CONNECTION_TIMEOUT_KEY in parameters:
    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
    curl_connection_timeout = str(int(connection_timeout))


  # the alert will always run on the webhcat host
  if host_name is None:
    host_name = socket.getfqdn()

  smokeuser = SMOKEUSER_DEFAULT

  if SMOKEUSER_KEY in configurations:
    smokeuser = configurations[SMOKEUSER_KEY]

  # webhcat always uses http, never SSL
  query_url = "http://{0}:{1}/templeton/v1/status?user.name={2}".format(host_name, webhcat_port, smokeuser)

  # initialize
  total_time = 0
  json_response = {}

  if security_enabled:
    try:
      # defaults
      smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
      smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT

      # check script params
      if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
        smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
      if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
        smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]

      # check configurations last as they should always take precedence
      if SMOKEUSER_PRINCIPAL_KEY in configurations:
        smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
      if SMOKEUSER_KEYTAB_KEY in configurations:
        smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]

      # Get the configured Kerberos executable search paths, if any
      kerberos_executable_search_paths = None
      if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]

      # First kerberized call retrieves only the HTTP status code.
      env = Environment.get_instance()
      stdout, stderr, time_millis = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
                                                      query_url, "webhcat_alert_cc_", kerberos_executable_search_paths, True,
                                                      "WebHCat Server Status", smokeuser,
                                                      connection_timeout=curl_connection_timeout)

      # check the response code
      response_code = int(stdout)

      # 0 indicates no connection
      if response_code == 0:
        label = CRITICAL_CONNECTION_MESSAGE.format(query_url)
        return (RESULT_CODE_CRITICAL, [label])

      # any other response aside from 200 is a problem
      if response_code != 200:
        label = CRITICAL_HTTP_MESSAGE.format(response_code, query_url)
        return (RESULT_CODE_CRITICAL, [label])

      # now that we have the http status and it was 200, get the content
      stdout, stderr, total_time = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
                                                      query_url, "webhcat_alert_cc_", kerberos_executable_search_paths,
                                                      False, "WebHCat Server Status", smokeuser,
                                                      connection_timeout=curl_connection_timeout)
      json_response = json.loads(stdout)
    except Exception, exception:
      return (RESULT_CODE_CRITICAL, [str(exception)])
def execute(configurations={}, parameters={}, host_name=None):
  """
  Returns a tuple containing the result code and a pre-formatted result label

  Keyword arguments:
  configurations (dictionary): a mapping of configuration key to value
  parameters (dictionary): a mapping of script parameter key to value
  host_name (string): the name of this host where the alert is running
  """

  result_code = RESULT_CODE_UNKNOWN

  if configurations is None:
    return (result_code, ['There were no configurations supplied to the script.'])

  webhcat_port = WEBHCAT_PORT_DEFAULT
  if TEMPLETON_PORT_KEY in configurations:
    webhcat_port = int(configurations[TEMPLETON_PORT_KEY])

  security_enabled = False
  if SECURITY_ENABLED_KEY in configurations:
    security_enabled = configurations[SECURITY_ENABLED_KEY].lower() == 'true'

  # parse script arguments
  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
  curl_connection_timeout = CURL_CONNECTION_TIMEOUT_DEFAULT
  if CONNECTION_TIMEOUT_KEY in parameters:
    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
    curl_connection_timeout = str(int(connection_timeout))


  # the alert will always run on the webhcat host
  if host_name is None:
    host_name = socket.getfqdn()

  smokeuser = SMOKEUSER_DEFAULT

  if SMOKEUSER_KEY in configurations:
    smokeuser = configurations[SMOKEUSER_KEY]

  # webhcat always uses http, never SSL
  query_url = "http://{0}:{1}/templeton/v1/status?user.name={2}".format(host_name, webhcat_port, smokeuser)

  # initialize
  total_time = 0
  json_response = {}

  if security_enabled:
    try:
      # defaults
      smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
      smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT

      # check script params
      if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
        smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
      if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
        smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]

      # check configurations last as they should always take precedence
      if SMOKEUSER_PRINCIPAL_KEY in configurations:
        smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
      if SMOKEUSER_KEYTAB_KEY in configurations:
        smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]

      # Get the configured Kerberos executable search paths, if any
      kerberos_executable_search_paths = None
      if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]

      # First kerberized call retrieves only the HTTP status code.
      env = Environment.get_instance()
      stdout, stderr, time_millis = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
                                                      query_url, "webhcat_alert_cc_", kerberos_executable_search_paths, True,
                                                      "WebHCat Server Status", smokeuser,
                                                      connection_timeout=curl_connection_timeout)

      # check the response code
      response_code = int(stdout)

      # 0 indicates no connection
      if response_code == 0:
        label = CRITICAL_CONNECTION_MESSAGE.format(query_url, traceback.format_exc())
        return (RESULT_CODE_CRITICAL, [label])

      # any other response aside from 200 is a problem
      if response_code != 200:
        label = CRITICAL_HTTP_MESSAGE.format(response_code, query_url, traceback.format_exc())
        return (RESULT_CODE_CRITICAL, [label])

      # now that we have the http status and it was 200, get the content
      stdout, stderr, total_time = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
                                                      query_url, "webhcat_alert_cc_", kerberos_executable_search_paths,
                                                      False, "WebHCat Server Status", smokeuser,
                                                      connection_timeout=curl_connection_timeout)
      json_response = json.loads(stdout)
    except:
      return (RESULT_CODE_CRITICAL, [traceback.format_exc()])
  else:
    url_response = None

    try:
      # execute the query for the JSON that includes WebHCat status
      start_time = time.time()
      url_response = urllib2.urlopen(query_url, timeout=connection_timeout)
      total_time = time.time() - start_time

      json_response = json.loads(url_response.read())
    except urllib2.HTTPError as httpError:
      label = CRITICAL_HTTP_MESSAGE.format(httpError.code, query_url, traceback.format_exc())
      return (RESULT_CODE_CRITICAL, [label])
    except:
      label = CRITICAL_CONNECTION_MESSAGE.format(query_url, traceback.format_exc())
      return (RESULT_CODE_CRITICAL, [label])
    finally:
      # Always close the connection, best-effort.
      if url_response is not None:
        try:
          url_response.close()
        except:
          pass


  # if status is not in the response, we can't do any check; return CRIT
  if 'status' not in json_response:
    return (RESULT_CODE_CRITICAL, [CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE + str(json_response)])


  # URL response received, parse it
  try:
    webhcat_status = json_response['status']
  except:
    return (RESULT_CODE_CRITICAL, [CRITICAL_WEBHCAT_UNKNOWN_JSON_MESSAGE + "\n" + traceback.format_exc()])


  # proper JSON received, compare against known value
  if webhcat_status.lower() == WEBHCAT_OK_RESPONSE:
    result_code = RESULT_CODE_OK
    label = OK_MESSAGE.format(total_time, query_url)
  else:
    result_code = RESULT_CODE_CRITICAL
    label = CRITICAL_WEBHCAT_STATUS_MESSAGE.format(webhcat_status)

  return (result_code, [label])
def get_check_command(oozie_url, host_name, configurations, parameters, only_kinit):
  """
  Build the shell command used to probe Oozie server status, performing a
  kinit first when the cluster is secured.

  Keyword arguments:
  oozie_url (string): the Oozie admin URL to check
  host_name (string): host this alert runs on; substituted for '_HOST' in principals
  configurations (dictionary): mapping of configuration key to value
  parameters (dictionary): mapping of script parameter key to value
  only_kinit (boolean): when True, always kinit instead of first testing the
    credential cache with klist

  Returns a (command, kerberos_env, user) tuple; kerberos_env is None when
  security is disabled, otherwise a dict carrying KRB5CCNAME for curl.
  """
  kerberos_env = None

  # run the check as the configured user, falling back to the default
  user = USER_DEFAULT
  if USER_KEY in configurations:
    user = configurations[USER_KEY]

  if is_security_enabled(configurations):
    # defaults
    user_keytab = USER_KEYTAB_DEFAULT
    user_principal = USER_PRINCIPAL_DEFAULT

    # check script params
    if USER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
      user_principal = parameters[USER_PRINCIPAL_SCRIPT_PARAM_KEY]
      user_principal = user_principal.replace('_HOST', host_name.lower())
    if USER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
      user_keytab = parameters[USER_KEYTAB_SCRIPT_PARAM_KEY]

    # check configurations last as they should always take precedence
    if USER_PRINCIPAL_KEY in configurations:
      user_principal = configurations[USER_PRINCIPAL_KEY]
      user_principal = user_principal.replace('_HOST', host_name.lower())
    if USER_KEYTAB_KEY in configurations:
      user_keytab = configurations[USER_KEYTAB_KEY]

    # Create the kerberos credentials cache (ccache) file and set it in the environment to use
    # when executing curl; the file name embeds the PID to keep concurrent alerts separate
    env = Environment.get_instance()
    ccache_file = "{0}{1}oozie_alert_cc_{2}".format(env.tmp_dir, os.sep, os.getpid())
    kerberos_env = {'KRB5CCNAME': ccache_file}

    # Get the configured Kerberos executable search paths, if any
    kerberos_executable_search_paths = None
    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]

    klist_path_local = get_klist_path(kerberos_executable_search_paths)
    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
    # NOTE: format() here interpolates the surrounding local variables by name
    # (kinit_path_local, ccache_file, user_keytab, user_principal) — do not rename them
    kinit_part_command = format("{kinit_path_local} -l 5m20s -c {ccache_file} -kt {user_keytab} {user_principal}; ")

    # Determine if we need to kinit by testing to see if the relevant cache exists and has
    # non-expired tickets.  Tickets are marked to expire after 5 minutes to help reduce the number
    # it kinits we do but recover quickly when keytabs are regenerated

    if only_kinit:
      kinit_command = kinit_part_command
    else:
      # klist -s exits non-zero when the cache is missing/expired, triggering the kinit
      kinit_command = "{0} -s {1} || ".format(klist_path_local, ccache_file) + kinit_part_command

    Execute(kinit_command, environment=kerberos_env, user=user)

  # oozie configuration directory uses a symlink when > HDP 2.2
  oozie_config_directory = OOZIE_CONF_DIR_LEGACY
  if os.path.exists(OOZIE_CONF_DIR):
    oozie_config_directory = OOZIE_CONF_DIR

  command = "source {0}/oozie-env.sh ; oozie admin -oozie {1} -status".format(
    oozie_config_directory, oozie_url)

  return (command, kerberos_env, user)
def execute(configurations={}, parameters={}, host_name=None):
  """
  Returns a tuple containing the result code and a pre-formatted result label

  Keyword arguments:
  configurations (dictionary): a mapping of configuration key to value
  parameters (dictionary): a mapping of script parameter key to value
  host_name (string): the name of this host where the alert is running

  NOTE(review): the mutable default arguments are never mutated here, but are
  still a Python anti-pattern worth cleaning up.
  NOTE(review): smokeuser is only bound when SMOKEUSER_KEY is present in
  configurations, yet it is used unconditionally below — verify callers always
  supply it, otherwise a NameError is possible.
  """

  if configurations is None:
    return (('UNKNOWN', ['There were no configurations supplied to the script.']))

  scheme = 'http'  
  http_uri = None
  https_uri = None
  http_policy = 'HTTP_ONLY'

  security_enabled = False
  if SECURITY_ENABLED_KEY in configurations:
    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'

  executable_paths = None
  if EXECUTABLE_SEARCH_PATHS in configurations:
    executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]

  kerberos_keytab = None
  if KERBEROS_KEYTAB in configurations:
    kerberos_keytab = configurations[KERBEROS_KEYTAB]

  kerberos_principal = None
  if KERBEROS_PRINCIPAL in configurations:
    kerberos_principal = configurations[KERBEROS_PRINCIPAL]
    kerberos_principal = kerberos_principal.replace('_HOST', host_name)

  if NODEMANAGER_HTTP_ADDRESS_KEY in configurations:
    http_uri = configurations[NODEMANAGER_HTTP_ADDRESS_KEY]

  if NODEMANAGER_HTTPS_ADDRESS_KEY in configurations:
    https_uri = configurations[NODEMANAGER_HTTPS_ADDRESS_KEY]

  if YARN_HTTP_POLICY_KEY in configurations:
    http_policy = configurations[YARN_HTTP_POLICY_KEY]
    
  if SMOKEUSER_KEY in configurations:
    smokeuser = configurations[SMOKEUSER_KEY]

  # parse script arguments
  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
  if CONNECTION_TIMEOUT_KEY in parameters:
    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])

  # determine the right URI and whether to use SSL
  uri = http_uri
  if http_policy == 'HTTPS_ONLY':
    scheme = 'https'

    if https_uri is not None:
      uri = https_uri

  # replace the configured host part with this host's name, keeping the port
  uri = str(host_name) + ":" + uri.split(":")[1]
  live_nodemanagers_qry = "{0}://{1}/jmx?qry=Hadoop:service=ResourceManager,name=RMNMInfo".format(scheme, uri)
  convert_to_json_failed = False
  response_code = None
  try:
    if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
      env = Environment.get_instance()

      # curl requires an integer timeout
      curl_connection_timeout = int(connection_timeout)

      url_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
        live_nodemanagers_qry, "nm_health_summary_alert", executable_paths, False,
        "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout)

      try:
        url_response_json = json.loads(url_response)
        live_nodemanagers = json.loads(url_response_json["beans"][0]["LiveNodeManagers"])
      except ValueError, error:
        convert_to_json_failed = True
        logger.exception("[Alert][{0}] Convert response to json failed or json doesn't contain needed data: {1}".
        format("NodeManager Health Summary", str(error)))

      # fall back to fetching just the HTTP response code when the body was not parseable
      if convert_to_json_failed:
        response_code, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
          live_nodemanagers_qry, "nm_health_summary_alert", executable_paths, True,
          "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout)
    # NOTE(review): this function is truncated here — the else branch (the
    # unsecured code path) and the remainder of the try block are missing.
    else:
def execute(configurations={}, parameters={}, host_name=None):
    """
    Returns a tuple containing the result code and a pre-formatted result label
    for the NameNode last-checkpoint alert.

    Keyword arguments:
    configurations (dictionary): a mapping of configuration key to value
    parameters (dictionary): a mapping of script parameter key to value
    host_name (string): the name of this host where the alert is running

    NOTE(review): the visible body ends inside the final except handler; the
    return of (result_code, [label]) is presumably below — confirm in the
    full file.
    """

    if configurations is None:
        return ("UNKNOWN", ["There were no configurations supplied to the script."])

    uri = None
    scheme = "http"
    http_uri = None
    https_uri = None
    http_policy = "HTTP_ONLY"
    checkpoint_tx = CHECKPOINT_TX_DEFAULT
    checkpoint_period = CHECKPOINT_PERIOD_DEFAULT

    if NN_HTTP_ADDRESS_KEY in configurations:
        http_uri = configurations[NN_HTTP_ADDRESS_KEY]

    if NN_HTTPS_ADDRESS_KEY in configurations:
        https_uri = configurations[NN_HTTPS_ADDRESS_KEY]

    if NN_HTTP_POLICY_KEY in configurations:
        http_policy = configurations[NN_HTTP_POLICY_KEY]

    if NN_CHECKPOINT_TX_KEY in configurations:
        checkpoint_tx = configurations[NN_CHECKPOINT_TX_KEY]

    if NN_CHECKPOINT_PERIOD_KEY in configurations:
        checkpoint_period = configurations[NN_CHECKPOINT_PERIOD_KEY]

    # NOTE(review): smokeuser is unbound when SMOKEUSER_KEY is absent but is
    # used below on the secured path — verify callers always supply it
    if SMOKEUSER_KEY in configurations:
        smokeuser = configurations[SMOKEUSER_KEY]

    executable_paths = None
    if EXECUTABLE_SEARCH_PATHS in configurations:
        executable_paths = configurations[EXECUTABLE_SEARCH_PATHS]

    security_enabled = False
    if SECURITY_ENABLED_KEY in configurations:
        security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == "TRUE"

    kerberos_keytab = None
    if KERBEROS_KEYTAB in configurations:
        kerberos_keytab = configurations[KERBEROS_KEYTAB]

    kerberos_principal = None
    if KERBEROS_PRINCIPAL in configurations:
        kerberos_principal = configurations[KERBEROS_PRINCIPAL]
        kerberos_principal = kerberos_principal.replace("_HOST", host_name)

    # parse script arguments
    connection_timeout = CONNECTION_TIMEOUT_DEFAULT
    if CONNECTION_TIMEOUT_KEY in parameters:
        connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])

    # thresholds arrive as fractions; convert to percentages for comparison below
    percent_warning = PERCENT_WARNING_DEFAULT
    if PERCENT_WARNING_KEY in parameters:
        percent_warning = float(parameters[PERCENT_WARNING_KEY]) * 100

    percent_critical = PERCENT_CRITICAL_DEFAULT
    if PERCENT_CRITICAL_KEY in parameters:
        percent_critical = float(parameters[PERCENT_CRITICAL_KEY]) * 100

    # determine the right URI and whether to use SSL
    uri = http_uri
    if http_policy == "HTTPS_ONLY":
        scheme = "https"

        if https_uri is not None:
            uri = https_uri

    # current time in epoch milliseconds, to compare against LastCheckpointTime
    current_time = int(round(time.time() * 1000))

    last_checkpoint_time_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".format(scheme, uri)
    journal_transaction_info_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(scheme, uri)

    # start out assuming an OK status
    label = None
    result_code = "OK"

    try:
        if kerberos_principal is not None and kerberos_keytab is not None and security_enabled:
            env = Environment.get_instance()

            # curl requires an integer timeout
            curl_connection_timeout = int(connection_timeout)

            last_checkpoint_time_response, error_msg, time_millis = curl_krb_request(
                env.tmp_dir,
                kerberos_keytab,
                kerberos_principal,
                last_checkpoint_time_qry,
                "checkpoint_time_alert",
                executable_paths,
                False,
                "NameNode Last Checkpoint",
                smokeuser,
                connection_timeout=curl_connection_timeout,
            )

            last_checkpoint_time_response_json = json.loads(last_checkpoint_time_response)
            last_checkpoint_time = int(last_checkpoint_time_response_json["beans"][0]["LastCheckpointTime"])

            journal_transaction_info_response, error_msg, time_millis = curl_krb_request(
                env.tmp_dir,
                kerberos_keytab,
                kerberos_principal,
                journal_transaction_info_qry,
                "checkpoint_time_alert",
                executable_paths,
                False,
                "NameNode Last Checkpoint",
                smokeuser,
                connection_timeout=curl_connection_timeout,
            )

            journal_transaction_info_response_json = json.loads(journal_transaction_info_response)
            journal_transaction_info = journal_transaction_info_response_json["beans"][0]["JournalTransactionInfo"]
        else:
            # unsecured path: fetch the JMX values directly
            last_checkpoint_time = int(
                get_value_from_jmx(last_checkpoint_time_qry, "LastCheckpointTime", connection_timeout)
            )

            journal_transaction_info = get_value_from_jmx(
                journal_transaction_info_qry, "JournalTransactionInfo", connection_timeout
            )

        # JournalTransactionInfo is itself a JSON string embedded in the JMX bean
        journal_transaction_info_dict = json.loads(journal_transaction_info)

        last_tx = int(journal_transaction_info_dict["LastAppliedOrWrittenTxId"])
        most_recent_tx = int(journal_transaction_info_dict["MostRecentCheckpointTxId"])
        transaction_difference = last_tx - most_recent_tx

        # seconds since the last checkpoint
        delta = (current_time - last_checkpoint_time) / 1000

        label = LABEL.format(h=get_time(delta)["h"], m=get_time(delta)["m"], tx=transaction_difference)

        # alert only when BOTH the transaction backlog and the elapsed-time
        # percentage of the checkpoint period exceed their thresholds
        if (transaction_difference > int(checkpoint_tx)) and (
            float(delta) / int(checkpoint_period) * 100 >= int(percent_critical)
        ):
            result_code = "CRITICAL"
        elif (transaction_difference > int(checkpoint_tx)) and (
            float(delta) / int(checkpoint_period) * 100 >= int(percent_warning)
        ):
            result_code = "WARNING"

    except Exception, e:
        label = str(e)
        result_code = "UNKNOWN"
Example #56
0
File: source.py  Project: duxia/ambari
 def __init__(self, name):
     """Record the resource name and capture the shared Environment singleton."""
     self.name = name
     self.env = Environment.get_instance()
Example #57
0
    def action_remove(self):
        """Delete this repository's .repo file from the OS-specific repo directory."""
        with Environment.get_instance_copy() as env:
            repo_file_name = self.resource.repo_file_name
            # repos_dirs maps OS family (e.g. redhat, suse) to its repo directory
            repo_dir = repos_dirs[env.system.os_family]

            # format() interpolates the locals repo_dir and repo_file_name by name
            File(format("{repo_dir}/{repo_file_name}.repo"), action="delete")
Example #58
0
File: source.py  Project: duxia/ambari
 def __init__(self, env=None):
     """Use the supplied environment, or fall back to the shared Environment singleton."""
     if not env:
         env = Environment.get_instance()
     self.env = env
def get_check_command(oozie_url, host_name, configurations, parameters):
  """
  Build the shell command used to probe Oozie server status, performing a
  kinit first (if needed) when the cluster is secured.

  Keyword arguments:
  oozie_url (string): the Oozie admin URL to check
  host_name (string): the name of this host where the alert is running
  configurations (dictionary): a mapping of configuration key to value
  parameters (dictionary): a mapping of script parameter key to value

  Returns a (command, kerberos_env, smokeuser) tuple; kerberos_env is None
  when security is disabled, otherwise a dict carrying KRB5CCNAME for curl.
  """
  kerberos_env = None

  # run the check as the configured smoke user, falling back to the default
  smokeuser = SMOKEUSER_DEFAULT
  if SMOKEUSER_KEY in configurations:
    smokeuser = configurations[SMOKEUSER_KEY]

  security_enabled = False
  if SECURITY_ENABLED in configurations:
    security_enabled = str(configurations[SECURITY_ENABLED]).upper() == 'TRUE'

  if security_enabled:
    # defaults
    smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
    smokeuser_principal = SMOKEUSER_PRINCIPAL_DEFAULT

    # check script params
    if SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY in parameters:
      smokeuser_principal = parameters[SMOKEUSER_PRINCIPAL_SCRIPT_PARAM_KEY]
    if SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY in parameters:
      smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_SCRIPT_PARAM_KEY]

    # check configurations last as they should always take precedence
    if SMOKEUSER_PRINCIPAL_KEY in configurations:
      smokeuser_principal = configurations[SMOKEUSER_PRINCIPAL_KEY]
    if SMOKEUSER_KEYTAB_KEY in configurations:
      smokeuser_keytab = configurations[SMOKEUSER_KEYTAB_KEY]

    # Create the kerberos credentials cache (ccache) file and set it in the environment to use
    # when executing curl; the file name embeds the PID to keep concurrent alerts separate
    env = Environment.get_instance()
    ccache_file = "{0}{1}oozie_alert_cc_{2}".format(env.tmp_dir, os.sep, os.getpid())
    kerberos_env = {'KRB5CCNAME': ccache_file}

    # Get the configured Kerberos executable search paths, if any
    kerberos_executable_search_paths = None
    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
      kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]

    klist_path_local = get_klist_path(kerberos_executable_search_paths)
    # NOTE: format() interpolates the locals klist_path_local / ccache_file by name
    klist_command = format("{klist_path_local} -s {ccache_file}")

    # Determine if we need to kinit by testing to see if the relevant cache exists and has
    # non-expired tickets.  Tickets are marked to expire after 5 minutes to help reduce the number
    # it kinits we do but recover quickly when keytabs are regenerated
    return_code, _ = call(klist_command, user=smokeuser)
    if return_code != 0:
      kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
      kinit_command = format("{kinit_path_local} -l 5m -kt {smokeuser_keytab} {smokeuser_principal}; ")

      # kinit
      Execute(kinit_command, environment=kerberos_env, user=smokeuser)

  # oozie configuration directory uses a symlink when > HDP 2.2
  oozie_config_directory = OOZIE_CONF_DIR_LEGACY
  if os.path.exists(OOZIE_CONF_DIR):
    oozie_config_directory = OOZIE_CONF_DIR

  command = "source {0}/oozie-env.sh ; oozie admin -oozie {1} -status".format(
    oozie_config_directory, oozie_url)

  return (command, kerberos_env, smokeuser)