Example 1
    def __init__(self, config, controller):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.force_https_protocol = config.get_force_https_protocol()
        self.exec_tmp_dir = Constants.AGENT_TMP_DIR
        self.file_cache = FileCache(config)
        self.status_commands_stdout = os.path.join(
            self.tmp_dir, 'status_command_stdout.txt')
        self.status_commands_stderr = os.path.join(
            self.tmp_dir, 'status_command_stderr.txt')
        self.public_fqdn = hostname.public_hostname(config)
        # cache reset will be called on every agent registration
        controller.registration_listeners.append(self.file_cache.reset)

        # Construct the hadoop credential lib JARs path
        self.credential_shell_lib_path = os.path.join(
            config.get('security', 'credential_lib_dir',
                       self.DEFAULT_CREDENTIAL_SHELL_LIB_PATH), '*')

        self.credential_conf_dir = config.get('security',
                                              'credential_conf_dir',
                                              self.DEFAULT_CREDENTIAL_CONF_DIR)

        self.credential_shell_cmd = config.get(
            'security', 'credential_shell_cmd',
            self.DEFAULT_CREDENTIAL_SHELL_CMD)

        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore fail
        self.commands_in_progress_lock = threading.RLock()
        self.commands_in_progress = {}
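The '*' joined onto credential_lib_dir above forms a JVM-style classpath wildcard (credential_lib_dir/*) that matches every JAR in that directory. A minimal illustration (the path is made up):

import os
# e.g. '/var/lib/ambari-agent/cred/lib' -> '/var/lib/ambari-agent/cred/lib/*'
print(os.path.join('/var/lib/ambari-agent/cred/lib', '*'))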
Example 2
  def test_unpack_archive(self):
    tmpdir = tempfile.mkdtemp()
    dummy_archive_name = os.path.join("ambari_agent", "dummy_files",
                                      "dummy_archive.zip")
    archive_file = open(dummy_archive_name, "rb")
    fileCache = FileCache(self.config)
    fileCache.unpack_archive(archive_file, tmpdir)
    # Count summary size of unpacked files:
    total_size = 0
    total_files = 0
    total_dirs = 0
    for dirpath, dirnames, filenames in os.walk(tmpdir):
      total_dirs += 1
      for f in filenames:
        fp = os.path.join(dirpath, f)
        total_size += os.path.getsize(fp)
        total_files += 1
    self.assertEquals(total_size, 51258L)
    self.assertEquals(total_files, 28)
    self.assertEquals(total_dirs, 8)
    shutil.rmtree(tmpdir)

    # Test exception handling
    with patch("os.path.isdir") as isdir_mock:
      isdir_mock.side_effect = self.exc_side_effect
      try:
        fileCache.unpack_archive(archive_file, tmpdir)
        self.fail('CachingException not thrown')
      except CachingException:
        pass # Expected
      except Exception, e:
        self.fail('Unexpected exception thrown:' + str(e))
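The test above checks unpack_archive only through its observable effects. A minimal implementation consistent with those assertions might look like the sketch below; it is an illustration built on zipfile, not the actual FileCache code.

import os
import zipfile

def unpack_archive(mem_buffer, target_directory):
    # Extract every entry of the zip archive into target_directory,
    # creating intermediate directories as needed. The test patches
    # os.path.isdir, so a check like this must be on the code path.
    zfile = zipfile.ZipFile(mem_buffer)
    for name in zfile.namelist():
        dirname = os.path.join(target_directory, os.path.dirname(name))
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        if not name.endswith('/'):
            with open(os.path.join(target_directory, name), 'wb') as dest:
                dest.write(zfile.read(name))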
Example 3
  def test_get_hook_base_dir(self, provide_directory_mock):
    fileCache = FileCache(self.config)
    # Check missing parameter
    command = {
      'commandParams' : {
      }
    }
    base = fileCache.get_hook_base_dir(command, "server_url_pref")
    self.assertEqual(base, None)
    self.assertFalse(provide_directory_mock.called)

    # Check existing dir case
    command = {
      'commandParams' : {
        'hooks_folder' : os.path.join('HDP', '2.1.1', 'hooks')
      }
    }
    provide_directory_mock.return_value = "dummy value"
    fileCache = FileCache(self.config)
    res = fileCache.get_hook_base_dir(command, "server_url_pref")
    self.assertEquals(
      pprint.pformat(provide_directory_mock.call_args_list[0][0]),
      "('/var/lib/ambari-agent/cache', "
      "{0}, "
      "'server_url_pref')".format(pprint.pformat(os.path.join('stacks','HDP', '2.1.1', 'hooks'))))
    self.assertEquals(res, "dummy value")
Example 4
 def test_get_custom_actions_base_dir(self, provide_directory_mock):
   provide_directory_mock.return_value = "dummy value"
   fileCache = FileCache(self.config)
   res = fileCache.get_custom_actions_base_dir("server_url_pref")
   self.assertEquals(
     pprint.pformat(provide_directory_mock.call_args_list[0][0]),
     "('/var/lib/ambari-agent/cache', 'custom_actions', 'server_url_pref')")
   self.assertEquals(res, "dummy value")
Example 5
 def test_get_service_base_dir(self, isdir_mock):
     fileCache = FileCache(self.config)
     isdir_mock.return_value = True
     base = fileCache.get_service_base_dir("HDP", "2.0.7", "HBASE",
                                           "REGION_SERVER")
     self.assertEqual(
         base,
         "/var/lib/ambari-agent/cache/stacks/HDP/2.0.7/services/HBASE")
Example 6
    def test_invalidate_directory(self, makedirs_mock, rmtree_mock,
                                  unlink_mock, isdir_mock, isfile_mock,
                                  exists_mock):
        fileCache = FileCache(self.config)
        # Test execution flow if path points to file
        isfile_mock.return_value = True
        isdir_mock.return_value = False
        exists_mock.return_value = True

        fileCache.invalidate_directory("dummy-dir")

        self.assertTrue(unlink_mock.called)
        self.assertFalse(rmtree_mock.called)
        self.assertTrue(makedirs_mock.called)

        unlink_mock.reset_mock()
        rmtree_mock.reset_mock()
        makedirs_mock.reset_mock()

        # Test execution flow if path points to dir
        isfile_mock.return_value = False
        isdir_mock.return_value = True
        exists_mock.return_value = True

        fileCache.invalidate_directory("dummy-dir")

        self.assertFalse(unlink_mock.called)
        self.assertTrue(rmtree_mock.called)
        self.assertTrue(makedirs_mock.called)

        unlink_mock.reset_mock()
        rmtree_mock.reset_mock()
        makedirs_mock.reset_mock()

        # Test execution flow if path points nowhere
        isfile_mock.return_value = False
        isdir_mock.return_value = False
        exists_mock.return_value = False

        fileCache.invalidate_directory("dummy-dir")

        self.assertFalse(unlink_mock.called)
        self.assertFalse(rmtree_mock.called)
        self.assertTrue(makedirs_mock.called)

        unlink_mock.reset_mock()
        rmtree_mock.reset_mock()
        makedirs_mock.reset_mock()

        # Test exception handling
        makedirs_mock.side_effect = self.exc_side_effect
        try:
            fileCache.invalidate_directory("dummy-dir")
            self.fail('CachingException not thrown')
        except CachingException:
            pass  # Expected
        except Exception, e:
            self.fail('Unexpected exception thrown:' + str(e))
Example 7
 def test_build_download_url(self):
     fileCache = FileCache(self.config)
     url = fileCache.build_download_url('http://localhost:8080/resources/',
                                        'stacks/HDP/2.1.1/hooks',
                                        'archive.zip')
     self.assertEqual(
         url,
         'http://localhost:8080/resources//stacks/HDP/2.1.1/hooks/archive.zip'
     )
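The assertion shows that the pieces are joined with a literal '/', so a trailing slash on the prefix yields the double slash in the expected URL. A one-line reconstruction inferred from the test (not the original source):

def build_download_url(server_url_prefix, directory, filename):
    # Naive join; keeps any trailing slash from the prefix intact,
    # which is why the expected URL above contains '//'.
    return '{0}/{1}/{2}'.format(server_url_prefix, directory, filename)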
Example 8
 def include_dependent_declaration_headers(self,
                                           file_generator: FileGenerator,
                                           file_cache: FileCache):
     parent_generator = self.enum_argument_generator.parent_generator
     if type(parent_generator) is NamespaceGenerator:
         header_to_include = file_cache.enums_header(
             parent_generator.full_name_array)
     else:
         header_to_include = file_cache.class_header_decl(
             parent_generator.full_name_array)
     file_generator.include_user_header(header_to_include)
Example 9
  def test_invalidate_directory(self, makedirs_mock, rmtree_mock,
                                unlink_mock, isdir_mock, isfile_mock,
                                exists_mock):
    fileCache = FileCache(self.config)
    # Test execution flow if path points to file
    isfile_mock.return_value = True
    isdir_mock.return_value = False
    exists_mock.return_value = True

    fileCache.invalidate_directory("dummy-dir")

    self.assertTrue(unlink_mock.called)
    self.assertFalse(rmtree_mock.called)
    self.assertTrue(makedirs_mock.called)

    unlink_mock.reset_mock()
    rmtree_mock.reset_mock()
    makedirs_mock.reset_mock()

    # Test execution flow if path points to dir
    isfile_mock.return_value = False
    isdir_mock.return_value = True
    exists_mock.return_value = True

    fileCache.invalidate_directory("dummy-dir")

    self.assertFalse(unlink_mock.called)
    self.assertTrue(rmtree_mock.called)
    self.assertTrue(makedirs_mock.called)

    unlink_mock.reset_mock()
    rmtree_mock.reset_mock()
    makedirs_mock.reset_mock()

    # Test execution flow if path points nowhere
    isfile_mock.return_value = False
    isdir_mock.return_value = False
    exists_mock.return_value = False

    fileCache.invalidate_directory("dummy-dir")

    self.assertFalse(unlink_mock.called)
    self.assertFalse(rmtree_mock.called)
    self.assertTrue(makedirs_mock.called)

    unlink_mock.reset_mock()
    rmtree_mock.reset_mock()
    makedirs_mock.reset_mock()

    # Test exception handling
    makedirs_mock.side_effect = self.exc_side_effect
    try:
      fileCache.invalidate_directory("dummy-dir")
      self.fail('CachingException not thrown')
    except CachingException:
      pass # Expected
    except Exception, e:
      self.fail('Unexpected exception thrown:' + str(e))
Example 10
  def test_provide_directory_no_update(self, build_download_url_mock):
    try:
      self.config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, FileCache.ENABLE_AUTO_AGENT_CACHE_UPDATE_KEY, "false")
      fileCache = FileCache(self.config)

      # Test uptodate dirs after start
      path = os.path.join("cache_path", "subdirectory")
      res = fileCache.provide_directory("cache_path", "subdirectory",
                                        "server_url_prefix")
      self.assertEquals(res, path)
      self.assertFalse(build_download_url_mock.called)
    finally:
      self.config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, FileCache.ENABLE_AUTO_AGENT_CACHE_UPDATE_KEY, "true")
    pass
Example 11
 def test_get_service_base_dir(self, provide_directory_mock):
   provide_directory_mock.return_value = "dummy value"
   fileCache = FileCache(self.config)
   command = {
     'commandParams' : {
       'service_package_folder' : 'HDP/2.1.1/services/ZOOKEEPER/package'
     }
   }
   res = fileCache.get_service_base_dir(command, "server_url_pref")
   self.assertEquals(
     pprint.pformat(provide_directory_mock.call_args_list[0][0]),
     "('/var/lib/ambari-agent/cache',\n "
     "'stacks/HDP/2.1.1/services/ZOOKEEPER/package',\n"
     " 'server_url_pref')")
   self.assertEquals(res, "dummy value")
Example 12
 def test_get_service_base_dir(self, provide_directory_mock):
   provide_directory_mock.return_value = "dummy value"
   fileCache = FileCache(self.config)
   command = {
     'commandParams' : {
       'service_package_folder' : os.path.join('stacks', 'HDP', '2.1.1', 'services', 'ZOOKEEPER', 'package')
     }
   }
   res = fileCache.get_service_base_dir(command, "server_url_pref")
   self.assertEquals(
     pprint.pformat(provide_directory_mock.call_args_list[0][0]),
     "('/var/lib/ambari-agent/cache',\n "
     "{0},\n"
     " 'server_url_pref')".format(pprint.pformat(os.path.join('stacks', 'HDP', '2.1.1', 'services', 'ZOOKEEPER', 'package'))))
   self.assertEquals(res, "dummy value")
Example 13
    def __generate(self):
        self.__process()
        for namespace in self.api_description.namespaces:
            Capi.__substitute_implementation_class_name(namespace)
        namespace_generators = create_namespace_generators(
            self.api_description, self.params_description)
        by_first_argument_exception_traits = ExceptionTraits.ByFirstArgument(
            self.params_description, namespace_generators)
        no_handling_exception_traits = ExceptionTraits.NoHandling()
        if self.params_description.exception_handling_mode == TExceptionHandlingMode.by_first_argument:
            main_exception_traits = by_first_argument_exception_traits
        else:
            main_exception_traits = no_handling_exception_traits
        capi_generator = CapiGenerator(main_exception_traits,
                                       no_handling_exception_traits,
                                       self.params_description,
                                       self.api_description)
        file_cache = FileCache(self.params_description)
        for namespace_generator in namespace_generators:
            namespace_generator.generate(file_cache, capi_generator)
        capi_generator.generate(file_cache)
        self.__generate_root_header(namespace_generators, file_cache)

        if self.unit_tests_generator:
            self.unit_tests_generator.generate(namespace_generators)
Example 14
def cache_factory(mgr, kind):
    if kind == cache_options[0]:
        return Cache(mgr)
    elif kind == cache_options[1]:
        return FileCache(mgr)
    else:
        raise ValueError("{0} is not a valid cache type!".format(kind))
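cache_options is not shown in this snippet; the factory indexes into it, so it is presumably a two-element sequence of kind names. A hedged sketch of the surrounding context, with the sequence and stub classes assumed purely for illustration:

cache_options = ('memory', 'file')  # assumption; defined elsewhere in reality

class Cache(object):
    def __init__(self, mgr):
        self.mgr = mgr

class FileCache(Cache):
    pass

# With these stubs, cache_factory(mgr, 'memory') returns a Cache,
# cache_factory(mgr, 'file') returns a FileCache, and any other
# kind raises ValueError.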
Example 15
 def __generate_enums_header(self, file_cache: FileCache):
     if self.enum_generators:
         enums_header = file_cache.get_file_for_enums(self.full_name_array)
         enums_header.put_begin_cpp_comments(self.params)
         with WatchdogScope(enums_header,
                            self.full_name.upper() + '_ENUMS_INCLUDED'):
             self.__generate_namespace_enumerators(enums_header)
Example 16
    def __generate_root_header(self, namespace_generators: [], file_cache: FileCache):
        if self.params_description.root_header and self.api_description.project_name:
            root_header = FileGenerator(os.path.join(self.output_folder, self.params_description.root_header))
            root_header.put_begin_cpp_comments(self.params_description)
            with WatchdogScope(root_header, self.api_description.project_name.upper() + '_LIBRARY_ROOT_INCLUDED'):
                with IfDefScope(root_header, '{0}_LIBRARY_USE_DYNAMIC_LOADER'.format(
                        self.api_description.project_name.upper()), False):
                    for namespace_generator in namespace_generators:
                        root_header.put_line('#define {0}_CAPI_USE_DYNAMIC_LOADER'.format(
                            namespace_generator.wrap_name.upper()))
                root_header.put_line('')

                with IfDefScope(root_header, '{0}_LIBRARY_DEFINE_FUNCTION_POINTERS'.format(
                        self.api_description.project_name.upper()), False):
                    for namespace_generator in namespace_generators:
                        root_header.put_line('#define {0}_CAPI_DEFINE_FUNCTION_POINTERS'.format(
                            namespace_generator.wrap_name.upper()))
                root_header.put_line('')

                root_header.put_include_files(False)
                for namespace_generator in namespace_generators:
                    root_header.include_user_header(file_cache.namespace_header(namespace_generator.full_name_array))
                if self.params_description.root_header_initializer:
                    root_header.put_line('')
                    with IfDefScope(root_header, '__cplusplus'):
                        if self.params_description.root_header_namespace:
                            root_header.put_line('namespace {0}'.format(self.params_description.root_header_namespace))
                            with IndentScope(root_header):
                                self.__generate_root_initializer(root_header, namespace_generators)
                        else:
                            self.__generate_root_initializer(root_header, namespace_generators)
Example 17
 def process_external_namespaces(namespaces: [object], external_namespaces: [object]):
     for cur_namespace in namespaces:
         external_namespace = TExternalNamespace()
         external_namespace.name = cur_namespace.name
         external_namespace.detach_method_name = new_params.detach_method_name
         external_namespace.get_raw_pointer_method_name = new_params.get_raw_pointer_method_name
         file_cache = FileCache(new_params)
         external_namespace.include = file_cache.namespace_header(cur_namespace.full_name_array)
         process_external_namespaces(cur_namespace.nested_namespaces, external_namespace.namespaces)
         for cur_class in cur_namespace.classes:
             external_class = TExternalClass()
             external_class.name = cur_class.name
             external_class.wrap_name = cur_class.wrap_name
             external_class.include_declaration = file_cache.class_header_decl(cur_class.full_name_array)
             external_class.include_definition = file_cache.class_header(cur_class.full_name_array)
             external_namespace.classes.append(external_class)
         external_namespaces.append(external_namespace)
Example 18
 def __init__(self, config, controller):
   self.config = config
   self.tmp_dir = config.get('agent', 'prefix')
   self.file_cache = FileCache(config)
   self.python_executor = PythonExecutor(self.tmp_dir, config)
   self.status_commands_stdout = os.path.join(self.tmp_dir,
                                              'status_command_stdout.txt')
   self.status_commands_stderr = os.path.join(self.tmp_dir,
                                              'status_command_stderr.txt')
   # cache reset will be called on every agent registration
   controller.registration_listeners.append(self.file_cache.reset)
   # Clean up old status command files if any
   try:
     os.unlink(self.status_commands_stdout)
     os.unlink(self.status_commands_stderr)
   except OSError:
     pass # Ignore fail
Example 19
 def generate_check_and_throw_exception(self, file_cache: FileCache):
     out = file_cache.get_file_for_check_and_throw_exception()
     out.put_begin_cpp_comments(self.params)
     with WatchdogScope(out,
                        self.params.beautiful_capi_namespace.upper() + '_CHECK_AND_THROW_EXCEPTION_INCLUDED'):
         with IfDefScope(out, '__cplusplus'):
             out.put_include_files()
             out.include_system_header('stdexcept')
             out.include_system_header('cassert')
             for exception_class in self.exception_classes:
                 out.include_user_header(
                     file_cache.class_header(exception_class.full_name_array))
             out.put_line('namespace {0}'.format(self.params.beautiful_capi_namespace))
             with IndentScope(out):
                 out.put_line(
                     'inline void check_and_throw_exception(uint32_t exception_code, void* exception_object)')
                 with IndentScope(out):
                     self.__create_check_and_throw_exceptions_body(out, ByFirstArgument.__generate_throw_wrap)
Example 20
    def test_provide_directory_no_update(self, build_download_url_mock):
        try:
            self.config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY,
                            FileCache.ENABLE_AUTO_AGENT_CACHE_UPDATE_KEY,
                            "false")
            fileCache = FileCache(self.config)

            # Test uptodate dirs after start
            path = os.path.join("cache_path", "subdirectory")
            res = fileCache.provide_directory("cache_path", "subdirectory",
                                              "server_url_prefix")
            self.assertEquals(res, path)
            self.assertFalse(build_download_url_mock.called)
        finally:
            self.config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY,
                            FileCache.ENABLE_AUTO_AGENT_CACHE_UPDATE_KEY,
                            "true")
        pass
Example 21
    def _ProcessDir( self, DirToProcess ):
        """ internal function to do the actual processing """
        # we always use cache (might be in memory)
        Cache = FileCache( DirToProcess )
        CompleteCache = FileCache()
        
        # loop through all entries
        for CurDir, dirs, files in os.walk( DirToProcess ): #@UnusedVariable
      
            # Cache handling
            if self._UseCache or self._Validate:
                if CurDir != Cache.GetDirectory():
                    if self._UseCache:
                        Cache.saveCache() # save before new cache is initialized
                    CompleteCache += Cache
                    Cache = FileCache( CurDir, BaseDir = os.path.relpath(CurDir, DirToProcess) )
                try:
                    Cache.loadCache()
                except IOError:
                    pass # IOError here means that cache file does not exist

            # loop through files
            for filename in files:
                ResDict = self._ProcessFile(DirToProcess, Cache, CurDir, filename)
                
                # checking if file was skipped for some reason
                if ResDict is None:
                    continue

                # always add to Cache (might be in memory only)
                Cache.addEntry( ResDict )
         
                # output/update/whatever for each file
                if self._ProgressFunction:
                    self._ProgressFunction( ResDict )
                   
        # Cache handling. save to disk
        if self._UseCache:
            Cache.saveCache()

        # get the last entries also and return
        CompleteCache += Cache
        return CompleteCache
Example 22
    def __init__(self, config, controller):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.exec_tmp_dir = Constants.AGENT_TMP_DIR
        self.file_cache = FileCache(config)
        self.status_commands_stdout = os.path.join(
            self.tmp_dir, 'status_command_stdout.txt')
        self.status_commands_stderr = os.path.join(
            self.tmp_dir, 'status_command_stderr.txt')
        self.public_fqdn = hostname.public_hostname(config)
        # cache reset will be called on every agent registration
        controller.registration_listeners.append(self.file_cache.reset)

        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore fail
        self.commands_in_progress_lock = threading.RLock()
        self.commands_in_progress = {}
Example 23
 def __generate_namespace_header(self, file_cache: FileCache,
                                 capi_generator: CapiGenerator):
     namespace_header = file_cache.get_file_for_namespace(
         self.full_name_array)
     namespace_header.put_begin_cpp_comments(self.params)
     with WatchdogScope(namespace_header,
                        self.full_name.upper() + '_INCLUDED'):
         namespace_header.put_include_files()
         namespace_header.include_user_header(
             file_cache.capi_header(self.full_name_array))
         namespace_header.include_user_header(
             file_cache.fwd_header(self.full_name_array))
         if self.enum_generators:
             namespace_header.include_user_header(
                 file_cache.enums_header(self.full_name_array))
         for nested_namespace_generator in self.nested_namespaces:
             namespace_header.include_user_header(
                 file_cache.namespace_header(
                     nested_namespace_generator.full_name_array))
         for class_generator in self.classes:
             namespace_header.include_user_header(
                 file_cache.class_header(class_generator.full_name_array))
         self.__generate_namespace_functions(capi_generator, file_cache,
                                             namespace_header)
         include_headers(namespace_header,
                         self.namespace_object.include_headers)
         DoxygenCppGenerator().generate_for_namespace(
             namespace_header, self.namespace_object, self.full_wrap_name)
Example 24
    def test_unpack_archive(self):
        tmpdir = tempfile.mkdtemp()
        dummy_archive_name = os.path.join("ambari_agent", "dummy_files",
                                          "dummy_archive.zip")
        archive_file = open(dummy_archive_name, "rb")
        fileCache = FileCache(self.config)
        fileCache.unpack_archive(archive_file, tmpdir)
        # Count summary size of unpacked files:
        total_size = 0
        total_files = 0
        total_dirs = 0
        for dirpath, dirnames, filenames in os.walk(tmpdir):
            total_dirs += 1
            for f in filenames:
                fp = os.path.join(dirpath, f)
                total_size += os.path.getsize(fp)
                total_files += 1
        self.assertEquals(total_size, 51258L)
        self.assertEquals(total_files, 28)
        self.assertEquals(total_dirs, 8)
        shutil.rmtree(tmpdir)

        # Test exception handling
        with patch("os.path.isdir") as isdir_mock:
            isdir_mock.side_effect = self.exc_side_effect
            try:
                fileCache.unpack_archive(archive_file, tmpdir)
                self.fail('CachingException not thrown')
            except CachingException:
                pass  # Expected
            except Exception, e:
                self.fail('Unexpected exception thrown:' + str(e))
Example 25
    def test_get_hook_base_dir(self, provide_directory_mock):
        fileCache = FileCache(self.config)
        # Check missing parameter
        command = {'commandParams': {}}
        base = fileCache.get_hook_base_dir(command, "server_url_pref")
        self.assertEqual(base, None)
        self.assertFalse(provide_directory_mock.called)

        # Check existing dir case
        command = {
            'commandParams': {
                'hooks_folder': os.path.join('HDP', '2.1.1', 'hooks')
            }
        }
        provide_directory_mock.return_value = "dummy value"
        fileCache = FileCache(self.config)
        res = fileCache.get_hook_base_dir(command, "server_url_pref")
        self.assertEquals(
            pprint.pformat(provide_directory_mock.call_args_list[0][0]),
            "('/var/lib/ambari-agent/cache', "
            "{0}, "
            "'server_url_pref')".format(
                pprint.pformat(os.path.join('stacks', 'HDP', '2.1.1',
                                            'hooks'))))
        self.assertEquals(res, "dummy value")
Example 26
  def test_fetch_url(self, urlopen_mock):
    fileCache = FileCache(self.config)
    remote_url = "http://dummy-url/"
    # Test normal download
    test_str = 'abc' * 100000 # Very long string
    test_string_io = StringIO.StringIO(test_str)
    test_buffer = MagicMock()
    test_buffer.read.side_effect = test_string_io.read
    urlopen_mock.return_value = test_buffer

    memory_buffer = fileCache.fetch_url(remote_url)

    self.assertEquals(memory_buffer.getvalue(), test_str)
    self.assertEqual(test_buffer.read.call_count, 20) # depends on buffer size
    # Test exception handling
    test_buffer.read.side_effect = self.exc_side_effect
    try:
      fileCache.fetch_url(remote_url)
      self.fail('CachingException not thrown')
    except CachingException:
      pass # Expected
    except Exception, e:
      self.fail('Unexpected exception thrown:' + str(e))
Example 27
 def __generate_forward_declarations(self, file_cache: FileCache,
                                     capi_generator: CapiGenerator):
     forward_declarations = file_cache.get_file_for_fwd(
         self.full_name_array)
     forward_declarations.put_begin_cpp_comments(self.params)
     with WatchdogScope(forward_declarations,
                        self.full_name.upper() + '_FWD_INCLUDED'):
         with IfDefScope(forward_declarations, '__cplusplus'):
             if self.params.enable_cpp11_features_in_wrap_code:
                 forward_declarations.put_include_files()
                 forward_declarations.include_system_header('utility')
             capi_generator.main_exception_traits.generate_check_and_throw_exception_forward_declaration(
                 forward_declarations)
             self.__generate_forward_declarations_impl(forward_declarations)
Example 28
 def __init__(self, config, controller):
   self.config = config
   self.tmp_dir = config.get('agent', 'prefix')
   self.file_cache = FileCache(config)
   self.python_executor = PythonExecutor(self.tmp_dir, config)
   self.status_commands_stdout = os.path.join(self.tmp_dir,
                                              'status_command_stdout.txt')
   self.status_commands_stderr = os.path.join(self.tmp_dir,
                                              'status_command_stderr.txt')
   self.public_fqdn = hostname.public_hostname()
   # cache reset will be called on every agent registration
   controller.registration_listeners.append(self.file_cache.reset)
   # Clean up old status command files if any
   try:
     os.unlink(self.status_commands_stdout)
     os.unlink(self.status_commands_stderr)
   except OSError:
     pass # Ignore fail
Example 29
 def test_read_write_hash_sum(self):
     tmpdir = tempfile.mkdtemp()
     dummyhash = "DUMMY_HASH"
     fileCache = FileCache(self.config)
     fileCache.write_hash_sum(tmpdir, dummyhash)
     newhash = fileCache.read_hash_sum(tmpdir)
     self.assertEquals(newhash, dummyhash)
     shutil.rmtree(tmpdir)
     # Test read of not existing file
     newhash = fileCache.read_hash_sum(tmpdir)
     self.assertEquals(newhash, None)
     # Test write to not existing file
     with patch("__builtin__.open") as open_mock:
         open_mock.side_effect = self.exc_side_effect
         try:
             fileCache.write_hash_sum(tmpdir, dummyhash)
             self.fail('CachingException not thrown')
         except CachingException:
             pass  # Expected
         except Exception, e:
             self.fail('Unexpected exception thrown:' + str(e))
Example 30
  def __init__(self, config, controller):
    self.config = config
    self.tmp_dir = config.get('agent', 'prefix')
    self.exec_tmp_dir = Constants.AGENT_TMP_DIR
    self.file_cache = FileCache(config)
    self.status_commands_stdout = os.path.join(self.tmp_dir,
                                               'status_command_stdout.txt')
    self.status_commands_stderr = os.path.join(self.tmp_dir,
                                               'status_command_stderr.txt')
    self.public_fqdn = hostname.public_hostname(config)
    # cache reset will be called on every agent registration
    controller.registration_listeners.append(self.file_cache.reset)

    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore fail
    self.commands_in_progress_lock = threading.RLock()
    self.commands_in_progress = {}
Example 31
 def test_read_write_hash_sum(self):
   tmpdir = tempfile.mkdtemp()
   dummyhash = "DUMMY_HASH"
   fileCache = FileCache(self.config)
   fileCache.write_hash_sum(tmpdir, dummyhash)
   newhash = fileCache.read_hash_sum(tmpdir)
   self.assertEquals(newhash, dummyhash)
   shutil.rmtree(tmpdir)
   # Test read of not existing file
   newhash = fileCache.read_hash_sum(tmpdir)
   self.assertEquals(newhash, None)
   # Test write to not existing file
   with patch("__builtin__.open") as open_mock:
     open_mock.side_effect = self.exc_side_effect
     try:
       fileCache.write_hash_sum(tmpdir, dummyhash)
       self.fail('CachingException not thrown')
     except CachingException:
       pass # Expected
     except Exception, e:
       self.fail('Unexpected exception thrown:' + str(e))
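A minimal pair of helpers consistent with the assertions above; the hash file name is an assumption, and the real FileCache may use a different name and richer error handling.

import os

HASH_SUM_FILE = '.hash'  # hypothetical name, not taken from the original source

class CachingException(Exception):
    pass

def write_hash_sum(directory, new_hash):
    try:
        with open(os.path.join(directory, HASH_SUM_FILE), 'w') as fh:
            fh.write(new_hash)
    except IOError as err:
        # The test expects write failures to surface as CachingException
        raise CachingException(str(err))

def read_hash_sum(directory):
    try:
        with open(os.path.join(directory, HASH_SUM_FILE)) as fh:
            return fh.read().strip()
    except IOError:
        return None  # matches the "read of not existing file" assertion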
Example 32
class CustomServiceOrchestrator():
  """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

  SCRIPT_TYPE_PYTHON = "PYTHON"
  COMMAND_NAME_STATUS = "STATUS"
  CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
  CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

  PRE_HOOK_PREFIX="before"
  POST_HOOK_PREFIX="after"

  def __init__(self, config, controller):
    self.config = config
    self.tmp_dir = config.get('agent', 'prefix')
    self.file_cache = FileCache(config)
    self.python_executor = PythonExecutor(self.tmp_dir, config)
    self.status_commands_stdout = os.path.join(self.tmp_dir,
                                               'status_command_stdout.txt')
    self.status_commands_stderr = os.path.join(self.tmp_dir,
                                               'status_command_stderr.txt')
    # cache reset will be called on every agent registration
    controller.registration_listeners.append(self.file_cache.reset)
    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore fail


  def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name = None,
                 override_output_files = True):
    """
    forced_command_name may be specified manually. In this case, the value
    defined in the command JSON is ignored.
    """
    try:
      script_type = command['commandParams']['script_type']
      script = command['commandParams']['script']
      timeout = int(command['commandParams']['command_timeout'])
      server_url_prefix = command['hostLevelParams']['jdk_location']
      task_id = "status"
      try:
        task_id = command['taskId']
        command_name = command['roleCommand']
      except KeyError:
        pass # Status commands have no taskId

      if forced_command_name is not None: # Supplied name overrides the one from the command JSON
        command_name = forced_command_name

      if command_name == self.CUSTOM_ACTION_COMMAND:
        base_dir = self.file_cache.get_custom_actions_base_dir(server_url_prefix)
        script_tuple = (os.path.join(base_dir, script), base_dir)
        hook_dir = None
      else:
        if command_name == self.CUSTOM_COMMAND_COMMAND:
          command_name = command['hostLevelParams']['custom_command']
        hook_dir = self.file_cache.get_hook_base_dir(command, server_url_prefix)
        base_dir = self.file_cache.get_service_base_dir(command, server_url_prefix)
        script_path = self.resolve_script_path(base_dir, script, script_type)
        script_tuple = (script_path, base_dir)


      tmpstrucoutfile = os.path.join(self.tmp_dir,
                                    "structured-out-{0}.json".format(task_id))
      if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
        # We don't support anything else yet
        message = "Unknown script type {0}".format(script_type)
        raise AgentException(message)
      # Execute command using proper interpreter
      json_path = self.dump_command_to_json(command)
      pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.PRE_HOOK_PREFIX, command_name, script_type)
      post_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.POST_HOOK_PREFIX, command_name, script_type)
      py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
      # filter None values
      filtered_py_file_list = [i for i in py_file_list if i]

      # Executing hooks and script
      ret = None
      for py_file, current_base_dir in filtered_py_file_list:
        script_params = [command_name, json_path, current_base_dir]
        ret = self.python_executor.run_file(py_file, script_params,
                               tmpoutfile, tmperrfile, timeout,
                               tmpstrucoutfile, override_output_files)
        # Next run_file() invocations should always append to current output
        override_output_files = False
        if ret['exitcode'] != 0:
          break

      if not ret: # Something went wrong
        raise AgentException("No script has been executed")

    except Exception: # We do not want to let agent fail completely
      exc_type, exc_obj, exc_tb = sys.exc_info()
      message = "Catched an exception while executing "\
        "custom service command: {0}: {1}".format(exc_type, exc_obj)
      logger.exception(message)
      ret = {
        'stdout' : message,
        'stderr' : message,
        'structuredOut' : '{}',
        'exitcode': 1,
      }
    return ret


  def requestComponentStatus(self, command):
    """
     The component status is determined by the exit code returned by runCommand().
     Exit code 0 means the component is running; any other exit code means
     it is not running.
    """
    override_output_files=True # by default, we override status command output
    if logger.level == logging.DEBUG:
      override_output_files = False
    res = self.runCommand(command, self.status_commands_stdout,
                          self.status_commands_stderr, self.COMMAND_NAME_STATUS,
                          override_output_files=override_output_files)
    if res['exitcode'] == 0:
      return LiveStatus.LIVE_STATUS
    else:
      return LiveStatus.DEAD_STATUS


  def resolve_script_path(self, base_dir, script, script_type):
    """
    Encapsulates the logic of determining the script location.
    """
    path = os.path.join(base_dir, script)
    if not os.path.exists(path):
      message = "Script {0} does not exist".format(path)
      raise AgentException(message)
    return path


  def resolve_hook_script_path(self, stack_hooks_dir, prefix, command_name, script_type):
    """
    Returns a tuple(path to hook script, hook base dir) according to string prefix
    and command name. If script does not exist, returns None
    """
    if not stack_hooks_dir:
      return None
    hook_dir = "{0}-{1}".format(prefix, command_name)
    hook_base_dir = os.path.join(stack_hooks_dir, hook_dir)
    hook_script_path = os.path.join(hook_base_dir, "scripts", "hook.py")
    if not os.path.isfile(hook_script_path):
      logger.debug("Hook script {0} not found, skipping".format(hook_script_path))
      return None
    return hook_script_path, hook_base_dir


  def dump_command_to_json(self, command):
    """
    Converts command to json file and returns file path
    """
    # Perform few modifications to stay compatible with the way in which
    # site.pp files are generated by manifestGenerator.py
    public_fqdn = hostname.public_hostname()
    command['public_hostname'] = public_fqdn
    # Now, dump the json file
    command_type = command['commandType']
    from ActionQueue import ActionQueue  # To avoid cyclic dependency
    if command_type == ActionQueue.STATUS_COMMAND:
      # These files are created frequently, that's why we don't
      # store them all; we keep only the latest one
      file_path = os.path.join(self.tmp_dir, "status_command.json")
    else:
      task_id = command['taskId']
      command['clusterHostInfo'] = manifestGenerator.decompressClusterHostInfo(command['clusterHostInfo'])
      file_path = os.path.join(self.tmp_dir, "command-{0}.json".format(task_id))
    # Json may contain passwords, that's why we need proper permissions
    if os.path.isfile(file_path):
      os.unlink(file_path)
    with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT,
                           0600), 'w') as f:
      content = json.dumps(command, sort_keys = False, indent = 4)
      f.write(content)
    return file_path
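The permission handling at the end of dump_command_to_json is a reusable pattern: unlink any stale file, then create it via os.open so the restrictive mode is applied at creation time rather than afterwards. A standalone sketch of just that pattern (names are illustrative):

import json
import os

def write_private_json(file_path, data):
    # Recreate the file so the mode below applies to a fresh inode
    if os.path.isfile(file_path):
        os.unlink(file_path)
    # O_CREAT with mode 0o600: readable and writable by the owner only
    fd = os.open(file_path, os.O_WRONLY | os.O_CREAT, 0o600)
    with os.fdopen(fd, 'w') as f:
        f.write(json.dumps(data, sort_keys=False, indent=4))
    return file_path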
Example 33
    def test_provide_directory(self, write_hash_sum_mock, unpack_archive_mock,
                               invalidate_directory_mock, read_hash_sum_mock,
                               fetch_url_mock, build_download_url_mock):
        build_download_url_mock.return_value = "http://dummy-url/"
        HASH1 = "hash1"
        membuffer = MagicMock()
        membuffer.getvalue.return_value.strip.return_value = HASH1
        fileCache = FileCache(self.config)

        # Test uptodate dirs after start
        self.assertFalse(fileCache.uptodate_paths)
        path = os.path.join("cache_path", "subdirectory")
        # Test initial downloading (when dir does not exist)
        fetch_url_mock.return_value = membuffer
        read_hash_sum_mock.return_value = "hash2"
        res = fileCache.provide_directory("cache_path", "subdirectory",
                                          "server_url_prefix")
        self.assertTrue(invalidate_directory_mock.called)
        self.assertTrue(write_hash_sum_mock.called)
        self.assertEquals(fetch_url_mock.call_count, 2)
        self.assertEquals(pprint.pformat(fileCache.uptodate_paths),
                          pprint.pformat([path]))
        self.assertEquals(res, path)

        fetch_url_mock.reset_mock()
        write_hash_sum_mock.reset_mock()
        invalidate_directory_mock.reset_mock()
        unpack_archive_mock.reset_mock()

        # Test cache invalidation when local hash does not differ
        fetch_url_mock.return_value = membuffer
        read_hash_sum_mock.return_value = HASH1
        fileCache.reset()

        res = fileCache.provide_directory("cache_path", "subdirectory",
                                          "server_url_prefix")
        self.assertFalse(invalidate_directory_mock.called)
        self.assertFalse(write_hash_sum_mock.called)
        self.assertEquals(fetch_url_mock.call_count, 1)

        self.assertEquals(pprint.pformat(fileCache.uptodate_paths),
                          pprint.pformat([path]))
        self.assertEquals(res, path)

        fetch_url_mock.reset_mock()
        write_hash_sum_mock.reset_mock()
        invalidate_directory_mock.reset_mock()
        unpack_archive_mock.reset_mock()

        # Test execution path when path is up-to date (already checked)
        res = fileCache.provide_directory("cache_path", "subdirectory",
                                          "server_url_prefix")
        self.assertFalse(invalidate_directory_mock.called)
        self.assertFalse(write_hash_sum_mock.called)
        self.assertEquals(fetch_url_mock.call_count, 0)
        self.assertEquals(pprint.pformat(fileCache.uptodate_paths),
                          pprint.pformat([path]))
        self.assertEquals(res, path)

        # Check exception handling when tolerance is disabled
        self.config.set('agent', 'tolerate_download_failures', "false")
        fetch_url_mock.side_effect = self.caching_exc_side_effect
        fileCache = FileCache(self.config)
        try:
            fileCache.provide_directory("cache_path", "subdirectory",
                                        "server_url_prefix")
            self.fail('CachingException not thrown')
        except CachingException:
            pass  # Expected
        except Exception, e:
            self.fail('Unexpected exception thrown:' + str(e))
Example 34
 def test_reset(self):
   fileCache = FileCache(self.config)
   fileCache.uptodate_paths.append('dummy-path')
   fileCache.reset()
   self.assertFalse(fileCache.uptodate_paths)
Example 35
class TestFileCache(TestCase):
    def setUp(self):
        # disable stdout
        out = StringIO.StringIO()
        sys.stdout = out
        # generate sample config
        tmpdir = tempfile.gettempdir()
        self.config = ConfigParser.RawConfigParser()
        self.config.add_section('agent')
        self.config.set('agent', 'prefix', tmpdir)
        self.config.set('agent', 'cache_dir', "/var/lib/ambari-agent/cache")
        self.config.set('agent', 'tolerate_download_failures', "true")

    def test_reset(self):
        fileCache = FileCache(self.config)
        fileCache.uptodate_paths.append('dummy-path')
        fileCache.reset()
        self.assertFalse(fileCache.uptodate_paths)

    @patch.object(FileCache, "provide_directory")
    def test_get_service_base_dir(self, provide_directory_mock):
        provide_directory_mock.return_value = "dummy value"
        fileCache = FileCache(self.config)
        command = {
            'commandParams': {
                'service_package_folder':
                os.path.join('stacks', 'HDP', '2.1.1', 'services', 'ZOOKEEPER',
                             'package')
            }
        }
        res = fileCache.get_service_base_dir(command, "server_url_pref")
        self.assertEquals(
            pprint.pformat(provide_directory_mock.call_args_list[0][0]),
            "('/var/lib/ambari-agent/cache',\n "
            "{0},\n"
            " 'server_url_pref')".format(
                pprint.pformat(
                    os.path.join('stacks', 'HDP', '2.1.1', 'services',
                                 'ZOOKEEPER', 'package'))))
        self.assertEquals(res, "dummy value")

    @patch.object(FileCache, "provide_directory")
    def test_get_hook_base_dir(self, provide_directory_mock):
        fileCache = FileCache(self.config)
        # Check missing parameter
        command = {'commandParams': {}}
        base = fileCache.get_hook_base_dir(command, "server_url_pref")
        self.assertEqual(base, None)
        self.assertFalse(provide_directory_mock.called)

        # Check existing dir case
        command = {
            'commandParams': {
                'hooks_folder': os.path.join('HDP', '2.1.1', 'hooks')
            }
        }
        provide_directory_mock.return_value = "dummy value"
        fileCache = FileCache(self.config)
        res = fileCache.get_hook_base_dir(command, "server_url_pref")
        self.assertEquals(
            pprint.pformat(provide_directory_mock.call_args_list[0][0]),
            "('/var/lib/ambari-agent/cache', "
            "{0}, "
            "'server_url_pref')".format(
                pprint.pformat(os.path.join('stacks', 'HDP', '2.1.1',
                                            'hooks'))))
        self.assertEquals(res, "dummy value")

    @patch.object(FileCache, "provide_directory")
    def test_get_custom_actions_base_dir(self, provide_directory_mock):
        provide_directory_mock.return_value = "dummy value"
        fileCache = FileCache(self.config)
        res = fileCache.get_custom_actions_base_dir("server_url_pref")
        self.assertEquals(
            pprint.pformat(provide_directory_mock.call_args_list[0][0]),
            "('/var/lib/ambari-agent/cache', 'custom_actions', 'server_url_pref')"
        )
        self.assertEquals(res, "dummy value")

    @patch.object(FileCache, "build_download_url")
    @patch.object(FileCache, "fetch_url")
    @patch.object(FileCache, "read_hash_sum")
    @patch.object(FileCache, "invalidate_directory")
    @patch.object(FileCache, "unpack_archive")
    @patch.object(FileCache, "write_hash_sum")
    def test_provide_directory(self, write_hash_sum_mock, unpack_archive_mock,
                               invalidate_directory_mock, read_hash_sum_mock,
                               fetch_url_mock, build_download_url_mock):
        build_download_url_mock.return_value = "http://dummy-url/"
        HASH1 = "hash1"
        membuffer = MagicMock()
        membuffer.getvalue.return_value.strip.return_value = HASH1
        fileCache = FileCache(self.config)

        # Test uptodate dirs after start
        self.assertFalse(fileCache.uptodate_paths)
        path = os.path.join("cache_path", "subdirectory")
        # Test initial downloading (when dir does not exist)
        fetch_url_mock.return_value = membuffer
        read_hash_sum_mock.return_value = "hash2"
        res = fileCache.provide_directory("cache_path", "subdirectory",
                                          "server_url_prefix")
        self.assertTrue(invalidate_directory_mock.called)
        self.assertTrue(write_hash_sum_mock.called)
        self.assertEquals(fetch_url_mock.call_count, 2)
        self.assertEquals(pprint.pformat(fileCache.uptodate_paths),
                          pprint.pformat([path]))
        self.assertEquals(res, path)

        fetch_url_mock.reset_mock()
        write_hash_sum_mock.reset_mock()
        invalidate_directory_mock.reset_mock()
        unpack_archive_mock.reset_mock()

        # Test cache invalidation when local hash does not differ
        fetch_url_mock.return_value = membuffer
        read_hash_sum_mock.return_value = HASH1
        fileCache.reset()

        res = fileCache.provide_directory("cache_path", "subdirectory",
                                          "server_url_prefix")
        self.assertFalse(invalidate_directory_mock.called)
        self.assertFalse(write_hash_sum_mock.called)
        self.assertEquals(fetch_url_mock.call_count, 1)

        self.assertEquals(pprint.pformat(fileCache.uptodate_paths),
                          pprint.pformat([path]))
        self.assertEquals(res, path)

        fetch_url_mock.reset_mock()
        write_hash_sum_mock.reset_mock()
        invalidate_directory_mock.reset_mock()
        unpack_archive_mock.reset_mock()

        # Test execution path when path is up-to date (already checked)
        res = fileCache.provide_directory("cache_path", "subdirectory",
                                          "server_url_prefix")
        self.assertFalse(invalidate_directory_mock.called)
        self.assertFalse(write_hash_sum_mock.called)
        self.assertEquals(fetch_url_mock.call_count, 0)
        self.assertEquals(pprint.pformat(fileCache.uptodate_paths),
                          pprint.pformat([path]))
        self.assertEquals(res, path)

        # Check exception handling when tolerance is disabled
        self.config.set('agent', 'tolerate_download_failures', "false")
        fetch_url_mock.side_effect = self.caching_exc_side_effect
        fileCache = FileCache(self.config)
        try:
            fileCache.provide_directory("cache_path", "subdirectory",
                                        "server_url_prefix")
            self.fail('CachingException not thrown')
        except CachingException:
            pass  # Expected
        except Exception, e:
            self.fail('Unexpected exception thrown:' + str(e))

        # Check that unexpected exceptions are still propagated when
        # tolerance is enabled
        self.config.set('agent', 'tolerate_download_failures', "false")
        fetch_url_mock.side_effect = self.exc_side_effect
        fileCache = FileCache(self.config)
        try:
            fileCache.provide_directory("cache_path", "subdirectory",
                                        "server_url_prefix")
            self.fail('Exception not thrown')
        except Exception:
            pass  # Expected

        # Check exception handling when tolerance is enabled
        self.config.set('agent', 'tolerate_download_failures', "true")
        fetch_url_mock.side_effect = self.caching_exc_side_effect
        fileCache = FileCache(self.config)
        res = fileCache.provide_directory("cache_path", "subdirectory",
                                          "server_url_prefix")
        self.assertEquals(res, path)
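Taken together, the assertions above pin down provide_directory's control flow: return immediately for paths already in uptodate_paths, otherwise fetch the remote hash, compare it with the local one, and only on a mismatch invalidate, download, unpack, and record the new hash. A hedged reconstruction of that flow (method names follow the mocks; details such as the hash URL suffix and the tolerate_download_failures lookup are assumptions):

def provide_directory(self, cache_path, subdirectory, server_url_prefix):
    full_path = os.path.join(cache_path, subdirectory)
    if full_path in self.uptodate_paths:
        return full_path  # zero fetch_url calls, as the third phase asserts
    try:
        # First fetch: the remote hash (the '.hash' suffix is assumed)
        hash_url = self.build_download_url(server_url_prefix,
                                           subdirectory, 'archive.zip.hash')
        remote_hash = self.fetch_url(hash_url).getvalue().strip()
        if self.read_hash_sum(full_path) != remote_hash:
            # Second fetch: the archive itself
            url = self.build_download_url(server_url_prefix,
                                          subdirectory, 'archive.zip')
            membuffer = self.fetch_url(url)
            self.invalidate_directory(full_path)
            self.unpack_archive(membuffer, full_path)
            self.write_hash_sum(full_path, remote_hash)
    except CachingException:
        if not self.tolerate_download_failures:
            raise  # surfaces in the "tolerance disabled" phase
    self.uptodate_paths.append(full_path)
    return full_path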
Example 36
 def test_reset(self):
     fileCache = FileCache(self.config)
     fileCache.uptodate_paths.append('dummy-path')
     fileCache.reset()
     self.assertFalse(fileCache.uptodate_paths)
Example 37
 def test_build_download_url(self):
   fileCache = FileCache(self.config)
   url = fileCache.build_download_url('http://localhost:8080/resources/',
                                      'stacks/HDP/2.1.1/hooks', 'archive.zip')
   self.assertEqual(url,
       'http://localhost:8080/resources//stacks/HDP/2.1.1/hooks/archive.zip')
Example 38
  def test_provide_directory(self, write_hash_sum_mock, unpack_archive_mock,
                             invalidate_directory_mock,
                             read_hash_sum_mock, fetch_url_mock,
                             build_download_url_mock):
    build_download_url_mock.return_value = "http://dummy-url/"
    HASH1 = "hash1"
    membuffer = MagicMock()
    membuffer.getvalue.return_value.strip.return_value = HASH1
    fileCache = FileCache(self.config)

    # Test uptodate dirs after start
    self.assertFalse(fileCache.uptodate_paths)
    path = os.path.join("cache_path", "subdirectory")
    # Test initial downloading (when dir does not exist)
    fetch_url_mock.return_value = membuffer
    read_hash_sum_mock.return_value = "hash2"
    res = fileCache.provide_directory("cache_path", "subdirectory",
                                      "server_url_prefix")
    self.assertTrue(invalidate_directory_mock.called)
    self.assertTrue(write_hash_sum_mock.called)
    self.assertEquals(fetch_url_mock.call_count, 2)
    self.assertEquals(pprint.pformat(fileCache.uptodate_paths),
                      pprint.pformat([path]))
    self.assertEquals(res, path)

    fetch_url_mock.reset_mock()
    write_hash_sum_mock.reset_mock()
    invalidate_directory_mock.reset_mock()
    unpack_archive_mock.reset_mock()

    # Test cache invalidation when local hash does not differ
    fetch_url_mock.return_value = membuffer
    read_hash_sum_mock.return_value = HASH1
    fileCache.reset()

    res = fileCache.provide_directory("cache_path", "subdirectory",
                                      "server_url_prefix")
    self.assertFalse(invalidate_directory_mock.called)
    self.assertFalse(write_hash_sum_mock.called)
    self.assertEquals(fetch_url_mock.call_count, 1)

    self.assertEquals(pprint.pformat(fileCache.uptodate_paths),
                      pprint.pformat([path]))
    self.assertEquals(res, path)

    fetch_url_mock.reset_mock()
    write_hash_sum_mock.reset_mock()
    invalidate_directory_mock.reset_mock()
    unpack_archive_mock.reset_mock()

    # Test execution path when path is up-to date (already checked)
    res = fileCache.provide_directory("cache_path", "subdirectory",
                                      "server_url_prefix")
    self.assertFalse(invalidate_directory_mock.called)
    self.assertFalse(write_hash_sum_mock.called)
    self.assertEquals(fetch_url_mock.call_count, 0)
    self.assertEquals(pprint.pformat(fileCache.uptodate_paths),
                      pprint.pformat([path]))
    self.assertEquals(res, path)

    # Check exception handling when tolerance is disabled
    self.config.set('agent', 'tolerate_download_failures', "false")
    fetch_url_mock.side_effect = self.caching_exc_side_effect
    fileCache = FileCache(self.config)
    try:
      fileCache.provide_directory("cache_path", "subdirectory",
                                  "server_url_prefix")
      self.fail('CachingException not thrown')
    except CachingException:
      pass # Expected
    except Exception, e:
      self.fail('Unexpected exception thrown:' + str(e))
Example 39
class CustomServiceOrchestrator():
  """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

  SCRIPT_TYPE_PYTHON = "PYTHON"
  COMMAND_NAME_STATUS = "STATUS"
  CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
  CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

  PRE_HOOK_PREFIX="before"
  POST_HOOK_PREFIX="after"

  HOSTS_LIST_KEY = "all_hosts"
  PING_PORTS_KEY = "all_ping_ports"
  AMBARI_SERVER_HOST = "ambari_server_host"

  def __init__(self, config, controller):
    self.config = config
    self.tmp_dir = config.get('agent', 'prefix')
    self.exec_tmp_dir = config.get('agent', 'tmp_dir')
    self.file_cache = FileCache(config)
    self.python_executor = PythonExecutor(self.tmp_dir, config)
    self.status_commands_stdout = os.path.join(self.tmp_dir,
                                               'status_command_stdout.txt')
    self.status_commands_stderr = os.path.join(self.tmp_dir,
                                               'status_command_stderr.txt')
    self.public_fqdn = hostname.public_hostname(config)
    # cache reset will be called on every agent registration
    controller.registration_listeners.append(self.file_cache.reset)
    
    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore failure
    self.commands_in_progress_lock = threading.RLock()
    self.commands_in_progress = {}

  def map_task_to_process(self, task_id, processId):
    with self.commands_in_progress_lock:
      logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
      self.commands_in_progress[task_id] = processId

  def cancel_command(self, task_id, reason):
    with self.commands_in_progress_lock:
      if task_id in self.commands_in_progress.keys():
        pid = self.commands_in_progress.get(task_id)
        self.commands_in_progress[task_id] = reason
        logger.info("Canceling command with task_id - {tid}, " \
                    "reason - {reason} . Killing process {pid}"
        .format(tid = str(task_id), reason = reason, pid = pid))
        shell.kill_process_with_children(pid)
      else: 
        logger.warn("Unable to find pid by taskId = %s"%task_id)

  def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name = None,
                 override_output_files = True):
    """
    forced_command_name may be specified manually. In this case, value, defined at
    command json, is ignored.
    """
    try:
      script_type = command['commandParams']['script_type']
      script = command['commandParams']['script']
      timeout = int(command['commandParams']['command_timeout'])
      
      if 'hostLevelParams' in command and 'jdk_location' in command['hostLevelParams']:
        server_url_prefix = command['hostLevelParams']['jdk_location']
      else:
        server_url_prefix = command['commandParams']['jdk_location']
      task_id = "status"
      try:
        task_id = command['taskId']
        command_name = command['roleCommand']
      except KeyError:
        pass # Status commands have no taskId

      if forced_command_name is not None: # A forced name overrides the command JSON value
        command_name = forced_command_name

      if command_name == self.CUSTOM_ACTION_COMMAND:
        base_dir = self.file_cache.get_custom_actions_base_dir(server_url_prefix)
        script_tuple = (os.path.join(base_dir, script), base_dir)
        hook_dir = None
      else:
        if command_name == self.CUSTOM_COMMAND_COMMAND:
          command_name = command['hostLevelParams']['custom_command']
        hook_dir = self.file_cache.get_hook_base_dir(command, server_url_prefix)
        base_dir = self.file_cache.get_service_base_dir(command, server_url_prefix)
        script_path = self.resolve_script_path(base_dir, script, script_type)
        script_tuple = (script_path, base_dir)

      tmpstrucoutfile = os.path.join(self.tmp_dir,
                                    "structured-out-{0}.json".format(task_id))

      if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
        # We don't support anything else yet
        message = "Unknown script type {0}".format(script_type)
        raise AgentException(message)
      # Execute command using proper interpreter
      handle = None
      if command.has_key('__handle'):
        handle = command['__handle']
        handle.on_background_command_started = self.map_task_to_process
        del command['__handle']
      
      json_path = self.dump_command_to_json(command)
      pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.PRE_HOOK_PREFIX, command_name, script_type)
      post_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.POST_HOOK_PREFIX, command_name, script_type)
      py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
      # filter None values
      filtered_py_file_list = [i for i in py_file_list if i]

      logger_level = logging.getLevelName(logger.level)

      # Executing hooks and script
      ret = None
      from ActionQueue import ActionQueue
      if command.has_key('commandType') and command['commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and len(filtered_py_file_list) > 1:
        raise AgentException("Background commands are supported without hooks only")

      for py_file, current_base_dir in filtered_py_file_list:
        script_params = [command_name, json_path, current_base_dir]
        ret = self.python_executor.run_file(py_file, script_params,
                               self.exec_tmp_dir, tmpoutfile, tmperrfile, timeout,
                               tmpstrucoutfile, logger_level, self.map_task_to_process,
                               task_id, override_output_files, handle = handle)
        # Next run_file() invocations should always append to current output
        override_output_files = False
        if ret['exitcode'] != 0:
          break

      if not ret: # Something went wrong
        raise AgentException("No script has been executed")

      # if canceled and not background command
      if handle is None:
        cancel_reason = self.command_canceled_reason(task_id)
        if cancel_reason:
          ret['stdout'] += cancel_reason
          ret['stderr'] += cancel_reason
  
          with open(tmpoutfile, "a") as f:
            f.write(cancel_reason)
          with open(tmperrfile, "a") as f:
            f.write(cancel_reason)

    except Exception: # We do not want to let agent fail completely
      exc_type, exc_obj, exc_tb = sys.exc_info()
      message = "Catched an exception while executing "\
        "custom service command: {0}: {1}".format(exc_type, exc_obj)
      logger.exception(message)
      ret = {
        'stdout' : message,
        'stderr' : message,
        'structuredOut' : '{}',
        'exitcode': 1,
      }
    return ret

  def command_canceled_reason(self, task_id):
    with self.commands_in_progress_lock:
      if self.commands_in_progress.has_key(task_id): # Background commands are not pushed into this collection (TODO)
        logger.debug('Pop with taskId %s' % task_id)
        pid = self.commands_in_progress.pop(task_id)
        if not isinstance(pid, int):
          return '\nCommand aborted. ' + pid
    return None
        
  def requestComponentStatus(self, command):
    """
    Component status is determined by the exit code returned by runCommand().
    Exit code 0 means that the component is running; any other exit code means
    that it is not.
    """
    override_output_files=True # by default, we override status command output
    if logger.level == logging.DEBUG:
      override_output_files = False
    res = self.runCommand(command, self.status_commands_stdout,
                          self.status_commands_stderr, self.COMMAND_NAME_STATUS,
                          override_output_files=override_output_files)
    return res

  def resolve_script_path(self, base_dir, script, script_type):
    """
    Encapsulates the logic of determining the script location.
    """
    path = os.path.join(base_dir, script)
    if not os.path.exists(path):
      message = "Script {0} does not exist".format(path)
      raise AgentException(message)
    return path


  def resolve_hook_script_path(self, stack_hooks_dir, prefix, command_name, script_type):
    """
    Returns a tuple(path to hook script, hook base dir) according to string prefix
    and command name. If script does not exist, returns None
    """
    if not stack_hooks_dir:
      return None
    hook_dir = "{0}-{1}".format(prefix, command_name)
    hook_base_dir = os.path.join(stack_hooks_dir, hook_dir)
    hook_script_path = os.path.join(hook_base_dir, "scripts", "hook.py")
    if not os.path.isfile(hook_script_path):
      logger.debug("Hook script {0} not found, skipping".format(hook_script_path))
      return None
    return hook_script_path, hook_base_dir


  def dump_command_to_json(self, command):
    """
    Converts command to json file and returns file path
    """
    # Perform a few modifications to the command for compatibility before dumping it
    public_fqdn = self.public_fqdn
    command['public_hostname'] = public_fqdn
    # Now, dump the json file
    command_type = command['commandType']
    from ActionQueue import ActionQueue  # To avoid cyclic dependency
    if command_type == ActionQueue.STATUS_COMMAND:
      # These files are frequently created, that's why we don't
      # store them all, but only the latest one
      file_path = os.path.join(self.tmp_dir, "status_command.json")
    else:
      task_id = command['taskId']
      if 'clusterHostInfo' in command and command['clusterHostInfo']:
        command['clusterHostInfo'] = self.decompressClusterHostInfo(command['clusterHostInfo'])
      file_path = os.path.join(self.tmp_dir, "command-{0}.json".format(task_id))
    # The JSON may contain passwords, that's why we need proper permissions
    if os.path.isfile(file_path):
      os.unlink(file_path)
    with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT,
                           0600), 'w') as f:
      content = json.dumps(command, sort_keys = False, indent = 4)
      f.write(content)
    return file_path
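A hypothetical invocation of the class above. The config and controller wiring and all command values are stand-ins, not part of the example; this only shows the shape of the input runCommand expects:

# Hypothetical driver code; config/controller construction is assumed.
orchestrator = CustomServiceOrchestrator(config, controller)
command = {
  'commandType': 'EXECUTION_COMMAND',
  'taskId': 42,
  'roleCommand': 'START',
  'commandParams': {
    'script_type': 'PYTHON',
    'script': 'scripts/master.py',
    'command_timeout': '600',
    'jdk_location': 'http://ambari-server:8080/resources/',
  },
  'hostLevelParams': {},
}
ret = orchestrator.runCommand(command, '/tmp/cmd_out.txt', '/tmp/cmd_err.txt')
print ret['exitcode']  # 0 on success; stdout/stderr land in the tmp files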
Ejemplo n.º 42
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"
    COMMAND_NAME_STATUS = "STATUS"
    CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
    CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

    PRE_HOOK_PREFIX = "before"
    POST_HOOK_PREFIX = "after"

    HOSTS_LIST_KEY = "all_hosts"
    PING_PORTS_KEY = "all_ping_ports"
    AMBARI_SERVER_HOST = "ambari_server_host"

    def __init__(self, config, controller):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.exec_tmp_dir = config.get('agent', 'tmp_dir')
        self.file_cache = FileCache(config)
        self.python_executor = PythonExecutor(self.tmp_dir, config)
        self.status_commands_stdout = os.path.join(
            self.tmp_dir, 'status_command_stdout.txt')
        self.status_commands_stderr = os.path.join(
            self.tmp_dir, 'status_command_stderr.txt')
        self.public_fqdn = hostname.public_hostname(config)
        # cache reset will be called on every agent registration
        controller.registration_listeners.append(self.file_cache.reset)

        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore fail
        self.commands_in_progress_lock = threading.RLock()
        self.commands_in_progress = {}

    def map_task_to_process(self, task_id, processId):
        with self.commands_in_progress_lock:
            logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
            self.commands_in_progress[task_id] = processId

    def cancel_command(self, task_id, reason):
        with self.commands_in_progress_lock:
            if task_id in self.commands_in_progress.keys():
                pid = self.commands_in_progress.get(task_id)
                self.commands_in_progress[task_id] = reason
                logger.info("Canceling command with task_id - {tid}, " \
                            "reason - {reason} . Killing process {pid}"
                .format(tid = str(task_id), reason = reason, pid = pid))
                shell.kill_process_with_children(pid)
            else:
                logger.warn("Unable to find pid by taskId = %s" % task_id)

    def runCommand(self,
                   command,
                   tmpoutfile,
                   tmperrfile,
                   forced_command_name=None,
                   override_output_files=True):
        """
    forced_command_name may be specified manually. In this case, value, defined at
    command json, is ignored.
    """
        try:
            script_type = command['commandParams']['script_type']
            script = command['commandParams']['script']
            timeout = int(command['commandParams']['command_timeout'])

            if 'hostLevelParams' in command and 'jdk_location' in command[
                    'hostLevelParams']:
                server_url_prefix = command['hostLevelParams']['jdk_location']
            else:
                server_url_prefix = command['commandParams']['jdk_location']
            task_id = "status"
            try:
                task_id = command['taskId']
                command_name = command['roleCommand']
            except KeyError:
                pass  # Status commands have no taskId

            if forced_command_name is not None:  # A forced name overrides the command JSON value
                command_name = forced_command_name

            if command_name == self.CUSTOM_ACTION_COMMAND:
                base_dir = self.file_cache.get_custom_actions_base_dir(
                    server_url_prefix)
                script_tuple = (os.path.join(base_dir, script), base_dir)
                hook_dir = None
            else:
                if command_name == self.CUSTOM_COMMAND_COMMAND:
                    command_name = command['hostLevelParams']['custom_command']
                hook_dir = self.file_cache.get_hook_base_dir(
                    command, server_url_prefix)
                base_dir = self.file_cache.get_service_base_dir(
                    command, server_url_prefix)
                script_path = self.resolve_script_path(base_dir, script,
                                                       script_type)
                script_tuple = (script_path, base_dir)

            tmpstrucoutfile = os.path.join(
                self.tmp_dir, "structured-out-{0}.json".format(task_id))

            if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
                # We don't support anything else yet
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)
            # Execute command using proper interpreter
            handle = None
            if command.has_key('__handle'):
                handle = command['__handle']
                handle.on_background_command_started = self.map_task_to_process
                del command['__handle']

            json_path = self.dump_command_to_json(command)
            pre_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.PRE_HOOK_PREFIX, command_name, script_type)
            post_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.POST_HOOK_PREFIX, command_name, script_type)
            py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
            # filter None values
            filtered_py_file_list = [i for i in py_file_list if i]

            logger_level = logging.getLevelName(logger.level)

            # Executing hooks and script
            ret = None
            from ActionQueue import ActionQueue
            if (command.has_key('commandType') and command['commandType']
                    == ActionQueue.BACKGROUND_EXECUTION_COMMAND
                    and len(filtered_py_file_list) > 1):
                raise AgentException(
                    "Background commands are supported without hooks only")

            for py_file, current_base_dir in filtered_py_file_list:
                script_params = [command_name, json_path, current_base_dir]
                ret = self.python_executor.run_file(py_file,
                                                    script_params,
                                                    self.exec_tmp_dir,
                                                    tmpoutfile,
                                                    tmperrfile,
                                                    timeout,
                                                    tmpstrucoutfile,
                                                    logger_level,
                                                    self.map_task_to_process,
                                                    task_id,
                                                    override_output_files,
                                                    handle=handle)
                # Next run_file() invocations should always append to current output
                override_output_files = False
                if ret['exitcode'] != 0:
                    break

            if not ret:  # Something went wrong
                raise AgentException("No script has been executed")

            # if canceled and not background command
            if handle is None:
                cancel_reason = self.command_canceled_reason(task_id)
                if cancel_reason:
                    ret['stdout'] += cancel_reason
                    ret['stderr'] += cancel_reason

                    with open(tmpoutfile, "a") as f:
                        f.write(cancel_reason)
                    with open(tmperrfile, "a") as f:
                        f.write(cancel_reason)

        except Exception:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Catched an exception while executing "\
              "custom service command: {0}: {1}".format(exc_type, exc_obj)
            logger.exception(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'structuredOut': '{}',
                'exitcode': 1,
            }
        return ret

    def command_canceled_reason(self, task_id):
        with self.commands_in_progress_lock:
            if self.commands_in_progress.has_key(
                    task_id
            ):  # Background commands are not pushed into this collection (TODO)
                logger.debug('Pop with taskId %s' % task_id)
                pid = self.commands_in_progress.pop(task_id)
                if not isinstance(pid, int):
                    return '\nCommand aborted. ' + pid
        return None

    def requestComponentStatus(self, command):
        """
    Component status is determined by the exit code returned by runCommand().
    Exit code 0 means that the component is running; any other exit code means
    that it is not.
    """
        override_output_files = True  # by default, we override status command output
        if logger.level == logging.DEBUG:
            override_output_files = False
        res = self.runCommand(command,
                              self.status_commands_stdout,
                              self.status_commands_stderr,
                              self.COMMAND_NAME_STATUS,
                              override_output_files=override_output_files)
        return res

    def resolve_script_path(self, base_dir, script, script_type):
        """
    Encapsulates the logic of determining the script location.
    """
        path = os.path.join(base_dir, script)
        if not os.path.exists(path):
            message = "Script {0} does not exist".format(path)
            raise AgentException(message)
        return path

    def resolve_hook_script_path(self, stack_hooks_dir, prefix, command_name,
                                 script_type):
        """
    Returns a tuple(path to hook script, hook base dir) according to string prefix
    and command name. If script does not exist, returns None
    """
        if not stack_hooks_dir:
            return None
        hook_dir = "{0}-{1}".format(prefix, command_name)
        hook_base_dir = os.path.join(stack_hooks_dir, hook_dir)
        hook_script_path = os.path.join(hook_base_dir, "scripts", "hook.py")
        if not os.path.isfile(hook_script_path):
            logger.debug(
                "Hook script {0} not found, skipping".format(hook_script_path))
            return None
        return hook_script_path, hook_base_dir

    def dump_command_to_json(self, command):
        """
    Converts command to json file and returns file path
    """
        # Perform a few modifications to the command for compatibility before dumping it
        public_fqdn = self.public_fqdn
        command['public_hostname'] = public_fqdn
        # Now, dump the json file
        command_type = command['commandType']
        from ActionQueue import ActionQueue  # To avoid cyclic dependency
        if command_type == ActionQueue.STATUS_COMMAND:
            # These files are frequently created, that's why we don't
            # store them all, but only the latest one
            file_path = os.path.join(self.tmp_dir, "status_command.json")
        else:
            task_id = command['taskId']
            if 'clusterHostInfo' in command and command['clusterHostInfo']:
                command['clusterHostInfo'] = self.decompressClusterHostInfo(
                    command['clusterHostInfo'])
            file_path = os.path.join(self.tmp_dir,
                                     "command-{0}.json".format(task_id))
        # The JSON may contain passwords, that's why we need proper permissions
        if os.path.isfile(file_path):
            os.unlink(file_path)
        with os.fdopen(os.open(file_path, os.O_WRONLY | os.O_CREAT, 0600),
                       'w') as f:
            content = json.dumps(command, sort_keys=False, indent=4)
            f.write(content)
        return file_path
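dump_command_to_json in both variants above uses os.fdopen(os.open(..., 0600)) rather than a plain open() followed by chmod: the mode is applied at creation time, so there is no window in which another local user could read the dumped passwords. The pattern, condensed into a standalone sketch:

import json
import os

def write_private_json(path, payload):
    # O_CREAT with mode 0600 sets the permissions atomically at creation,
    # unlike open() + os.chmod(), which briefly exposes the file.
    if os.path.isfile(path):
        os.unlink(path)
    with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, 0600), 'w') as f:
        f.write(json.dumps(payload, indent=4))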
Ejemplo n.º 43
class CustomServiceOrchestrator():
  """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

  SCRIPT_TYPE_PYTHON = "PYTHON"
  COMMAND_NAME_STATUS = "STATUS"
  COMMAND_NAME_SECURITY_STATUS = "SECURITY_STATUS"
  CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
  CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

  PRE_HOOK_PREFIX="before"
  POST_HOOK_PREFIX="after"

  HOSTS_LIST_KEY = "all_hosts"
  PING_PORTS_KEY = "all_ping_ports"
  RACKS_KEY = "all_racks"
  IPV4_ADDRESSES_KEY = "all_ipv4_ips"

  AMBARI_SERVER_HOST = "ambari_server_host"
  DONT_DEBUG_FAILURES_FOR_COMMANDS = [COMMAND_NAME_SECURITY_STATUS, COMMAND_NAME_STATUS]
  REFLECTIVELY_RUN_COMMANDS = [COMMAND_NAME_SECURITY_STATUS, COMMAND_NAME_STATUS] # commands which run frequently; reflective execution speeds them up

  def __init__(self, config, controller):
    self.config = config
    self.tmp_dir = config.get('agent', 'prefix')
    self.exec_tmp_dir = Constants.AGENT_TMP_DIR
    self.file_cache = FileCache(config)
    self.status_commands_stdout = os.path.join(self.tmp_dir,
                                               'status_command_stdout.txt')
    self.status_commands_stderr = os.path.join(self.tmp_dir,
                                               'status_command_stderr.txt')
    self.public_fqdn = hostname.public_hostname(config)
    # cache reset will be called on every agent registration
    controller.registration_listeners.append(self.file_cache.reset)

    # Clean up old status command files if any
    try:
      os.unlink(self.status_commands_stdout)
      os.unlink(self.status_commands_stderr)
    except OSError:
      pass # Ignore failure
    self.commands_in_progress_lock = threading.RLock()
    self.commands_in_progress = {}

  def map_task_to_process(self, task_id, processId):
    with self.commands_in_progress_lock:
      logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
      self.commands_in_progress[task_id] = processId

  def cancel_command(self, task_id, reason):
    with self.commands_in_progress_lock:
      if task_id in self.commands_in_progress.keys():
        pid = self.commands_in_progress.get(task_id)
        self.commands_in_progress[task_id] = reason
        logger.info("Canceling command with task_id - {tid}, " \
                    "reason - {reason} . Killing process {pid}"
                    .format(tid=str(task_id), reason=reason, pid=pid))
        shell.kill_process_with_children(pid)
      else: 
        logger.warn("Unable to find pid by taskId = %s" % task_id)

  def get_py_executor(self, forced_command_name):
    """
    Wrapper for unit testing
    :return: the executor used to run the command's scripts
    """
    if forced_command_name in self.REFLECTIVELY_RUN_COMMANDS:
      return PythonReflectiveExecutor(self.tmp_dir, self.config)
    else:
      return PythonExecutor(self.tmp_dir, self.config)

  def runCommand(self, command, tmpoutfile, tmperrfile, forced_command_name=None,
                 override_output_files=True, retry=False):
    """
    forced_command_name may be specified manually. In this case, value, defined at
    command json, is ignored.
    """
    try:
      script_type = command['commandParams']['script_type']
      script = command['commandParams']['script']
      timeout = int(command['commandParams']['command_timeout'])

      if 'hostLevelParams' in command and 'jdk_location' in command['hostLevelParams']:
        server_url_prefix = command['hostLevelParams']['jdk_location']
      else:
        server_url_prefix = command['commandParams']['jdk_location']
        
      task_id = "status"
      
      try:
        task_id = command['taskId']
        command_name = command['roleCommand']
      except KeyError:
        pass  # Status commands have no taskId

      if forced_command_name is not None:  # A forced name overrides the command JSON value
        command_name = forced_command_name

      if command_name == self.CUSTOM_ACTION_COMMAND:
        base_dir = self.file_cache.get_custom_actions_base_dir(server_url_prefix)
        script_tuple = (os.path.join(base_dir, 'scripts', script), base_dir)
        hook_dir = None
      else:
        if command_name == self.CUSTOM_COMMAND_COMMAND:
          command_name = command['hostLevelParams']['custom_command']

        # forces a hash challenge on the directories to keep them updated, even
        # if the return value is not used
        self.file_cache.get_host_scripts_base_dir(server_url_prefix)          
        hook_dir = self.file_cache.get_hook_base_dir(command, server_url_prefix)
        base_dir = self.file_cache.get_service_base_dir(command, server_url_prefix)
        
        script_path = self.resolve_script_path(base_dir, script)
        script_tuple = (script_path, base_dir)

      tmpstrucoutfile = os.path.join(self.tmp_dir,
                                    "structured-out-{0}.json".format(task_id))

      # We don't support anything else yet
      if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
        message = "Unknown script type {0}".format(script_type)
        raise AgentException(message)

      # Execute command using proper interpreter
      handle = None
      if command.has_key('__handle'):
        handle = command['__handle']
        handle.on_background_command_started = self.map_task_to_process
        del command['__handle']

      json_path = self.dump_command_to_json(command, retry)
      pre_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.PRE_HOOK_PREFIX, command_name, script_type)
      post_hook_tuple = self.resolve_hook_script_path(hook_dir,
          self.POST_HOOK_PREFIX, command_name, script_type)
      py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
      # filter None values
      filtered_py_file_list = [i for i in py_file_list if i]

      logger_level = logging.getLevelName(logger.level)

      # Executing hooks and script
      ret = None
      from ActionQueue import ActionQueue
      if command.has_key('commandType') and command['commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and len(filtered_py_file_list) > 1:
        raise AgentException("Background commands are supported without hooks only")

      python_executor = self.get_py_executor(forced_command_name)
      for py_file, current_base_dir in filtered_py_file_list:
        log_info_on_failure = command_name not in self.DONT_DEBUG_FAILURES_FOR_COMMANDS
        script_params = [command_name, json_path, current_base_dir, tmpstrucoutfile, logger_level, self.exec_tmp_dir]
        ret = python_executor.run_file(py_file, script_params,
                               tmpoutfile, tmperrfile, timeout,
                               tmpstrucoutfile, self.map_task_to_process,
                               task_id, override_output_files, handle = handle, log_info_on_failure=log_info_on_failure)
        # Next run_file() invocations should always append to current output
        override_output_files = False
        if ret['exitcode'] != 0:
          break

      if not ret: # Something went wrong
        raise AgentException("No script has been executed")

      # if canceled and not background command
      if handle is None:
        cancel_reason = self.command_canceled_reason(task_id)
        if cancel_reason:
          ret['stdout'] += cancel_reason
          ret['stderr'] += cancel_reason

          with open(tmpoutfile, "a") as f:
            f.write(cancel_reason)
          with open(tmperrfile, "a") as f:
            f.write(cancel_reason)

    except Exception, e: # We do not want to let agent fail completely
      exc_type, exc_obj, exc_tb = sys.exc_info()
      message = "Caught an exception while executing "\
        "custom service command: {0}: {1}; {2}".format(exc_type, exc_obj, str(e))
      logger.exception(message)
      ret = {
        'stdout' : message,
        'stderr' : message,
        'structuredOut' : '{}',
        'exitcode': 1,
      }
    return ret
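This variant adds get_py_executor: STATUS and SECURITY_STATUS commands fire constantly, so they are routed to a PythonReflectiveExecutor that avoids spawning a fresh interpreter for every run. The idea, reduced to a sketch (an illustration of in-process execution under that assumption, not Ambari's actual PythonReflectiveExecutor):

import imp
import sys

def run_script_in_process(py_file, script_params):
    # Illustrative only: load the script as a module and drive it directly,
    # skipping interpreter startup. Assumes the script tolerates being
    # re-imported and reads its arguments from sys.argv.
    old_argv = sys.argv
    sys.argv = [py_file] + list(script_params)
    try:
        imp.load_source('reflective_script', py_file)  # executes module body
    finally:
        sys.argv = old_argv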
Ejemplo n.º 44
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"
    COMMAND_NAME_STATUS = "STATUS"
    COMMAND_NAME_SECURITY_STATUS = "SECURITY_STATUS"
    CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
    CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

    PRE_HOOK_PREFIX = "before"
    POST_HOOK_PREFIX = "after"

    HOSTS_LIST_KEY = "all_hosts"
    PING_PORTS_KEY = "all_ping_ports"
    AMBARI_SERVER_HOST = "ambari_server_host"

    def __init__(self, config, controller):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.exec_tmp_dir = config.get('agent', 'tmp_dir')
        self.file_cache = FileCache(config)
        self.python_executor = PythonExecutor(self.tmp_dir, config)
        self.status_commands_stdout = os.path.join(
            self.tmp_dir, 'status_command_stdout.txt')
        self.status_commands_stderr = os.path.join(
            self.tmp_dir, 'status_command_stderr.txt')
        self.public_fqdn = hostname.public_hostname(config)
        # cache reset will be called on every agent registration
        controller.registration_listeners.append(self.file_cache.reset)

        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore failure
        self.commands_in_progress_lock = threading.RLock()
        self.commands_in_progress = {}

    def map_task_to_process(self, task_id, processId):
        with self.commands_in_progress_lock:
            logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
            self.commands_in_progress[task_id] = processId

    def cancel_command(self, task_id, reason):
        with self.commands_in_progress_lock:
            if task_id in self.commands_in_progress.keys():
                pid = self.commands_in_progress.get(task_id)
                self.commands_in_progress[task_id] = reason
                logger.info("Canceling command with task_id - {tid}, " \
                            "reason - {reason} . Killing process {pid}"
                            .format(tid=str(task_id), reason=reason, pid=pid))
                shell.kill_process_with_children(pid)
            else:
                logger.warn("Unable to find pid by taskId = %s" % task_id)

    def runCommand(self,
                   command,
                   tmpoutfile,
                   tmperrfile,
                   forced_command_name=None,
                   override_output_files=True):
        """
    forced_command_name may be specified manually. In this case, value, defined at
    command json, is ignored.
    """
        try:
            script_type = command['commandParams']['script_type']
            script = command['commandParams']['script']
            timeout = int(command['commandParams']['command_timeout'])

            if 'hostLevelParams' in command and 'jdk_location' in command[
                    'hostLevelParams']:
                server_url_prefix = command['hostLevelParams']['jdk_location']
            else:
                server_url_prefix = command['commandParams']['jdk_location']

            task_id = "status"

            try:
                task_id = command['taskId']
                command_name = command['roleCommand']
            except KeyError:
                pass  # Status commands have no taskId

            if forced_command_name is not None:  # A forced name overrides the command JSON value
                command_name = forced_command_name

            if command_name == self.CUSTOM_ACTION_COMMAND:
                base_dir = self.file_cache.get_custom_actions_base_dir(
                    server_url_prefix)
                script_tuple = (os.path.join(base_dir, 'scripts',
                                             script), base_dir)
                hook_dir = None
            else:
                if command_name == self.CUSTOM_COMMAND_COMMAND:
                    command_name = command['hostLevelParams']['custom_command']

                # forces a hash challenge on the directories to keep them updated, even
                # if the return value is not used
                self.file_cache.get_host_scripts_base_dir(server_url_prefix)
                hook_dir = self.file_cache.get_hook_base_dir(
                    command, server_url_prefix)
                base_dir = self.file_cache.get_service_base_dir(
                    command, server_url_prefix)

                script_path = self.resolve_script_path(base_dir, script)
                script_tuple = (script_path, base_dir)

            tmpstrucoutfile = os.path.join(
                self.tmp_dir, "structured-out-{0}.json".format(task_id))

            # We don't support anything else yet
            if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)

            # Execute command using proper interpreter
            handle = None
            if command.has_key('__handle'):
                handle = command['__handle']
                handle.on_background_command_started = self.map_task_to_process
                del command['__handle']

            json_path = self.dump_command_to_json(command)
            pre_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.PRE_HOOK_PREFIX, command_name, script_type)
            post_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.POST_HOOK_PREFIX, command_name, script_type)
            py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
            # filter None values
            filtered_py_file_list = [i for i in py_file_list if i]

            logger_level = logging.getLevelName(logger.level)

            # Executing hooks and script
            ret = None
            from ActionQueue import ActionQueue
            if command.has_key('commandType') and command[
                    'commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and len(
                        filtered_py_file_list) > 1:
                raise AgentException(
                    "Background commands are supported without hooks only")

            for py_file, current_base_dir in filtered_py_file_list:
                script_params = [command_name, json_path, current_base_dir]
                ret = self.python_executor.run_file(py_file,
                                                    script_params,
                                                    self.exec_tmp_dir,
                                                    tmpoutfile,
                                                    tmperrfile,
                                                    timeout,
                                                    tmpstrucoutfile,
                                                    logger_level,
                                                    self.map_task_to_process,
                                                    task_id,
                                                    override_output_files,
                                                    handle=handle)
                # Next run_file() invocations should always append to current output
                override_output_files = False
                if ret['exitcode'] != 0:
                    break

            if not ret:  # Something went wrong
                raise AgentException("No script has been executed")

            # if canceled and not background command
            if handle is None:
                cancel_reason = self.command_canceled_reason(task_id)
                if cancel_reason:
                    ret['stdout'] += cancel_reason
                    ret['stderr'] += cancel_reason

                    with open(tmpoutfile, "a") as f:
                        f.write(cancel_reason)
                    with open(tmperrfile, "a") as f:
                        f.write(cancel_reason)

        except Exception, e:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Caught an exception while executing "\
              "custom service command: {0}: {1}; {2}".format(exc_type, exc_obj, str(e))
            logger.exception(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'structuredOut': '{}',
                'exitcode': 1,
            }
        return ret
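Every runCommand variant in this listing repeats the same jdk_location fallback (host-level parameters first, then command parameters). Extracted as a standalone helper it would read as follows; this is a refactoring sketch, not code from any of the examples:

def get_server_url_prefix(command):
    # Prefer the host-level value; fall back to the command parameters.
    host_params = command.get('hostLevelParams', {})
    if 'jdk_location' in host_params:
        return host_params['jdk_location']
    return command['commandParams']['jdk_location']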
Ejemplo n.º 45
def include_dependent_definition_headers(file_generator: FileGenerator,
                                         file_cache: FileCache):
    file_generator.include_user_header(
        file_cache.check_and_throw_exception_header())
Ejemplo n.º 46
class CustomServiceOrchestrator():
    """
  Executes a command for custom service. stdout and stderr are written to
  tmpoutfile and to tmperrfile respectively.
  """

    SCRIPT_TYPE_PYTHON = "PYTHON"
    COMMAND_TYPE = "commandType"
    COMMAND_NAME_STATUS = "STATUS"
    COMMAND_NAME_SECURITY_STATUS = "SECURITY_STATUS"
    CUSTOM_ACTION_COMMAND = 'ACTIONEXECUTE'
    CUSTOM_COMMAND_COMMAND = 'CUSTOM_COMMAND'

    PRE_HOOK_PREFIX = "before"
    POST_HOOK_PREFIX = "after"

    HOSTS_LIST_KEY = "all_hosts"
    PING_PORTS_KEY = "all_ping_ports"
    RACKS_KEY = "all_racks"
    IPV4_ADDRESSES_KEY = "all_ipv4_ips"

    AMBARI_SERVER_HOST = "ambari_server_host"
    AMBARI_SERVER_PORT = "ambari_server_port"
    AMBARI_SERVER_USE_SSL = "ambari_server_use_ssl"

    FREQUENT_COMMANDS = [COMMAND_NAME_SECURITY_STATUS, COMMAND_NAME_STATUS]
    DONT_DEBUG_FAILURES_FOR_COMMANDS = FREQUENT_COMMANDS
    REFLECTIVELY_RUN_COMMANDS = FREQUENT_COMMANDS  # commands which run frequently; reflective execution speeds them up
    DONT_BACKUP_LOGS_FOR_COMMANDS = FREQUENT_COMMANDS

    # Path where hadoop credential JARS will be available
    DEFAULT_CREDENTIAL_SHELL_LIB_PATH = '/var/lib/ambari-agent/cred/lib'
    DEFAULT_CREDENTIAL_CONF_DIR = '/var/lib/ambari-agent/cred/conf'
    DEFAULT_CREDENTIAL_SHELL_CMD = 'org.apache.hadoop.security.alias.CredentialShell'

    # The property name used by the hadoop credential provider
    CREDENTIAL_PROVIDER_PROPERTY_NAME = 'hadoop.security.credential.provider.path'

    def __init__(self, config, controller):
        self.config = config
        self.tmp_dir = config.get('agent', 'prefix')
        self.force_https_protocol = config.get_force_https_protocol()
        self.exec_tmp_dir = Constants.AGENT_TMP_DIR
        self.file_cache = FileCache(config)
        self.status_commands_stdout = os.path.join(
            self.tmp_dir, 'status_command_stdout.txt')
        self.status_commands_stderr = os.path.join(
            self.tmp_dir, 'status_command_stderr.txt')
        self.public_fqdn = hostname.public_hostname(config)
        # cache reset will be called on every agent registration
        controller.registration_listeners.append(self.file_cache.reset)

        # Construct the hadoop credential lib JARs path
        self.credential_shell_lib_path = os.path.join(
            config.get('security', 'credential_lib_dir',
                       self.DEFAULT_CREDENTIAL_SHELL_LIB_PATH), '*')

        self.credential_conf_dir = config.get('security',
                                              'credential_conf_dir',
                                              self.DEFAULT_CREDENTIAL_CONF_DIR)

        self.credential_shell_cmd = config.get(
            'security', 'credential_shell_cmd',
            self.DEFAULT_CREDENTIAL_SHELL_CMD)

        # Clean up old status command files if any
        try:
            os.unlink(self.status_commands_stdout)
            os.unlink(self.status_commands_stderr)
        except OSError:
            pass  # Ignore failure
        self.commands_in_progress_lock = threading.RLock()
        self.commands_in_progress = {}

    def map_task_to_process(self, task_id, processId):
        with self.commands_in_progress_lock:
            logger.debug('Maps taskId=%s to pid=%s' % (task_id, processId))
            self.commands_in_progress[task_id] = processId

    def cancel_command(self, task_id, reason):
        with self.commands_in_progress_lock:
            if task_id in self.commands_in_progress.keys():
                pid = self.commands_in_progress.get(task_id)
                self.commands_in_progress[task_id] = reason
                logger.info("Canceling command with taskId = {tid}, " \
                            "reason - {reason} . Killing process {pid}"
                            .format(tid=str(task_id), reason=reason, pid=pid))
                shell.kill_process_with_children(pid)
            else:
                logger.warn(
                    "Unable to find process associated with taskId = %s" %
                    task_id)

    def get_py_executor(self, forced_command_name):
        """
    Wrapper for unit testing
    :return: the executor used to run the command's scripts
    """
        if forced_command_name in self.REFLECTIVELY_RUN_COMMANDS:
            return PythonReflectiveExecutor(self.tmp_dir, self.config)
        else:
            return PythonExecutor(self.tmp_dir, self.config)

    def getProviderDirectory(self, service_name):
        """
    Gets the path to the service conf folder where the JCEKS file will be created.

    :param service_name: Name of the service, for example, HIVE
    :return: lower case path to the service conf folder
    """

        # The stack definition scripts of the service can move the
        # JCEKS file around to where it wants, which is usually
        # /etc/<service_name>/conf

        conf_dir = os.path.join(self.credential_conf_dir, service_name.lower())
        if not os.path.exists(conf_dir):
            # 0755, not 0644: directories need the execute bit to be traversable
            os.makedirs(conf_dir, 0755)

        return conf_dir

    def getConfigTypeCredentials(self, commandJson):
        """
    Gets the affected config types for the service in this command
    with the password aliases and values.

    Input:
    {
        "config-type1" : {
          "password_key_name1":"password_value_name1",
          "password_key_name2":"password_value_name2",
            :
        },
        "config-type2" : {
          "password_key_name1":"password_value_name1",
          "password_key_name2":"password_value_name2",
            :
        },
           :
    }

    Output:
    {
        "config-type1" : {
          "alias1":"password1",
          "alias2":"password2",
            :
        },
        "config-type2" : {
          "alias1":"password1",
          "alias2":"password2",
            :
        },
           :
    }

    If password_key_name is the same as password_value_name, then password_key_name is the password alias itself.
    The value it points to is the password value.

    If password_key_name is not the same as the password_value_name, then password_key_name points to the alias.
    The value is pointed to by password_value_name.

    For example:
    Input:
    {
      "oozie-site" : {"oozie.service.JPAService.jdbc.password" : "oozie.service.JPAService.jdbc.password"},
      "admin-properties" {"db_user":"******", "ranger.jpa.jdbc.credential.alias:ranger-admin-site" : "db_password"}
    }

    Output:
    {
      "oozie-site" : {"oozie.service.JPAService.jdbc.password" : "MyOozieJdbcPassword"},
      "admin-properties" {"rangerdba" : "MyRangerDbaPassword", "rangeradmin":"MyRangerDbaPassword"},
    }

    :param commandJson:
    :return:
    """
        configtype_credentials = {}
        if 'configuration_credentials' in commandJson:
            for config_type, password_properties in commandJson[
                    'configuration_credentials'].items():
                if config_type in commandJson['configurations']:
                    value_names = []
                    config = commandJson['configurations'][config_type]
                    credentials = {}
                    for key_name, value_name in password_properties.items():
                        if key_name == value_name:
                            if value_name in config:
                                # password name is the alias
                                credentials[key_name] = config[value_name]
                                value_names.append(
                                    value_name
                                )  # Gather the value_name for deletion
                        else:
                            keyname_keyconfig = key_name.split(':')
                            key_name = keyname_keyconfig[0]
                            # if the key is in another configuration (cross reference),
                            # get the value of the key from that configuration
                            if len(keyname_keyconfig) > 1:
                                if keyname_keyconfig[1] not in commandJson[
                                        'configurations']:
                                    continue
                                key_config = commandJson['configurations'][
                                    keyname_keyconfig[1]]
                            else:
                                key_config = config
                            if key_name in key_config and value_name in config:
                                # password name points to the alias
                                credentials[
                                    key_config[key_name]] = config[value_name]
                                value_names.append(
                                    value_name
                                )  # Gather the value_name for deletion
                    if len(credentials) > 0:
                        configtype_credentials[config_type] = credentials
                    for value_name in value_names:
                        # Remove the clear text password
                        config.pop(value_name, None)
        return configtype_credentials

    def generateJceks(self, commandJson):
        """
    Generates the JCEKS file with passwords for the service specified in commandJson

    :param commandJson: command JSON
    :return: An exit value from the external process that generated the JCEKS file. None if
    there are no passwords in the JSON.
    """
        cmd_result = None
        roleCommand = None
        if 'roleCommand' in commandJson:
            roleCommand = commandJson['roleCommand']

        logger.info('generateJceks: roleCommand={0}'.format(roleCommand))

        # Set up the variables for the external command to generate a JCEKS file
        java_home = commandJson['hostLevelParams']['java_home']
        java_bin = '{java_home}/bin/java'.format(java_home=java_home)

        cs_lib_path = self.credential_shell_lib_path
        serviceName = commandJson['serviceName']

        # Gather the password values and remove them from the configuration
        provider_paths = []  # A service may depend on multiple configs
        configtype_credentials = self.getConfigTypeCredentials(commandJson)
        for config_type, credentials in configtype_credentials.items():
            config = commandJson['configurations'][config_type]
            file_path = os.path.join(self.getProviderDirectory(serviceName),
                                     "{0}.jceks".format(config_type))
            if os.path.exists(file_path):
                os.remove(file_path)
            provider_path = 'jceks://file{file_path}'.format(
                file_path=file_path)
            provider_paths.append(provider_path)
            logger.info('provider_path={0}'.format(provider_path))
            for alias, pwd in credentials.items():
                logger.debug("config={0}".format(config))
                protected_pwd = PasswordString(pwd)
                # Generate the JCEKS file
                cmd = (java_bin, '-cp', cs_lib_path, self.credential_shell_cmd,
                       'create', alias, '-value', protected_pwd, '-provider',
                       provider_path)
                logger.info(cmd)
                cmd_result = subprocess.call(cmd)
                logger.info('cmd_result = {0}'.format(cmd_result))
                os.chmod(
                    file_path, 0644
                )  # group and others should have read access so that the service user can read

        if provider_paths:
            # Add JCEKS provider paths instead
            config[self.CREDENTIAL_PROVIDER_PROPERTY_NAME] = ','.join(
                provider_paths)

        return cmd_result

    def runCommand(self,
                   command,
                   tmpoutfile,
                   tmperrfile,
                   forced_command_name=None,
                   override_output_files=True,
                   retry=False):
        """
    forced_command_name may be specified manually. In this case, value, defined at
    command json, is ignored.
    """
        try:
            script_type = command['commandParams']['script_type']
            script = command['commandParams']['script']
            timeout = int(command['commandParams']['command_timeout'])

            if 'hostLevelParams' in command and 'jdk_location' in command[
                    'hostLevelParams']:
                server_url_prefix = command['hostLevelParams']['jdk_location']
            else:
                server_url_prefix = command['commandParams']['jdk_location']

            task_id = "status"

            try:
                task_id = command['taskId']
                command_name = command['roleCommand']
            except KeyError:
                pass  # Status commands have no taskId

            if forced_command_name is not None:  # A forced name overrides the command JSON value
                command_name = forced_command_name

            if command_name == self.CUSTOM_ACTION_COMMAND:
                base_dir = self.file_cache.get_custom_actions_base_dir(
                    server_url_prefix)
                script_tuple = (os.path.join(base_dir, 'scripts',
                                             script), base_dir)
                hook_dir = None
            else:
                if command_name == self.CUSTOM_COMMAND_COMMAND:
                    command_name = command['hostLevelParams']['custom_command']

                # forces a hash challenge on the directories to keep them updated, even
                # if the return value is not used
                self.file_cache.get_host_scripts_base_dir(server_url_prefix)
                hook_dir = self.file_cache.get_hook_base_dir(
                    command, server_url_prefix)
                base_dir = self.file_cache.get_service_base_dir(
                    command, server_url_prefix)
                self.file_cache.get_custom_resources_subdir(
                    command, server_url_prefix)

                script_path = self.resolve_script_path(base_dir, script)
                script_tuple = (script_path, base_dir)

            tmpstrucoutfile = os.path.join(
                self.tmp_dir, "structured-out-{0}.json".format(task_id))

            # We don't support anything else yet
            if script_type.upper() != self.SCRIPT_TYPE_PYTHON:
                message = "Unknown script type {0}".format(script_type)
                raise AgentException(message)

            # Execute command using proper interpreter
            handle = None
            if command.has_key('__handle'):
                handle = command['__handle']
                handle.on_background_command_started = self.map_task_to_process
                del command['__handle']

            # If command contains credentialStoreEnabled, then
            # generate the JCEKS file for the configurations.
            credentialStoreEnabled = False
            if 'credentialStoreEnabled' in command:
                credentialStoreEnabled = (
                    command['credentialStoreEnabled'] == "true")

            if credentialStoreEnabled:
                self.generateJceks(command)

            json_path = self.dump_command_to_json(command, retry)
            pre_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.PRE_HOOK_PREFIX, command_name, script_type)
            post_hook_tuple = self.resolve_hook_script_path(
                hook_dir, self.POST_HOOK_PREFIX, command_name, script_type)
            py_file_list = [pre_hook_tuple, script_tuple, post_hook_tuple]
            # filter None values
            filtered_py_file_list = [i for i in py_file_list if i]

            logger_level = logging.getLevelName(logger.level)

            # Executing hooks and script
            ret = None
            from ActionQueue import ActionQueue
            if command.has_key('commandType') and command[
                    'commandType'] == ActionQueue.BACKGROUND_EXECUTION_COMMAND and len(
                        filtered_py_file_list) > 1:
                raise AgentException(
                    "Background commands are supported without hooks only")

            python_executor = self.get_py_executor(forced_command_name)
            backup_log_files = command_name not in self.DONT_BACKUP_LOGS_FOR_COMMANDS
            log_out_files = self.config.get(
                "logging", "log_out_files", default="0") != "0"

            for py_file, current_base_dir in filtered_py_file_list:
                log_info_on_failure = command_name not in self.DONT_DEBUG_FAILURES_FOR_COMMANDS
                script_params = [
                    command_name, json_path, current_base_dir, tmpstrucoutfile,
                    logger_level, self.exec_tmp_dir, self.force_https_protocol
                ]

                if log_out_files:
                    script_params.append("-o")

                ret = python_executor.run_file(
                    py_file,
                    script_params,
                    tmpoutfile,
                    tmperrfile,
                    timeout,
                    tmpstrucoutfile,
                    self.map_task_to_process,
                    task_id,
                    override_output_files,
                    backup_log_files=backup_log_files,
                    handle=handle,
                    log_info_on_failure=log_info_on_failure)
                # Next run_file() invocations should always append to current output
                override_output_files = False
                if ret['exitcode'] != 0:
                    break

            if not ret:  # Something went wrong
                raise AgentException("No script has been executed")

            # if canceled and not background command
            if handle is None:
                cancel_reason = self.command_canceled_reason(task_id)
                if cancel_reason is not None:
                    ret['stdout'] += cancel_reason
                    ret['stderr'] += cancel_reason

                    with open(tmpoutfile, "a") as f:
                        f.write(cancel_reason)
                    with open(tmperrfile, "a") as f:
                        f.write(cancel_reason)

        except Exception, e:  # We do not want to let agent fail completely
            exc_type, exc_obj, exc_tb = sys.exc_info()
            message = "Caught an exception while executing "\
              "custom service command: {0}: {1}; {2}".format(exc_type, exc_obj, str(e))
            logger.exception(message)
            ret = {
                'stdout': message,
                'stderr': message,
                'structuredOut': '{}',
                'exitcode': 1,
            }
        return ret
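To make the credential flow in this last variant concrete, here is a worked example of getConfigTypeCredentials following the docstring's first input shape. The password value is made up, and orchestrator is assumed to be an instance built as in the earlier hypothetical driver:

command = {
    'configuration_credentials': {
        'oozie-site': {
            # key name == value name, so the key is the alias itself
            'oozie.service.JPAService.jdbc.password':
                'oozie.service.JPAService.jdbc.password',
        },
    },
    'configurations': {
        'oozie-site': {
            'oozie.service.JPAService.jdbc.password': 'MyOozieJdbcPassword',
        },
    },
}
creds = orchestrator.getConfigTypeCredentials(command)
# creds == {'oozie-site':
#           {'oozie.service.JPAService.jdbc.password': 'MyOozieJdbcPassword'}}
# The clear-text value has also been popped from command['configurations'],
# so only the JCEKS provider path ends up in the dumped command JSON.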