示例#1
0
文件: hue.py 项目: madar010/mad
    def get_config_files(self, cluster_context, configs, instance=None):
        """Build the Hue configuration files (hue.ini and hue.sh).

        :param cluster_context: cluster context providing topology and
            configuration lookups
        :param configs: user-provided configuration values
        :param instance: unused here; kept for interface compatibility
        :returns: list of configuration file objects to deploy
        """
        template = 'plugins/mapr/services/hue/resources/hue_%s.template'

        # hue.ini: rendered from the version-specific template
        hue_ini = bcf.TemplateFile("hue.ini")
        hue_ini.remote_path = self.conf_dir(cluster_context)
        hue_ini.parse(files.get_file_text(template % self.version))
        hue_ini.add_properties(self._get_hue_ini_props(cluster_context))
        hue_ini.add_property("thrift_version",
                             configs[self.THRIFT_VERSION.name])

        # hue.sh: startup script placed under the Hue home bin directory
        hue_sh_template = 'plugins/mapr/services/hue/' \
                          'resources/hue_sh_%s.template'
        hue_sh = bcf.TemplateFile("hue.sh")
        hue_sh.remote_path = self.home_dir(cluster_context) + '/bin'
        hue_sh.parse(files.get_file_text(hue_sh_template % self.version))
        hue_sh.add_property('hadoop_version', cluster_context.hadoop_version)
        # NOTE(review): decimal 777, not octal 0o777 — presumably consumed
        # as a chmod argument string; confirm against the file writer.
        hue_sh.mode = 777

        # Schedule a restart for Hue instances outside the current change
        # set. The loop variable previously shadowed the 'instance'
        # parameter; renamed to avoid the shadowing.
        hue_instances = cluster_context.get_instances(HUE)
        for hue_instance in hue_instances:
            if hue_instance not in cluster_context.changed_instances():
                cluster_context.should_be_restarted[self] += [hue_instance]

        return [hue_ini, hue_sh]
示例#2
0
 def test_render(self):
     """render() substitutes _config_dict values into the parsed template."""
     tf = conf_f.TemplateFile('foo')
     tf.parse(self.content)
     tf._config_dict = {'value1': 'value1', 'value2': 'value2'}
     self.assertEqual(self.rendered, tf.render())
示例#3
0
文件: impala.py 项目: madar010/mad
    def get_config_files(self, cluster_context, configs, instance=None):
        """Build the Impala env.sh file, adding Sentry integration
        properties when a compatible Sentry service is deployed.
        """
        defaults = 'plugins/mapr/services/impala/resources/impala-env.sh.j2'

        impala_env = bcf.TemplateFile("env.sh")
        impala_env.remote_path = self.conf_dir(cluster_context)
        if instance:
            impala_env.fetch(instance)
        impala_env.parse(files.get_file_text(defaults))
        impala_env.add_properties(self._get_impala_env_props(cluster_context))

        # Guard clause: without a Sentry host there is nothing more to add.
        sentry_host = cluster_context.get_instance(sentry.SENTRY)
        if not sentry_host:
            return [impala_env]

        sentry_mode = cluster_context._get_cluster_config_value(
            sentry.Sentry().SENTRY_STORAGE_MODE)
        ui_name = sentry.Sentry().ui_name
        sentry_version = cluster_context.get_chosen_service_version(ui_name)
        sentry_service = cluster_context._find_service_instance(
            ui_name, sentry_version)
        if sentry_service.supports(self, sentry_mode):
            impala_env.add_properties({
                'sentry_home': sentry_service.home_dir(cluster_context),
                'sentry_db': sentry_mode == sentry.DB_STORAGE_SENTRY_MODE,
                'sentry_policy_file':
                    'maprfs://' + sentry_service.GLOBAL_POLICY_FILE,
            })
        return [impala_env]
示例#4
0
文件: spark.py 项目: madar010/mad
    def get_config_files(self, cluster_context, configs, instance=None):
        """Assemble the Spark config files: spark-env.sh,
        spark-defaults.conf and compatibility.version.
        """
        hbase_version = self._get_hbase_version(cluster_context)
        hive_version = self._get_hive_version(cluster_context)
        conf_dir = self.conf_dir(cluster_context)

        # spark-env.sh rendered from the bundled template
        env_template = ('plugins/mapr/services/'
                        'spark/resources/spark-env.template')
        spark_env = bcf.TemplateFile('spark-env.sh')
        spark_env.remote_path = conf_dir
        spark_env.parse(files.get_file_text(env_template))
        spark_env.add_property('version', self.version)

        # spark-defaults.conf uses space-separated key/value pairs
        spark_defaults = bcf.PropertiesFile('spark-defaults.conf',
                                            separator=' ')
        spark_defaults.remote_path = conf_dir
        if instance:
            spark_defaults.fetch(instance)

        # compatibility.version records ecosystem component versions
        compat = bcf.PropertiesFile('compatibility.version')
        compat.remote_path = self.home_dir(cluster_context) + '/mapr-util'
        if instance:
            compat.fetch(instance)

        if hive_version:
            compat.add_property('hive_versions', hive_version + '.0')
            spark_defaults.add_properties(
                self._hive_properties(cluster_context))
        if hbase_version:
            compat.add_property('hbase_versions', hbase_version)
            hbase_home = self._hbase(cluster_context).home_dir(
                cluster_context)
            spark_defaults.add_property('spark.executor.extraClassPath',
                                        '%s/lib/*' % hbase_home)
        return [spark_defaults, compat, spark_env]
示例#5
0
文件: mysql.py 项目: uladz/sahara
 def _create_script_obj(filename, template, **kwargs):
     """Build a TemplateFile for a mysql helper script staged under /tmp/.

     :param filename: name of the script file to create
     :param template: template file name under the mysql resources dir
     :param kwargs: template properties substituted into the script
     :returns: configured TemplateFile object
     """
     script = cf.TemplateFile(filename)
     script.remote_path = '/tmp/'
     script.parse(f.get_file_text(
         'plugins/mapr/services/mysql/resources/%s' % template))
     # dict.items() works on both py2 and py3; six.iteritems is unneeded
     for key, value in kwargs.items():
         script.add_property(key, value)
     return script
示例#6
0
    def get_config_files(self, cluster_context, configs, instance=None):
        """Return the rendered Impala env.sh configuration file."""
        template_path = 'plugins/mapr/services/impala/resources/impala-env.sh'

        env_file = bcf.TemplateFile("env.sh")
        env_file.remote_path = self.conf_dir(cluster_context)
        # When a specific instance is given, pull its current file first.
        if instance:
            env_file.fetch(instance)
        env_file.parse(files.get_file_text(template_path))
        env_file.add_properties(self._get_impala_env_props(cluster_context))

        return [env_file]
示例#7
0
    def test_add_property(self):
        """add_property accumulates key/value pairs in _config_dict."""
        tf = conf_f.TemplateFile('foo')
        self.assertDictEqual({}, tf._config_dict)

        tf.add_property('key1', 'value1')
        self.assertDictEqual({'key1': 'value1'}, tf._config_dict)

        tf.add_property('key2', 'value2')
        self.assertDictEqual({'key1': 'value1', 'key2': 'value2'},
                             tf._config_dict)
示例#8
0
文件: hue.py 项目: madar010/mad
    def get_config_files(self, cluster_context, configs, instance=None):
        """Build the Livy configuration files (livy.conf and livy-env.sh).

        :param cluster_context: cluster context providing topology lookups
        :param configs: user-provided configuration values (unused here)
        :param instance: unused here; kept for interface compatibility
        :returns: list of configuration file objects to deploy
        """
        livy_conf_template = 'plugins/mapr/services/hue/' \
                             'resources/livy_conf_%s.template'
        livy_conf = bcf.TemplateFile("livy.conf")
        livy_conf.parse(files.get_file_text(livy_conf_template % self.version))
        livy_conf.remote_path = self.home_dir(cluster_context) + '/conf'

        livy_sh_template = 'plugins/mapr/services/hue/' \
                           'resources/livy_sh_%s.template'
        livy_sh = bcf.TemplateFile("livy-env.sh")
        livy_sh.remote_path = self.home_dir(cluster_context) + '/conf'
        livy_sh.parse(files.get_file_text(livy_sh_template % self.version))
        livy_sh.add_property('hadoop_version', cluster_context.hadoop_version)
        livy_sh.add_property('spark_version', spark.SparkOnYarnV201().version)
        # NOTE(review): decimal 777, not octal 0o777 — confirm intended.
        livy_sh.mode = 777

        # Restart Hue instances outside the current change set. The loop
        # variable previously shadowed the 'instance' parameter; renamed.
        hue_instances = cluster_context.get_instances(HUE)
        for hue_instance in hue_instances:
            if hue_instance not in cluster_context.changed_instances():
                cluster_context.should_be_restarted[self] += [hue_instance]

        return [livy_sh, livy_conf]
示例#9
0
文件: hue.py 项目: uladz/sahara
    def get_config_files(self, cluster_context, configs, instance=None):
        """Build hue.ini from the version-specific template.

        :param cluster_context: cluster context providing topology lookups
        :param configs: user-provided configuration values (unused here)
        :param instance: unused here; kept for interface compatibility
        :returns: list containing the hue.ini file object
        """
        template = 'plugins/mapr/services/hue/resources/hue_%s.template'

        hue_ini = bcf.TemplateFile("hue.ini")
        hue_ini.remote_path = self.conf_dir(cluster_context)
        hue_ini.parse(files.get_file_text(template % self.version))
        hue_ini.add_properties(self._get_hue_ini_props(cluster_context))

        # TODO(aosadchyi): rewrite it
        # Restart Hue instances outside the current change set. The loop
        # variable previously shadowed the 'instance' parameter; renamed.
        hue_instances = cluster_context.get_instances(HUE)
        for hue_instance in hue_instances:
            if hue_instance not in cluster_context.changed_instances():
                cluster_context.should_be_restarted[self] += [hue_instance]

        return [hue_ini]
示例#10
0
 def get_config_files(self, cluster_context, configs, instance=None):
     """Build sentry-site.xml and global-policy.ini for Sentry."""
     conf_dir = self.conf_dir(cluster_context)

     # sentry-site.xml: defaults template + user configs + mode-specific
     # properties.
     sentry_site = cf.HadoopXML('sentry-site.xml')
     sentry_site.remote_path = conf_dir
     if instance:
         sentry_site.fetch(instance)
     sentry_site.load_properties(configs)
     sentry_mode = configs[self.SENTRY_STORAGE_MODE.name]
     sentry_site.parse(files.get_file_text(
         'plugins/mapr/services/sentry/resources/sentry-default.xml'))
     sentry_site.add_properties(
         self._get_sentry_site_props(cluster_context, sentry_mode))

     # global-policy.ini rendered from its bundled template.
     global_policy = cf.TemplateFile('global-policy.ini')
     global_policy.remote_path = conf_dir
     global_policy.parse(files.get_file_text(
         'plugins/mapr/services/sentry/resources/global-policy.ini'))
     return [sentry_site, global_policy]
示例#11
0
文件: spark.py 项目: msionkin/sahara
    def get_config_files(self, cluster_context, configs, instance=None):
        """Assemble the Spark config files: spark-env.sh,
        spark-defaults.conf and compatibility.version.
        """
        hbase_version = self._get_hbase_version(cluster_context)
        hive_version = self._get_hive_version(cluster_context)
        conf_dir = self.conf_dir(cluster_context)

        # spark-env.sh rendered from the bundled template
        env_template = ('plugins/mapr/services/'
                        'spark/resources/spark-env.template')
        spark_env = bcf.TemplateFile('spark-env.sh')
        spark_env.remote_path = conf_dir
        spark_env.parse(files.get_file_text(env_template))
        spark_env.add_property('version', self.version)
        spark_env.add_property('servlet_api_jar', self.SERVLET_JAR)

        # spark-defaults.conf uses space-separated key/value pairs
        spark_defaults = bcf.PropertiesFile('spark-defaults.conf',
                                            separator=' ')
        spark_defaults.remote_path = conf_dir
        if instance:
            spark_defaults.fetch(instance)
        assembly_jar = self._assembly_jar_path(cluster_context)
        spark_defaults.add_property(
            'spark.yarn.jar',
            'maprfs://%s/%s' % (self.JAR_FILE_TARGET,
                                assembly_jar.rsplit('/', 1)[1]))

        # compatibility.version records hive/hbase interop versions
        compat = bcf.PropertiesFile('compatibility.version')
        compat.remote_path = self.home_dir(cluster_context) + '/mapr-util'
        if instance:
            compat.fetch(instance)

        if hive_version:
            compat.add_property(
                'hive_versions', self._format_hive_version(hive_version))
            spark_defaults.add_properties(
                self._hive_properties(cluster_context))
        if hbase_version:
            compat.add_property('hbase_versions', hbase_version)
            hbase_home = self._hbase(cluster_context).home_dir(
                cluster_context)
            spark_defaults.add_property('spark.executor.extraClassPath',
                                        '%s/lib/*' % hbase_home)
        return [spark_defaults, compat, spark_env]
示例#12
0
 def test_parse(self):
     """parse() compiles raw content into an internal template object."""
     tf = conf_f.TemplateFile('foo')
     tf.parse(self.content)
     self.assertIsNotNone(tf._template)
示例#13
0
 def test_remote_path(self):
     """The remote_path getter joins the directory with the file name."""
     tf = conf_f.TemplateFile('foo')
     tf.remote_path = '/bar'
     self.assertEqual('/bar/foo', tf.remote_path)