def _build_configs_and_plugin():
    c1 = p.Config('n-1', 'at-1', 'cluster')
    c2 = p.Config('n-2', 'at-2', 'cluster')
    c3 = p.Config('n-3', 'at-1', 'node')

    class TestPlugin(TestEmptyPlugin):
        def get_configs(self, hadoop_version):
            return [c1, c2, c3]

    return c1, c2, c3, TestPlugin()
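# Hedged usage sketch, not part of the original module: one way a test might
# exercise the helper above. It assumes p.Config keeps the 'name' it was
# constructed with and that the hadoop_version argument is only a placeholder.
def _example_usage_of_build_configs_and_plugin():
    c1, c2, c3, plugin = _build_configs_and_plugin()
    configs = plugin.get_configs('1.2.1')  # version string is illustrative
    assert configs == [c1, c2, c3]
    assert [cfg.name for cfg in configs] == ['n-1', 'n-2', 'n-3']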
def _initialize(self, config):
    for configuration in self.config['configurations']:
        for service_property in configuration['properties']:
            config = p.Config(service_property['name'],
                              self._get_target(
                                  service_property['applicable_target']),
                              service_property['scope'],
                              config_type=service_property['config_type'],
                              default_value=service_property['default_value'],
                              is_optional=service_property['is_optional'],
                              description=service_property['description'])
            setattr(config, 'tag', configuration['tag'].rsplit(".", 1)[0])
            self.config_items.append(config)
            # TODO(jspeidel): an assumption is made that property names
            # are unique across configuration sections which is dangerous
            property_name = service_property['name']
            # if property already exists, throw an exception
            if property_name in self.config_mapper:
                # internal error
                # ambari-config-resource contains duplicates
                raise InvalidDataException(
                    'Internal Error. Duplicate property '
                    'name detected: %s' % property_name)
            self.config_mapper[service_property['name']] = \
                self._get_target(
                    service_property['applicable_target'])
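# Hedged sketch, not from the original source: the minimal shape of
# self.config['configurations'] that _initialize() above expects. The field
# names are the keys read inside the loop; the concrete values here are
# invented purely for illustration.
_EXAMPLE_AMBARI_CONFIG_RESOURCE = {
    'configurations': [
        {
            'tag': 'core-site.xml',
            'properties': [
                {
                    'name': 'fs.trash.interval',
                    'applicable_target': 'general',
                    'scope': 'cluster',
                    'config_type': 'int',
                    'default_value': '360',
                    'is_optional': True,
                    'description': 'Example property description.'
                }
            ]
        }
    ]
}
# After _initialize() runs on input of this shape, each property becomes a
# p.Config in self.config_items (tagged 'core-site' via rsplit on the tag),
# self.config_mapper maps the property name to its applicable target, and a
# duplicate property name raises InvalidDataException.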
def _initialise_configs():
    configs = []
    for service, config_lists in XML_CONFS.iteritems():
        for config_list in config_lists:
            for config in config_list:
                if config['name'] not in HIDDEN_CONFS:
                    cfg = p.Config(config['name'], service, "cluster",
                                   is_optional=True, config_type="string",
                                   default_value=str(config['value']),
                                   description=config['description'])
                    if config.get('type'):
                        cfg.config_type = CFG_TYPE[config['type']]
                    if cfg.config_type == 'bool':
                        cfg.default_value = cfg.default_value == 'true'
                    if cfg.config_type == 'int':
                        if cfg.default_value:
                            cfg.default_value = int(cfg.default_value)
                        else:
                            cfg.config_type = 'string'
                    if config['name'] in PRIORITY_1_CONFS:
                        cfg.priority = 1
                    configs.append(cfg)

    configs.append(IDH_TARBALL_URL)
    configs.append(IDH_REPO_URL)
    configs.append(OS_REPO_URL)
    configs.append(OOZIE_EXT22_URL)
    configs.append(ENABLE_SWIFT)
    return configs
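# Hedged illustration, not in the original module, of how a single XML default
# entry would be converted by _initialise_configs() above. The sample values
# are invented; CFG_TYPE, HIDDEN_CONFS and PRIORITY_1_CONFS are assumed to be
# defined alongside the function as in the surrounding file.
_sample_xml_default = {'name': 'dfs.replication', 'value': '3',
                       'description': 'Default block replication.'}
# With no 'type' key the config stays a string, so the resulting item is
# roughly:
#   p.Config('dfs.replication', 'HDFS', 'cluster', is_optional=True,
#            config_type='string', default_value='3',
#            description='Default block replication.')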
def _initialise_configs():
    configs = []
    for service, config_lists in XML_CONFS.iteritems():
        for config_list in config_lists:
            for config in config_list:
                if config['name'] not in HIDDEN_CONFS:
                    cfg = p.Config(config['name'], service, "node",
                                   is_optional=True, config_type="string",
                                   default_value=str(config['value']),
                                   description=config['description'])
                    if cfg.default_value in ["true", "false"]:
                        cfg.config_type = "bool"
                        cfg.default_value = (cfg.default_value == 'true')
                    elif types.is_int(cfg.default_value):
                        cfg.config_type = "int"
                        cfg.default_value = int(cfg.default_value)
                    if config['name'] in CLUSTER_WIDE_CONFS:
                        cfg.scope = 'cluster'
                    if config['name'] in PRIORITY_1_CONFS:
                        cfg.priority = 1
                    configs.append(cfg)

    for service, config_items in ENV_CONFS.iteritems():
        for name, param_format_str in config_items.iteritems():
            configs.append(p.Config(name, service, "node",
                                    default_value=1024, priority=1,
                                    config_type="int"))

    configs.append(ENABLE_SWIFT)
    configs.append(ENABLE_MYSQL)
    if CONF.enable_data_locality:
        configs.append(ENABLE_DATA_LOCALITY)

    return configs
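# Hedged sketch, an assumption rather than code from the original file: the
# integer heap-size configs created from ENV_CONFS above are presumably
# rendered elsewhere in the plugin by substituting the chosen value into the
# matching format string (the ENV_CONFS entries are shown further below), e.g.
_heap_line = 'HADOOP_NAMENODE_OPTS=\\"-Xmx%sm\\"' % 1024
# -> 'HADOOP_NAMENODE_OPTS=\"-Xmx1024m\"', i.e. a line suitable for an
#    environment file such as hadoop-env.sh.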
        'Job Tracker Heap Size': 'HADOOP_JOBTRACKER_OPTS=\\"-Xmx%sm\\"',
        'Task Tracker Heap Size': 'HADOOP_TASKTRACKER_OPTS=\\"-Xmx%sm\\"'
    },
    "HDFS": {
        'Name Node Heap Size': 'HADOOP_NAMENODE_OPTS=\\"-Xmx%sm\\"',
        'Data Node Heap Size': 'HADOOP_DATANODE_OPTS=\\"-Xmx%sm\\"'
    },
    "JobFlow": {
        'Oozie Heap Size': 'CATALINA_OPTS=\\"-Xmx%sm\\"'
    }
}

ENABLE_SWIFT = p.Config('Enable Swift', 'general', 'cluster',
                        config_type="bool", priority=1,
                        default_value=True, is_optional=True)

HIDDEN_CONFS = ['fs.default.name', 'dfs.name.dir', 'dfs.data.dir',
                'mapred.job.tracker', 'mapred.system.dir', 'mapred.local.dir',
                'hadoop.proxyuser.hadoop.hosts',
                'hadoop.proxyuser.hadoop.groups']

CLUSTER_WIDE_CONFS = ['dfs.block.size', 'dfs.permissions', 'dfs.replication',
                      'dfs.replication.min', 'dfs.replication.max',
                      'io.file.buffer.size', 'mapreduce.job.counters.max',
                      'mapred.output.compress', 'io.compression.codecs',
                      'mapred.output.compression.codec',
        'Job Tracker Heap Size': 'HADOOP_JOBTRACKER_OPTS=\\"-Xmx%sm\\"',
        'Task Tracker Heap Size': 'HADOOP_TASKTRACKER_OPTS=\\"-Xmx%sm\\"'
    },
    "HDFS": {
        'Name Node Heap Size': 'HADOOP_NAMENODE_OPTS=\\"-Xmx%sm\\"',
        'Data Node Heap Size': 'HADOOP_DATANODE_OPTS=\\"-Xmx%sm\\"'
    },
    "JobFlow": {
        'Oozie Heap Size': 'CATALINA_OPTS=\\"-Xmx%sm\\"'
    }
}

ENABLE_SWIFT = p.Config('Enable Swift', 'general', 'cluster',
                        config_type="bool", priority=1,
                        default_value=True, is_optional=True)

ENABLE_DATA_LOCALITY = p.Config('Enable Data Locality', 'general', 'cluster',
                                config_type="bool", priority=1,
                                default_value=True, is_optional=True)

ENABLE_MYSQL = p.Config('Enable MySQL', 'general', 'cluster',
    'plugins/intel/v2_5_1/resources/mapred-default.xml')
OOZIE_DEFAULT = x.load_hadoop_xml_defaults(
    'plugins/intel/v2_5_1/resources/oozie-default.xml')

XML_CONFS = {
    "Hadoop": [CORE_DEFAULT],
    "HDFS": [HDFS_DEFAULT],
    "MapReduce": [MAPRED_DEFAULT],
    "JobFlow": [OOZIE_DEFAULT]
}

IDH_TARBALL_URL = p.Config('IDH tarball URL', 'general', 'cluster', priority=1,
                           default_value='http://repo1.intelhadoop.com:3424/'
                                         'setup/setup-intelhadoop-'
                                         '2.5.1-en-evaluation.RHEL.tar.gz')

OS_REPO_URL = p.Config('OS repository URL', 'general', 'cluster', priority=1,
                       is_optional=True,
                       default_value='http://mirror.centos.org/'
                                     'centos-6/6/os/x86_64')

IDH_REPO_URL = p.Config('IDH repository URL', 'general', 'cluster',