def get_configs(self, hadoop_version):
    """Return the list of Ambari configuration items.

    :param hadoop_version: requested Hadoop version
        NOTE(review): currently unused — the same bundled resource file is
        loaded regardless of version; confirm whether per-version resources
        are intended.
    :returns: result of ConfigurationProvider.get_config_items()
    """
    resource_path = os.path.join(os.path.dirname(__file__),
                                 'resources',
                                 'ambari-config-resource.json')
    # Use a context manager so the resource file handle is always closed
    # (the original leaked the handle returned by open()).
    with open(resource_path, "r") as resource_file:
        config_resource = cfg.ConfigurationProvider(json.load(resource_file))
    return config_resource.get_config_items()
def _get_blueprint_processor(self, cluster):
    """Build a BlueprintProcessor seeded from the default cluster template.

    Applies the cluster's user inputs and node groups to the processor
    before returning it.

    :param cluster: cluster object providing cluster_configs and node_groups
    :returns: a populated bp.BlueprintProcessor
    """
    template_path = os.path.join(os.path.dirname(__file__),
                                 'resources',
                                 'default-cluster.template')
    # Context manager closes the template file promptly
    # (the original leaked the handle returned by open()).
    with open(template_path, "r") as template_file:
        processor = bp.BlueprintProcessor(json.load(template_file))
    # TODO(review): the Hadoop version is hard-coded here; confirm it
    # should come from stack/cluster configuration instead.
    processor.process_user_inputs(self._map_to_user_inputs(
        '1.3.0', cluster.cluster_configs))
    processor.process_node_groups(cluster.node_groups)
    return processor
def _get_blueprint_processor(self, cluster):
    """Build a BlueprintProcessor seeded from the default cluster template.

    Applies the cluster's user inputs and node groups to the processor
    before returning it.

    :param cluster: cluster object providing cluster_configs and node_groups
    :returns: a populated bp.BlueprintProcessor
    """
    template_path = os.path.join(os.path.dirname(__file__),
                                 'resources',
                                 'default-cluster.template')
    # Context manager closes the template file promptly
    # (the original leaked the handle returned by open()).
    with open(template_path, "r") as template_file:
        processor = bp.BlueprintProcessor(json.load(template_file))
    # TODO(review): the Hadoop version is hard-coded here; confirm it
    # should come from stack/cluster configuration instead.
    processor.process_user_inputs(self._map_to_user_inputs(
        '1.3.0', cluster.cluster_configs))
    processor.process_node_groups(cluster.node_groups)
    return processor
def __init__(self, cluster_spec):
    """Initialize from a cluster spec, loading the Ambari config resource.

    :param cluster_spec: object providing configurations and node_groups
        for parsing.
    """
    # TODO(jspeidel): get from stack config
    self.hadoop_version = "1.3.0"
    self.cluster_configs = []
    self.node_groups = []
    resource_path = os.path.join(os.path.dirname(__file__),
                                 "resources",
                                 "ambari-config-resource.json")
    # Use a context manager so the resource file handle is always closed
    # (the original leaked the handle returned by open()).
    with open(resource_path, "r") as resource_file:
        self.config = cfg.ConfigurationProvider(json.load(resource_file))
    self._parse_configurations(cluster_spec.configurations)
    self._parse_node_groups(cluster_spec.node_groups)
def __init__(self, cluster_spec):
    """Initialize from a cluster spec, loading the Ambari config resource.

    :param cluster_spec: object providing configurations and node_groups
        for parsing.
    """
    # TODO(jspeidel): get from stack config
    self.hadoop_version = '1.3.0'
    self.cluster_configs = []
    self.node_groups = []
    resource_path = os.path.join(os.path.dirname(__file__),
                                 'resources',
                                 'ambari-config-resource.json')
    # Use a context manager so the resource file handle is always closed
    # (the original leaked the handle returned by open()).
    with open(resource_path, "r") as resource_file:
        self.config = cfg.ConfigurationProvider(json.load(resource_file))
    self._parse_configurations(cluster_spec.configurations)
    self._parse_node_groups(cluster_spec.node_groups)
def configure_cluster(self, cluster):
    """Convert cluster/node-group user inputs into an Ambari blueprint
    and create the cluster from it.

    :param cluster: cluster object providing cluster_configs and
        node_groups.
    """
    template_path = os.path.join(os.path.dirname(__file__),
                                 'resources',
                                 'default-cluster.template')
    # Context manager closes the template file promptly
    # (the original leaked the handle returned by open()).
    with open(template_path, "r") as template_file:
        processor = bp.BlueprintProcessor(json.load(template_file))
    # NOTE(review): other variants of this method map the configs via
    # self._map_to_user_inputs('1.3.0', ...) before processing — confirm
    # whether the raw cluster_configs are intended here.
    processor.process_user_inputs(cluster.cluster_configs)
    processor.process_node_groups(cluster.node_groups)
    # NOTE: for the time being we are going to ignore the node group
    # level configurations. we are not currently
    # defining node level configuration items (i.e. scope='cluster' in
    # all cases for returned configs)
    # TODO(jmaron): is base host name really necessary any longer?
    self.create_cluster(cluster, json.dumps(processor.blueprint))
def configure_cluster(self, cluster):
    """Convert cluster/node-group user inputs into an Ambari blueprint
    and create the cluster from it.

    :param cluster: cluster object providing cluster_configs and
        node_groups.
    """
    template_path = os.path.join(os.path.dirname(__file__),
                                 'resources',
                                 'default-cluster.template')
    # Context manager closes the template file promptly
    # (the original leaked the handle returned by open()).
    with open(template_path, "r") as template_file:
        processor = bp.BlueprintProcessor(json.load(template_file))
    # TODO(review): the Hadoop version is hard-coded here; confirm it
    # should come from stack/cluster configuration instead.
    processor.process_user_inputs(
        self._map_to_user_inputs('1.3.0', cluster.cluster_configs))
    processor.process_node_groups(cluster.node_groups)
    # NOTE: for the time being we are going to ignore the node group
    # level configurations. we are not currently
    # defining node level configuration items (i.e. scope='cluster' in
    # all cases for returned configs)
    # TODO(jmaron): is base host name really necessary any longer?
    self.create_cluster(cluster, json.dumps(processor.blueprint))
def get_configs(self, hadoop_version):
    """Return the list of Ambari configuration items.

    :param hadoop_version: requested Hadoop version
        NOTE(review): currently unused — the same bundled resource file is
        loaded regardless of version; confirm whether per-version resources
        are intended.
    :returns: result of ConfigurationProvider.get_config_items()
    """
    resource_path = os.path.join(os.path.dirname(__file__),
                                 'resources',
                                 'ambari-config-resource.json')
    # Use a context manager so the resource file handle is always closed
    # (the original leaked the handle returned by open()).
    with open(resource_path, "r") as resource_file:
        config_resource = cfg.ConfigurationProvider(json.load(resource_file))
    return config_resource.get_config_items()