def _create_hdfs_data(self, source, hdfs_username):
    """Upload a local file into HDFS on the cluster and return its path.

    If ``source`` already looks like an HDFS path under a user directory
    (contains 'user'), it is returned unchanged.  Otherwise the file is
    pushed to a freshly named path under /user/<hdfs_username>/data via
    shell commands run on a cluster node.

    :param source: local file path (or an existing HDFS path)
    :param hdfs_username: HDFS user that owns the target directory
    :returns: HDFS path of the uploaded file
    """
    def to_hex_present(string):
        # Escape every character as \xNN so arbitrary file content
        # survives being piped through "echo -e" on the remote shell.
        return "".join(
            map(lambda x: hex(ord(x)).replace("0x", "\\x"), string))
    # Heuristic: anything mentioning 'user' is treated as an existing
    # HDFS location and used as-is.
    if 'user' in source:
        return source
    hdfs_dir = utils.rand_name("/user/%s/data" % hdfs_username)
    # Run the commands on the first node that has the required process.
    inst_ip = self._get_nodes_with_process()[0]["management_ip"]
    self._run_command_on_node(
        inst_ip,
        "sudo su - -c \"hdfs dfs -mkdir -p %(path)s \" %(user)s" % {
            "path": hdfs_dir, "user": hdfs_username})
    hdfs_filepath = utils.rand_name(hdfs_dir + "/file")
    with open(source) as source_fd:
        data = source_fd.read()
    # Pipe the hex-escaped payload through stdin of "hdfs dfs -put -".
    self._run_command_on_node(
        inst_ip,
        ("echo -e \"%(data)s\" | sudo su - -c \"hdfs dfs"
         " -put - %(path)s\" %(user)s") % {
            "data": to_hex_present(data),
            "path": hdfs_filepath,
            "user": hdfs_username})
    return hdfs_filepath
def _create_cluster_template(self):
    """Create a cluster template from the testcase or the default JSON.

    Populates ``self.ng_name_map`` (template name -> randomized node
    group name) as a side effect and returns the new template's id.
    """
    self.ng_name_map = {}
    if self.testcase.get('cluster_template'):
        template = self.testcase['cluster_template']
    else:
        default_path = os.path.join(self.template_path,
                                    'cluster_template.json')
        with open(default_path) as fp:
            template = json.load(fp)
    kwargs = dict(template)
    # Translate the node_group_templates mapping into the node_groups
    # list the API expects, randomizing each group name.
    node_groups = []
    for ng, count in kwargs.pop('node_group_templates').items():
        unique_name = utils.rand_name(ng)
        self.ng_name_map[ng] = unique_name
        node_groups.append({
            'name': unique_name,
            'node_group_template_id': self.ng_id_map[ng],
            'count': count,
        })
    kwargs['node_groups'] = node_groups
    kwargs.update(self.plugin_opts)
    kwargs['name'] = utils.rand_name(kwargs['name'])
    if self.network['type'] == 'neutron':
        kwargs['net_id'] = self.neutron.get_network_id(
            self.network['private_network'])
    return self.__create_cluster_template(**kwargs)
def _create_cluster_template(self):
    """Build and register a cluster template, returning its id.

    Side effect: fills ``self.ng_name_map`` with the randomized name
    chosen for each node group template.
    """
    self.ng_name_map = {}
    template = self.testcase.get("cluster_template")
    if not template:
        with open(os.path.join(self.template_path,
                               "cluster_template.json")) as fp:
            template = json.load(fp)
    kwargs = dict(template)
    ngs = kwargs["node_group_templates"]
    del kwargs["node_group_templates"]
    groups = []
    for ng, count in ngs.items():
        ng_name = utils.rand_name(ng)
        self.ng_name_map[ng] = ng_name
        groups.append({"name": ng_name,
                       "node_group_template_id": self.ng_id_map[ng],
                       "count": count})
    kwargs["node_groups"] = groups
    kwargs.update(self.plugin_opts)
    kwargs["name"] = utils.rand_name(kwargs["name"])
    if self.network["type"] == "neutron":
        kwargs["net_id"] = self.neutron.get_network_id(
            self.network["private_network"])
    return self.__create_cluster_template(**kwargs)
def _create_cluster_template(self):
    """Register a cluster template (testcase override or default JSON).

    Records the randomized node group names in ``self.ng_name_map`` and
    returns the created template id.
    """
    self.ng_name_map = {}
    if self.testcase.get('cluster_template'):
        source = self.testcase['cluster_template']
    else:
        json_path = os.path.join(self.template_path,
                                 'cluster_template.json')
        with open(json_path) as json_fp:
            source = json.load(json_fp)
    kwargs = dict(source)
    templates = kwargs.pop('node_group_templates')
    kwargs['node_groups'] = []
    for tmpl_name, count in templates.items():
        randomized = utils.rand_name(tmpl_name)
        self.ng_name_map[tmpl_name] = randomized
        kwargs['node_groups'].append({
            'name': randomized,
            'node_group_template_id': self.ng_id_map[tmpl_name],
            'count': count,
        })
    kwargs.update(self.plugin_opts)
    kwargs['name'] = utils.rand_name(kwargs['name'])
    if self.network['type'] == 'neutron':
        kwargs['net_id'] = self.neutron.get_network_id(
            self.network['private_network'])
    return self.__create_cluster_template(**kwargs)
def create(ds, name):
    """Create a data source of the given type and return its id.

    :param ds: testcase data source description (type, source/destination)
    :param name: base name; a random suffix is appended
    :raises ValueError: for an unsupported data source type (previously
        this crashed with an obscure UnboundLocalError on ``url``)
    """
    location = ds.get('source', None)
    if not location:
        location = utils.rand_name(ds['destination'])
    if ds['type'] == 'swift':
        url = self._create_swift_data(location)
    elif ds['type'] == 'hdfs':
        url = location
    else:
        # Fail loudly instead of referencing an unbound 'url' below.
        raise ValueError(
            'unsupported data source type: %s' % ds['type'])
    return self.__create_datasource(
        name=utils.rand_name(name),
        description='',
        data_source_type=ds['type'],
        url=url,
        credential_user=self.credentials['os_username'],
        credential_pass=self.credentials['os_password'])
def create(ds, name):
    """Create a data source of the given type and return its id.

    :param ds: testcase data source description (type, source/destination)
    :param name: base name; a random suffix is appended
    :raises ValueError: for an unsupported data source type (previously
        this crashed with an obscure UnboundLocalError on ``url``)
    """
    location = ds.get('source', None)
    if not location:
        location = utils.rand_name(ds['destination'])
    if ds['type'] == 'swift':
        url = self._create_swift_data(location)
    elif ds['type'] == 'hdfs' or ds['type'] == 'maprfs':
        # Both filesystems take the location verbatim.
        url = location
    else:
        # Fail loudly instead of referencing an unbound 'url' below.
        raise ValueError(
            'unsupported data source type: %s' % ds['type'])
    return self.__create_datasource(
        name=utils.rand_name(name),
        description='',
        data_source_type=ds['type'],
        url=url,
        credential_user=self.credentials['os_username'],
        credential_pass=self.credentials['os_password'])
def check_scale(self):
    """Apply the testcase's scaling operations (add/resize node groups).

    Scaling ops come from the testcase or from scale.json next to the
    templates; when any are found, the cluster is scaled and polled
    until it settles.
    """
    if self.testcase.get('scaling'):
        scale_ops = self.testcase['scaling']
    else:
        scale_ops = []
        scale_path = os.path.join(self.template_path, 'scale.json')
        if os.path.exists(scale_path):
            with open(scale_path) as fp:
                scale_ops = json.load(fp)
    body = {}
    for op in scale_ops:
        if op['operation'] == 'add':
            body.setdefault('add_node_groups', []).append({
                'node_group_template_id':
                    self.ng_id_map[op['node_group']],
                'count': op['size'],
                'name': utils.rand_name(op['node_group']),
            })
        if op['operation'] == 'resize':
            body.setdefault('resize_node_groups', []).append({
                'name': self.ng_name_map[op['node_group']],
                'count': op['size'],
            })
    if body:
        self.sahara.scale_cluster(self.cluster_id, body)
        self._poll_cluster_status(self.cluster_id)
def _create_node_group_templates(self):
    """Create all node group templates and map their names to ids.

    Templates come from the testcase or from node_group_template_*.json
    files next to the other templates.
    """
    floating_ip_pool = None
    if self.network['type'] == 'neutron':
        floating_ip_pool = self.neutron.get_network_id(
            self.network['public_network'])
    elif not self.network['auto_assignment_floating_ip']:
        floating_ip_pool = self.network['public_network']

    if self.testcase.get('node_group_templates'):
        node_groups = list(self.testcase['node_group_templates'])
    else:
        node_groups = []
        pattern = os.path.join(self.template_path,
                               'node_group_template_*.json')
        for template_file in glob.glob(pattern):
            with open(template_file) as fp:
                node_groups.append(json.load(fp))

    ng_id_map = {}
    for ng in node_groups:
        kwargs = dict(ng)
        kwargs.update(self.plugin_opts)
        kwargs['name'] = utils.rand_name(kwargs['name'])
        kwargs['floating_ip_pool'] = floating_ip_pool
        ng_id_map[ng['name']] = self.__create_node_group_template(**kwargs)
    return ng_id_map
def __create_keypair(self):
    """Register a randomly named Nova keypair and return its name.

    Cleanup is scheduled unless the testcase retains resources.
    """
    key_name = utils.rand_name('scenario_key')
    self.nova.nova_client.keypairs.create(key_name,
                                          public_key=self.public_key)
    if not self.testcase['retain_resources']:
        self.addCleanup(self.nova.delete_keypair, key_name)
    return key_name
def __create_keypair(self):
    """Create a keypair in Nova under a random name and return the name.

    The keypair is deleted on cleanup unless 'retain_resources' is set.
    """
    keypair_name = utils.rand_name('scenario_key')
    self.nova.nova_client.keypairs.create(
        keypair_name, public_key=self.public_key)
    retain = self.testcase['retain_resources']
    if not retain:
        self.addCleanup(self.nova.delete_keypair, keypair_name)
    return keypair_name
def _create_node_group_templates(self):
    """Create every node group template; return {original name: id}.

    Definitions are taken from the testcase when present, otherwise
    loaded from node_group_template_*.json files.
    """
    if self.network['type'] == 'neutron':
        floating_ip_pool = self.neutron.get_network_id(
            self.network['public_network'])
    elif not self.network['auto_assignment_floating_ip']:
        floating_ip_pool = self.network['public_network']
    else:
        floating_ip_pool = None

    definitions = []
    if self.testcase.get('node_group_templates'):
        for ng in self.testcase['node_group_templates']:
            definitions.append(ng)
    else:
        glob_pattern = os.path.join(self.template_path,
                                    'node_group_template_*.json')
        for path in glob.glob(glob_pattern):
            with open(path) as json_fp:
                definitions.append(json.load(json_fp))

    ng_id_map = {}
    for definition in definitions:
        kwargs = dict(definition)
        kwargs.update(self.plugin_opts)
        kwargs['name'] = utils.rand_name(kwargs['name'])
        kwargs['floating_ip_pool'] = floating_ip_pool
        created_id = self.__create_node_group_template(**kwargs)
        ng_id_map[definition['name']] = created_id
    return ng_id_map
def create_flavor(self, flavor_object):
    """Create a Nova flavor from a dict description, with defaults.

    Missing keys fall back to minimal values (1 MB RAM, 1 vCPU, no
    disks); a missing name gets a random 'scenario' name.
    """
    spec = flavor_object
    # NOTE: .get() evaluates its default eagerly, so rand_name is
    # always called — behavior kept intact.
    return self.nova_client.flavors.create(
        spec.get('name', utils.rand_name('scenario')),
        spec.get('ram', 1),
        spec.get('vcpus', 1),
        spec.get('root_disk', 0),
        ephemeral=spec.get('ephemeral_disk', 0),
        swap=spec.get('swap_disk', 0),
        flavorid=spec.get('id', 'auto'))
def create_flavor(self, flavor_object):
    """Create a Nova flavor described by ``flavor_object``.

    Every field is optional; unspecified fields use minimal defaults
    and an absent id lets Nova auto-assign one.
    """
    name = flavor_object.get('name', utils.rand_name('scenario'))
    ram = flavor_object.get('ram', 1)
    vcpus = flavor_object.get('vcpus', 1)
    root_disk = flavor_object.get('root_disk', 0)
    return self.nova_client.flavors.create(
        name, ram, vcpus, root_disk,
        ephemeral=flavor_object.get('ephemeral_disk', 0),
        swap=flavor_object.get('swap_disk', 0),
        flavorid=flavor_object.get('id', 'auto'))
def create(ds, name):
    """Create a data source for the job flow and return its id.

    :param ds: testcase data source description (type, source/destination,
        optional hdfs_username)
    :param name: base name; a random suffix is appended
    :raises ValueError: for an unsupported data source type (previously
        this crashed with an obscure UnboundLocalError on ``url``)
    """
    location = ds.get("source", None)
    if not location:
        location = utils.rand_name(ds["destination"])
    if ds["type"] == "swift":
        url = self._create_swift_data(location)
    elif ds["type"] == "hdfs":
        # Data is uploaded into HDFS under the given (default: oozie) user.
        url = self._create_hdfs_data(location, ds.get("hdfs_username", "oozie"))
    elif ds["type"] == "maprfs":
        url = location
    else:
        # Fail loudly instead of referencing an unbound 'url' below.
        raise ValueError("unsupported data source type: %s" % ds["type"])
    return self.__create_datasource(
        name=utils.rand_name(name),
        description="",
        data_source_type=ds["type"],
        url=url,
        credential_user=self.credentials["os_username"],
        credential_pass=self.credentials["os_password"],
    )
def _create_swift_data(self, source=None):
    """Upload an object to the shared test container; return its URL.

    :param source: optional local file whose contents become the object
        body; when None an empty object is uploaded
    :returns: swift://<container>.sahara/<object> URL
    """
    container = self._get_swift_container()
    path = utils.rand_name('test')
    data = None
    if source:
        # Context manager closes the file; the previous
        # open(source).read() leaked the descriptor.
        with open(source) as source_fd:
            data = source_fd.read()
    self.__upload_to_container(container, path, data)
    return 'swift://%s.sahara/%s' % (container, path)
def _create_hdfs_data(self, source, hdfs_username):
    """Upload a local file into HDFS on the cluster and return its path.

    If ``source`` already looks like an HDFS path under a user directory
    (contains 'user'), it is returned unchanged.

    :param source: local file path (or an existing HDFS path)
    :param hdfs_username: HDFS user that owns the target directory
    :returns: HDFS path of the uploaded file
    """
    def to_hex_present(string):
        # Escape every character as \xNN so arbitrary file content
        # survives being piped through "echo -e" on the remote shell.
        return "".join(map(lambda x: hex(ord(x)).replace("0x", "\\x"), string))

    if "user" in source:
        # Treated as an existing HDFS location; use as-is.
        return source
    hdfs_dir = utils.rand_name("/user/%s/data" % hdfs_username)
    inst_ip = self._get_nodes_with_process()[0]["management_ip"]
    self._run_command_on_node(
        inst_ip,
        'sudo su - -c "hdfs dfs -mkdir -p %(path)s " %(user)s'
        % {"path": hdfs_dir, "user": hdfs_username}
    )
    hdfs_filepath = utils.rand_name(hdfs_dir + "/file")
    # Context manager closes the file; the previous open(source).read()
    # leaked the descriptor.
    with open(source) as source_fd:
        data = source_fd.read()
    self._run_command_on_node(
        inst_ip,
        ('echo -e "%(data)s" | sudo su - -c "hdfs dfs'
         ' -put - %(path)s" %(user)s')
        % {"data": to_hex_present(data), "path": hdfs_filepath,
           "user": hdfs_username},
    )
    return hdfs_filepath
def _create_swift_data(self, source=None):
    """Create a Swift object (optionally from a local file's contents).

    :param source: local file path or None for an empty object
    :returns: swift:// URL of the uploaded object
    """
    container = self._get_swift_container()
    path = utils.rand_name('test')
    data = None
    if source:
        # Close the file deterministically; open(source).read()
        # previously leaked the handle.
        with open(source) as source_fd:
            data = source_fd.read()
    self.__upload_to_container(container, path, data)
    return 'swift://%s.sahara/%s' % (container, path)
def _create_swift_data(self, source=None):
    """Upload an object into the shared Swift container.

    :param source: optional local file supplying the object body
    :returns: swift://<container>.sahara/<object> URL
    """
    container = self._get_swift_container()
    object_name = utils.rand_name("test")
    data = None
    if source:
        with open(source) as fp:
            data = fp.read()
    self.__upload_to_container(container, object_name, data)
    return "swift://%s.sahara/%s" % (container, object_name)
def _create_job_binary(self, job_binary):
    """Register a job binary (swift or internal-database backed).

    Swift binaries carry the OpenStack credentials in ``extra``.
    Returns the created job binary's id.
    """
    url = None
    extra = {}
    binary_type = job_binary["type"]
    if binary_type == "swift":
        url = self._create_swift_data(job_binary["source"])
        extra["user"] = self.credentials["os_username"]
        extra["password"] = self.credentials["os_password"]
    if binary_type == "database":
        url = self._create_internal_db_data(job_binary["source"])
    basename = os.path.basename(job_binary["source"])
    job_binary_name = "%s-%s" % (utils.rand_name("test"), basename)
    return self.__create_job_binary(job_binary_name, url, "", extra)
def _create_cluster(self, cluster_template_id):
    """Launch a cluster from the given template; return the cluster id.

    Cluster parameters come from the testcase when present; otherwise
    an empty default description is used.
    """
    cluster_def = self.testcase.get("cluster")
    kwargs = dict(cluster_def) if cluster_def else {}  # default template
    kwargs.update(self.plugin_opts)
    kwargs["name"] = utils.rand_name(kwargs.get("name", "test"))
    kwargs["cluster_template_id"] = cluster_template_id
    kwargs["default_image_id"] = self.nova.get_image_id(
        self.testcase["image"])
    kwargs["user_keypair_id"] = self.key_name
    return self.__create_cluster(**kwargs)
def _create_cluster(self, cluster_template_id):
    """Launch a cluster from ``cluster_template_id``; return its id."""
    if self.testcase.get('cluster'):
        cluster_kwargs = dict(self.testcase['cluster'])
    else:
        # Fall back to an empty default description.
        cluster_kwargs = {}
    cluster_kwargs.update(self.plugin_opts)
    cluster_kwargs['name'] = utils.rand_name(
        cluster_kwargs.get('name', 'test'))
    cluster_kwargs['cluster_template_id'] = cluster_template_id
    image_id = self.nova.get_image_id(self.testcase['image'])
    cluster_kwargs['default_image_id'] = image_id
    return self.__create_cluster(**cluster_kwargs)
def _create_job_binary(self, job_binary):
    """Create a job binary from its testcase description.

    Swift-backed binaries get OpenStack credentials attached via
    ``extra``; database-backed ones are stored in Sahara's internal DB.
    """
    url = None
    extra = {}
    if job_binary['type'] == 'swift':
        url = self._create_swift_data(job_binary['source'])
        extra['user'] = self.credentials['os_username']
        extra['password'] = self.credentials['os_password']
    if job_binary['type'] == 'database':
        url = self._create_internal_db_data(job_binary['source'])
    source_basename = os.path.basename(job_binary['source'])
    binary_name = '%s-%s' % (utils.rand_name('test'), source_basename)
    return self.__create_job_binary(binary_name, url, '', extra)
def _create_cluster(self, cluster_template_id):
    """Create a cluster from the template; return the new cluster id."""
    kwargs = (dict(self.testcase['cluster'])
              if self.testcase.get('cluster')
              else {})  # default template
    kwargs.update(self.plugin_opts)
    kwargs['name'] = utils.rand_name(kwargs.get('name', 'test'))
    kwargs['cluster_template_id'] = cluster_template_id
    kwargs['default_image_id'] = self.nova.get_image_id(
        self.testcase['image'])
    return self.__create_cluster(**kwargs)
def _create_job_binary(self, job_binary):
    """Register a job binary and return its id.

    The binary's storage backend depends on its 'type': swift objects
    carry credentials in ``extra``; 'database' stores the payload in
    Sahara's internal DB.
    """
    extra = {}
    url = None
    kind = job_binary['type']
    if kind == 'swift':
        url = self._create_swift_data(job_binary['source'])
        extra['user'] = self.credentials['os_username']
        extra['password'] = self.credentials['os_password']
    if kind == 'database':
        url = self._create_internal_db_data(job_binary['source'])
    job_binary_name = '%s-%s' % (
        utils.rand_name('test'),
        os.path.basename(job_binary['source']))
    return self.__create_job_binary(job_binary_name, url, '', extra)
def check_scale(self):
    """Scale the cluster per the testcase and validate node counts.

    Scaling operations come from the testcase or scale.json.  After
    scaling, the resulting node groups are validated against the
    expected counts derived from the pre-scale state and the request.
    """
    scale_ops = []
    ng_before_scale = self.sahara.get_cluster(self.cluster_id).node_groups
    if self.testcase.get('scaling'):
        scale_ops = self.testcase['scaling']
    else:
        scale_path = os.path.join(self.template_path, 'scale.json')
        if os.path.exists(scale_path):
            with open(scale_path) as data:
                scale_ops = json.load(data)

    def _template_id(node_scale):
        # dict.get(key, default) evaluated get_node_group_template_id()
        # eagerly even on a map hit, issuing a needless API call per
        # operation; look up lazily instead.
        if node_scale in self.ng_id_map:
            return self.ng_id_map[node_scale]
        return self.sahara.get_node_group_template_id(node_scale)

    def _group_name(node_scale):
        # Same lazy-fallback fix as _template_id.
        if node_scale in self.ng_name_map:
            return self.ng_name_map[node_scale]
        return self.sahara.get_node_group_template_id(node_scale)

    body = {}
    for op in scale_ops:
        node_scale = op['node_group']
        if op['operation'] == 'add':
            body.setdefault('add_node_groups', []).append({
                'node_group_template_id': _template_id(node_scale),
                'count': op['size'],
                'name': utils.rand_name(node_scale)
            })
        if op['operation'] == 'resize':
            body.setdefault('resize_node_groups', []).append({
                'name': _group_name(node_scale),
                'count': op['size']
            })
    if body:
        self.sahara.scale_cluster(self.cluster_id, body)
        self._poll_cluster_status(self.cluster_id)
        ng_after_scale = self.sahara.get_cluster(
            self.cluster_id).node_groups
        self._validate_scaling(
            ng_after_scale,
            self._get_expected_count_of_nodes(ng_before_scale, body))
def check_scale(self):
    """Scale the cluster per the testcase and validate node counts.

    Scaling operations come from the testcase ('scaling' key) or from a
    scale.json file next to the templates.  When any exist, the cluster
    is scaled, polled until settled, and the resulting node groups are
    validated against the expected counts.
    """
    scale_ops = []
    # Snapshot node groups before scaling for the final validation.
    ng_before_scale = self.sahara.get_cluster(self.cluster_id).node_groups
    if self.testcase.get('scaling'):
        scale_ops = self.testcase['scaling']
    else:
        scale_path = os.path.join(self.template_path, 'scale.json')
        if os.path.exists(scale_path):
            with open(scale_path) as data:
                scale_ops = json.load(data)
    body = {}
    for op in scale_ops:
        node_scale = op['node_group']
        if op['operation'] == 'add':
            if 'add_node_groups' not in body:
                body['add_node_groups'] = []
            # NOTE(review): .get evaluates its default eagerly, so
            # get_node_group_template_id() is called even on a map hit
            # — an extra API call per op; confirm whether intended.
            body['add_node_groups'].append({
                'node_group_template_id':
                    self.ng_id_map.get(node_scale,
                                       self.sahara.get_node_group_template_id(
                                           node_scale)),
                'count': op['size'],
                'name': utils.rand_name(node_scale)
            })
        if op['operation'] == 'resize':
            if 'resize_node_groups' not in body:
                body['resize_node_groups'] = []
            # NOTE(review): the fallback here is a template *id* used
            # as a group *name* — verify against the scaling API.
            body['resize_node_groups'].append({
                'name': self.ng_name_map.get(
                    node_scale,
                    self.sahara.get_node_group_template_id(node_scale)),
                'count': op['size']
            })
    if body:
        self.sahara.scale_cluster(self.cluster_id, body)
        self._poll_cluster_status(self.cluster_id)
        ng_after_scale = self.sahara.get_cluster(
            self.cluster_id).node_groups
        self._validate_scaling(ng_after_scale,
                               self._get_expected_count_of_nodes(
                                   ng_before_scale, body))
def check(self):
    """Verify Kafka works on the cluster (topic create + message send).

    Requires a running cluster with at least one ZooKeeper server and
    Kafka brokers; optionally runs a Spark job that consumes from the
    topic when 'spark_flow_test' is configured.  Initially designed for
    the Ambari plugin.
    """
    # This check will check correct work of Kafka
    # Required things to run this check:
    # Cluster running with at least one ZooKeeper server and
    # Kafka Brokers and Spark can be included too
    # Initially designed for Ambari plugin.
    ckd = self.base.testcase.get('custom_checks', {}).get('check_kafka', {})
    topic = ckd.get('topic', 'test-topic')
    # Randomize so repeated runs don't collide on the same topic name.
    topic = utils.rand_name(topic)
    zk = ckd.get('zookeeper_process', "ZooKeeper")
    kb = ckd.get('kafka_process', "Kafka Broker")
    shs = ckd.get('spark_process', "Spark History Server")
    # Disable spark job running by default
    spark_flow = ckd.get('spark_flow_test', None)
    kb_port = ckd.get('kafka_port', 6667)
    zk_port = ckd.get('zookeeper_port', 2181)
    node_domain = ckd.get('node_domain', "novalocal")
    # host:port lists for brokers and zookeepers, by process name.
    broker_list = self._get_nodes_desc_list(
        self._get_nodes_with_process(kb), node_domain, kb_port)
    zookeeper_list = self._get_nodes_desc_list(
        self._get_nodes_with_process(zk), node_domain, zk_port)
    self._create_test_topic(kb, topic, zookeeper_list)
    self._send_messages(kb, topic, broker_list)
    if spark_flow:
        dest = self._prepare_spark_kafka_job_running(shs)
        if 'configs' not in spark_flow:
            spark_flow['configs'] = {}
        # override driver classpath
        spark_flow['configs']['edp.spark.driver.classpath'] = dest
        timeout = spark_flow.get('timeout', 30)
        if 'args' not in spark_flow:
            spark_flow['args'] = []
        # Substitute runtime values into the job's argument templates.
        # NOTE: spark_flow (a testcase dict) is mutated in place.
        new_args = []
        for arg in spark_flow['args']:
            arg = arg.format(zookeeper_list=zookeeper_list,
                             timeout=timeout,
                             topic=topic)
            new_args.append(arg)
        spark_flow['args'] = new_args
        to_execute = [self._prepare_job_running(spark_flow)]
        self._job_batching(to_execute)
def check(self):
    """Kafka health check: create a test topic and publish messages.

    Needs a cluster with at least one ZooKeeper server and Kafka
    brokers; an optional Spark consumer job runs when
    'spark_flow_test' is present in the custom check config.
    """
    # This check will check correct work of Kafka
    # Required things to run this check:
    # Cluster running with at least one ZooKeeper server and
    # Kafka Brokers and Spark can be included too
    # Initially designed for Ambari plugin.
    ckd = self.base.testcase.get(
        'custom_checks', {}).get('check_kafka', {})
    topic = ckd.get('topic', 'test-topic')
    # Randomized so repeated runs don't reuse a topic name.
    topic = utils.rand_name(topic)
    zk = ckd.get('zookeeper_process', "ZooKeeper")
    kb = ckd.get('kafka_process', "Kafka Broker")
    shs = ckd.get('spark_process', "Spark History Server")
    # Disable spark job running by default
    spark_flow = ckd.get('spark_flow_test', None)
    kb_port = ckd.get('kafka_port', 6667)
    zk_port = ckd.get('zookeeper_port', 2181)
    node_domain = ckd.get('node_domain', "novalocal")
    # Build host:port endpoint lists for brokers and zookeepers.
    broker_list = self._get_nodes_desc_list(
        self._get_nodes_with_process(kb), node_domain, kb_port)
    zookeeper_list = self._get_nodes_desc_list(
        self._get_nodes_with_process(zk), node_domain, zk_port)
    self._create_test_topic(kb, topic, zookeeper_list)
    self._send_messages(kb, topic, broker_list)
    if spark_flow:
        dest = self._prepare_spark_kafka_job_running(shs)
        if 'configs' not in spark_flow:
            spark_flow['configs'] = {}
        # override driver classpath
        spark_flow['configs']['edp.spark.driver.classpath'] = dest
        timeout = spark_flow.get('timeout', 30)
        if 'args' not in spark_flow:
            spark_flow['args'] = []
        # Fill the job's argument templates with runtime values.
        # NOTE: spark_flow (a testcase dict) is mutated in place.
        new_args = []
        for arg in spark_flow['args']:
            arg = arg.format(zookeeper_list=zookeeper_list,
                             timeout=timeout,
                             topic=topic)
            new_args.append(arg)
        spark_flow['args'] = new_args
        to_execute = [self._prepare_job_running(spark_flow)]
        self._job_batching(to_execute)
def check_scale(self):
    """Scale the cluster per the testcase and validate the result.

    Operations come from the testcase or scale.json; after scaling the
    node group counts are validated against expectations.
    """
    ng_before_scale = self.sahara.get_cluster(self.cluster_id).node_groups
    if self.testcase.get("scaling"):
        scale_ops = self.testcase["scaling"]
    else:
        scale_ops = []
        scale_path = os.path.join(self.template_path, "scale.json")
        if os.path.exists(scale_path):
            with open(scale_path) as fp:
                scale_ops = json.load(fp)
    body = {}
    for op in scale_ops:
        node_scale = op["node_group"]
        if op["operation"] == "add":
            # NOTE: the .get default is evaluated eagerly (extra API
            # call on a map hit) — preserved from the original.
            template_id = self.ng_id_map.get(
                node_scale,
                self.sahara.get_node_group_template_id(node_scale))
            body.setdefault("add_node_groups", []).append({
                "node_group_template_id": template_id,
                "count": op["size"],
                "name": utils.rand_name(node_scale),
            })
        if op["operation"] == "resize":
            group_name = self.ng_name_map.get(
                node_scale,
                self.sahara.get_node_group_template_id(node_scale))
            body.setdefault("resize_node_groups", []).append({
                "name": group_name,
                "count": op["size"],
            })
    if body:
        self.sahara.scale_cluster(self.cluster_id, body)
        self._poll_cluster_status(self.cluster_id)
        ng_after_scale = self.sahara.get_cluster(self.cluster_id).node_groups
        self._validate_scaling(
            ng_after_scale,
            self._get_expected_count_of_nodes(ng_before_scale, body))
def _create_node_group_templates(self):
    """Create node group templates, honoring proxy-gateway setups.

    When any group is a proxy gateway, non-proxy groups get no floating
    IP pool (they are reached indirectly).  Returns {original name: id}.
    """
    floating_ip_pool = None
    if self.network['type'] == 'neutron':
        floating_ip_pool = self.neutron.get_network_id(
            self.network['public_network'])
    elif not self.network['auto_assignment_floating_ip']:
        floating_ip_pool = self.network['public_network']

    if self.testcase.get('node_group_templates'):
        node_groups = list(self.testcase['node_group_templates'])
    else:
        node_groups = []
        pattern = os.path.join(self.template_path,
                               'node_group_template_*.json')
        for template_file in glob.glob(pattern):
            with open(template_file) as fp:
                node_groups.append(json.load(fp))

    check_indirect_access = any(
        ng.get('is_proxy_gateway') for ng in node_groups)

    ng_id_map = {}
    for ng in node_groups:
        kwargs = dict(ng)
        kwargs.update(self.plugin_opts)
        kwargs['flavor_id'] = self._get_flavor_id(kwargs['flavor'])
        del kwargs['flavor']
        kwargs['name'] = utils.rand_name(kwargs['name'])
        if check_indirect_access and not kwargs.get('is_proxy_gateway',
                                                    False):
            kwargs['floating_ip_pool'] = None
            # NOTE(review): this records the last NON-proxy group's
            # name as proxy_ng_name — confirm that is intended.
            self.proxy_ng_name = kwargs['name']
        else:
            kwargs['floating_ip_pool'] = floating_ip_pool
        ng_id_map[ng['name']] = self.__create_node_group_template(**kwargs)
    return ng_id_map
def _create_node_group_templates(self):
    """Create node group templates; return {original name: id}.

    If any group is a proxy gateway, non-proxy groups are created
    without a floating IP pool (reached indirectly via the proxy).
    """
    if self.network['type'] == 'neutron':
        floating_ip_pool = self.neutron.get_network_id(
            self.network['public_network'])
    elif not self.network['auto_assignment_floating_ip']:
        floating_ip_pool = self.network['public_network']
    else:
        floating_ip_pool = None

    definitions = []
    if self.testcase.get('node_group_templates'):
        for ng in self.testcase['node_group_templates']:
            definitions.append(ng)
    else:
        glob_pattern = os.path.join(self.template_path,
                                    'node_group_template_*.json')
        for path in glob.glob(glob_pattern):
            with open(path) as json_fp:
                definitions.append(json.load(json_fp))

    check_indirect_access = any(
        d.get('is_proxy_gateway') for d in definitions)

    ng_id_map = {}
    for definition in definitions:
        kwargs = dict(definition)
        kwargs.update(self.plugin_opts)
        kwargs['flavor_id'] = self._get_flavor_id(kwargs['flavor'])
        del kwargs['flavor']
        kwargs['name'] = utils.rand_name(kwargs['name'])
        is_proxy = kwargs.get('is_proxy_gateway', False)
        if check_indirect_access and not is_proxy:
            kwargs['floating_ip_pool'] = None
        else:
            kwargs['floating_ip_pool'] = floating_ip_pool
        ng_id_map[definition['name']] = (
            self.__create_node_group_template(**kwargs))
    return ng_id_map
def check_scale(self):
    """Apply the testcase's add/resize scaling operations, if any.

    The request body is assembled from the operations and submitted in
    one scale call, then the cluster is polled until it settles.
    """
    scale_ops = self.testcase.get('scaling')
    if not scale_ops:
        scale_ops = []
        scale_json = os.path.join(self.template_path, 'scale.json')
        if os.path.exists(scale_json):
            with open(scale_json) as scale_fp:
                scale_ops = json.load(scale_fp)
    request = {}
    for operation in scale_ops:
        group = operation['node_group']
        if operation['operation'] == 'add':
            if 'add_node_groups' not in request:
                request['add_node_groups'] = []
            request['add_node_groups'].append({
                'node_group_template_id': self.ng_id_map[group],
                'count': operation['size'],
                'name': utils.rand_name(group),
            })
        if operation['operation'] == 'resize':
            if 'resize_node_groups' not in request:
                request['resize_node_groups'] = []
            request['resize_node_groups'].append({
                'name': self.ng_name_map[group],
                'count': operation['size'],
            })
    if request:
        self.sahara.scale_cluster(self.cluster_id, request)
        self._poll_cluster_status(self.cluster_id)
def _create_node_group_templates(self):
    """Create node group templates and map original names to ids.

    With a proxy gateway present, non-proxy groups get no floating IP
    pool since they are reached through the proxy.
    """
    floating_ip_pool = None
    if self.network["type"] == "neutron":
        floating_ip_pool = self.neutron.get_network_id(
            self.network["public_network"])
    elif not self.network["auto_assignment_floating_ip"]:
        floating_ip_pool = self.network["public_network"]

    if self.testcase.get("node_group_templates"):
        node_groups = list(self.testcase["node_group_templates"])
    else:
        node_groups = []
        pattern = os.path.join(self.template_path,
                               "node_group_template_*.json")
        for template_file in glob.glob(pattern):
            with open(template_file) as fp:
                node_groups.append(json.load(fp))

    check_indirect_access = any(
        ng.get("is_proxy_gateway") for ng in node_groups)

    ng_id_map = {}
    for ng in node_groups:
        kwargs = dict(ng)
        kwargs.update(self.plugin_opts)
        kwargs["flavor_id"] = self._get_flavor_id(kwargs["flavor"])
        del kwargs["flavor"]
        kwargs["name"] = utils.rand_name(kwargs["name"])
        if check_indirect_access and not kwargs.get("is_proxy_gateway",
                                                    False):
            kwargs["floating_ip_pool"] = None
        else:
            kwargs["floating_ip_pool"] = floating_ip_pool
        ng_id_map[ng["name"]] = self.__create_node_group_template(**kwargs)
    return ng_id_map
def _create_job(self, type, mains, libs):
    """Register an EDP job under a random name; return its id."""
    job_name = utils.rand_name('test')
    return self.__create_job(job_name, type, mains, libs, '')
def _create_job(self, type, mains, libs):
    """Create an EDP job with a random name and empty description."""
    name = utils.rand_name("test")
    description = ""
    return self.__create_job(name, type, mains, libs, description)
def _create_internal_db_data(self, source):
    """Store a local file's contents in Sahara's internal database.

    :param source: local file path to read
    :returns: internal-db://<id> URL of the stored data
    """
    with open(source) as source_fd:
        data = source_fd.read()
    # Renamed from 'id' to avoid shadowing the builtin id().
    data_id = self.__create_internal_db_data(utils.rand_name("test"), data)
    return "internal-db://%s" % data_id
def _get_swift_container(self):
    """Return the shared Swift container name, creating it on first use.

    Bug fix: ``getattr(self, '__swift_container', None)`` looked up the
    literal attribute name, but the assignment below is name-mangled to
    ``_<ClassName>__swift_container`` — so the cache never hit and a
    new container was created on every call.  Reading the attribute
    directly uses the same mangled name as the assignment.
    """
    try:
        return self.__swift_container
    except AttributeError:
        self.__swift_container = self.__create_container(
            utils.rand_name('sahara-tests'))
        return self.__swift_container
def _create_internal_db_data(self, source):
    """Store a local file's contents in Sahara's internal database.

    :param source: local file path to read
    :returns: internal-db://<id> URL of the stored data
    """
    # Context manager closes the file; open(source).read() previously
    # leaked the descriptor.  Also renamed 'id' -> 'data_id' so the
    # builtin id() is not shadowed.
    with open(source) as source_fd:
        data = source_fd.read()
    data_id = self.__create_internal_db_data(utils.rand_name('test'), data)
    return 'internal-db://%s' % data_id
def _create_job(self, type, mains, libs):
    """Create an EDP job (random name, no description); return its id."""
    return self.__create_job(
        utils.rand_name('test'), type, mains, libs, '')
def _create_internal_db_data(self, source):
    """Upload a local file into Sahara's internal DB storage.

    :param source: local file path to read
    :returns: internal-db://<id> URL of the stored data
    """
    with open(source) as source_fd:
        data = source_fd.read()
    # Renamed from 'id' to avoid shadowing the builtin id().
    data_id = self.__create_internal_db_data(utils.rand_name('test'), data)
    return 'internal-db://%s' % data_id
def _get_swift_container(self):
    """Return (creating once if needed) the shared Swift container name.

    Bug fix: the previous ``getattr(self, '__swift_container', None)``
    probed the un-mangled name while the assignment is compiled to the
    mangled ``_<ClassName>__swift_container``, so the cached container
    was never found and every call created a new one.  Direct attribute
    access mangles identically on read and write, restoring the cache.
    """
    try:
        return self.__swift_container
    except AttributeError:
        self.__swift_container = self.__create_container(
            utils.rand_name('sahara-tests'))
        return self.__swift_container