def create_cluster(values):
    values = values.copy()
    session = ctx.current().session
    with session.begin():
        values['tenant_id'] = ctx.current().tenant_id
        ngs_vals = values.pop('node_groups', [])
        cluster_tmpl_id = values.pop('cluster_template_id', None)
        if cluster_tmpl_id:
            cluster_tmpl = get_cluster_template(id=cluster_tmpl_id)
            cluster = cluster_tmpl.to_cluster(values)
        else:
            cluster = m.Cluster(**values)

        if not ngs_vals and cluster_tmpl_id:
            # copy node groups from cluster template
            ngs_vals = cluster_tmpl.dict['node_groups']

        for ng in ngs_vals:
            tmpl_id = ng.get('node_group_template_id')
            if tmpl_id:
                tmpl = get_node_group_template(id=tmpl_id)
                node_group = tmpl.to_object(ng, m.NodeGroup)
            else:
                node_group = m.NodeGroup(**ng)
            cluster.node_groups.append(node_group)
            session.add(node_group)

        session.add(cluster)

    return cluster
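# A minimal sketch of the payload create_cluster() expects. The exact schema
# is defined by the API validation layer; the keys and values below are
# hypothetical illustrations, not taken from this module.
SAMPLE_CLUSTER_VALUES = {
    'name': 'my-cluster',               # assumed identifying fields
    'plugin_name': 'vanilla',
    'hadoop_version': 'hv-1',
    'cluster_template_id': None,        # set to reuse a stored template
    'node_groups': [
        # either inline definitions ...
        {'name': 'master', 'flavor_id': 'f-1', 'count': 1},
        # ... or references resolved via get_node_group_template()
        {'name': 'worker', 'node_group_template_id': 'ngt-uuid', 'count': 3},
    ],
}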
def create_node_group_template(values):
    values = values.copy()
    session = ctx.current().session
    with session.begin():
        values['tenant_id'] = ctx.current().tenant_id
        node_group_template = m.NodeGroupTemplate(**values)
        session.add(node_group_template)

    return node_group_template
def _retrieve_tenant():
    try:
        return context.current().headers["X-Tenant-Name"]
    except RuntimeError:
        LOG.error("Cannot retrieve tenant for swift integration. "
                  "Stopping cluster creation")
        # TODO(slukjanov?): raise a special error here
        raise RuntimeError("Cannot retrieve tenant for swift integration")
def execute_job(job_id, data):
    # Elements common to all job types
    cluster_id = data['cluster_id']
    configs = data.get('job_configs', {})

    ctx = context.current()
    cluster = conductor.cluster_get(ctx, cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    instance = plugin.get_oozie_server(cluster)

    extra = {}
    info = None
    if CONF.use_namespaces and not CONF.use_floating_ips:
        info = instance.remote().get_neutron_info()
        extra['neutron'] = info

    # Not in Java job types but present for all others
    input_id = data.get('input_id', None)
    output_id = data.get('output_id', None)

    # Since we will use a unified class in the database, we pass
    # a superset for all job types
    job_ex_dict = {'input_id': input_id, 'output_id': output_id,
                   'job_id': job_id, 'cluster_id': cluster_id,
                   'info': {'status': 'Pending'}, 'job_configs': configs,
                   'extra': extra}
    job_execution = conductor.job_execution_create(context.ctx(), job_ex_dict)

    context.spawn("Starting Job Execution %s" % job_execution.id,
                  manager.run_job, job_execution)
    return job_execution
def testCreateClusterTemplateWithNodeGroupTemplates(self):
    session = ctx.current().session
    with session.begin():
        ct = m.ClusterTemplate('ct', 't-1', 'p-1', 'hv-1')
        session.add(ct)

        ngts = []
        for i in xrange(0, 3):
            ngt = m.NodeGroupTemplate('ngt-%s' % i, 't-1', 'f-1', 'p-1',
                                      'hv-1', ['np-1', 'np-2'])
            session.add(ngt)
            session.flush()
            rel = ct.add_node_group_template({
                'node_group_template_id': ngt.id,
                'name': 'group-%s' % i,
                'count': 5 + i
            })
            session.add(rel)
            ngts.append(ngt)

    with session.begin():
        res = session.query(m.ClusterTemplate).filter_by().first()
        self.assertIsValidModelObject(res)
        self.assertEqual(len(res.node_group_templates), 3)
        self.assertEqual(
            set(t.name for t in res.node_group_templates),
            set('ngt-%s' % i for i in xrange(0, 3)))
def testCreateNodeGroupTemplate(self):
    session = ctx.current().session
    with session.begin():
        ngt = m.NodeGroupTemplate('ngt-1', 't-1', 'f-1', 'p-1', 'hv-1',
                                  ['np-1', 'np-2'],
                                  node_configs=SAMPLE_CONFIGS)
        session.add(ngt)

    res = session.query(m.NodeGroupTemplate).filter_by().first()
    self.assertIsValidModelObject(res)
    self.assertEqual(['np-1', 'np-2'], res.node_processes)
    self.assertEqual(SAMPLE_CONFIGS, res.node_configs)

    res_dict = self.get_clean_dict(res)

    self.assertEqual(res_dict, {
        'flavor_id': 'f-1',
        'hadoop_version': 'hv-1',
        'name': 'ngt-1',
        'node_configs': SAMPLE_CONFIGS,
        'node_processes': ['np-1', 'np-2'],
        'plugin_name': 'p-1',
        'image_id': None,
        'volume_mount_prefix': '/volumes/disk',
        'volumes_per_node': 0,
        'volumes_size': 10,
        'description': None
    })
def convert_to_cluster_template(plugin_name, version, config_file):
    plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
    tenant_id = context.current().tenant_id
    name = uuidutils.generate_uuid()
    ct = m.ClusterTemplate(name, tenant_id, plugin_name, version)
    plugin.convert(ct, config_file)
    return s.persist_cluster_template(ct)
def _retrieve_tenant():
    try:
        return context.current().tenant_name
    except RuntimeError:
        LOG.exception("Cannot retrieve tenant for swift integration. "
                      "Stopping cluster creation")
        # TODO(slukjanov?): raise a special error here
        raise RuntimeError("Cannot retrieve tenant for swift integration")
def testCreateCluster(self):
    session = ctx.current().session
    with session.begin():
        c = m.Cluster("c-1", "t-1", "p-1", "hv-1")
        session.add(c)

    with session.begin():
        res = session.query(m.Cluster).filter_by().first()
        self.assertIsValidModelObject(res)
def use_os_admin_auth_token(cluster):
    if cluster.trust_id:
        ctx = context.current()
        ctx.username = CONF.os_admin_username
        ctx.tenant_id = cluster.tenant_id
        client = keystone.client_for_trusts(CONF.os_admin_username,
                                            CONF.os_admin_password,
                                            cluster.trust_id)
        ctx.token = client.auth_token
        ctx.service_catalog = json.dumps(
            client.service_catalog.catalog['catalog'])
def client():
    headers = context.current().headers
    username = headers['X-User-Name']
    token = headers['X-Auth-Token']
    tenant = headers['X-Tenant-Id']
    identity_url = base.url_for(headers, 'identity')
    keystone = keystone_client.Client(username=username, token=token,
                                      tenant_id=tenant,
                                      auth_url=identity_url)
    return keystone
def create_cluster_template(values):
    values = values.copy()
    session = ctx.current().session
    with session.begin():
        values['tenant_id'] = ctx.current().tenant_id
        ngts_vals = values.pop('node_groups', [])
        cluster_template = m.ClusterTemplate(**values)
        for ngt in ngts_vals:
            tmpl_id = ngt.get('node_group_template_id')
            if tmpl_id:
                tmpl = get_node_group_template(id=tmpl_id)
                node_group = tmpl.to_object(ngt, m.TemplatesRelation)
                node_group.cluster_template_id = cluster_template.id
            else:
                node_group = m.TemplatesRelation(**ngt)
            cluster_template.node_groups.append(node_group)
            session.add(node_group)

        session.add(cluster_template)

    return cluster_template
def client():
    ctx = context.current()
    volume_url = base.url_for(ctx.service_catalog, 'volume')

    cinder = cinder_client.Client(ctx.username, ctx.token, ctx.tenant_id,
                                  volume_url)
    cinder.client.auth_token = ctx.token
    cinder.client.management_url = volume_url

    return cinder
def test_model_update(self):
    _insert_test_object()

    t = context.current().session.query(TestModel).first()
    context.model_update(t, test_field=42)

    db_t = context.model_query(TestModel).first()
    self.assertEqual(t.id, db_t.id)
    self.assertEqual(42, db_t.test_field)
def client():
    ctx = context.current()
    identity_url = base.url_for(ctx.service_catalog, 'identity')
    keystone = keystone_client.Client(username=ctx.username,
                                      user_id=ctx.user_id,
                                      token=ctx.token,
                                      tenant_name=ctx.tenant_name,
                                      tenant_id=ctx.tenant_id,
                                      auth_url=identity_url)
    return keystone
def get_neutron_info(self):
    neutron_info = h.HashableDict()
    neutron_info['network'] = \
        self.instance.node_group.cluster.neutron_management_network
    ctx = context.current()
    neutron_info['uri'] = base.url_for(ctx.service_catalog, 'network')
    neutron_info['token'] = ctx.token
    neutron_info['tenant'] = ctx.tenant_name
    neutron_info['host'] = self.instance.management_ip

    LOG.debug('Returning neutron info: {0}'.format(neutron_info))
    return neutron_info
def client():
    headers = context.current().headers
    username = headers['X-User-Name']
    token = headers['X-Auth-Token']
    tenant = headers['X-Tenant-Id']
    volume_url = base.url_for(headers, 'volume')

    cinder = cinder_client.Client(username, token, tenant, volume_url)
    cinder.client.auth_token = token
    cinder.client.management_url = volume_url

    return cinder
def create_trust(cluster):
    client = keystone.client()
    trustee_id = keystone.client_for_trusts(CONF.os_admin_username,
                                            CONF.os_admin_password,
                                            None).user_id
    ctx = context.current()
    trust = client.trusts.create(trustor_user=client.user_id,
                                 trustee_user=trustee_id,
                                 impersonation=True,
                                 role_names=ctx.roles,
                                 project=client.tenant_id)
    conductor.cluster_update(ctx, cluster, {'trust_id': trust.id})
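# A sketch of how the trust helpers are assumed to fit together:
# create_trust() stores a trust_id on the cluster, and
# use_os_admin_auth_token() (defined earlier in this file) later redeems it
# for an admin-scoped token. run_as_admin is a hypothetical caller, not part
# of the module.
def run_as_admin(cluster, func):
    create_trust(cluster)               # trustor: current user, trustee: os_admin
    use_os_admin_auth_token(cluster)    # swap the context token for a trust-scoped one
    return func(cluster)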
def client_for_trusts(username, password, trust_id):
    if not CONF.use_identity_api_v3:
        raise Exception("Trusts aren't implemented in keystone api "
                        "less than v3")

    ctx = context.current()
    auth_url = base.retrieve_auth_url()
    keystone = keystone_client_v3.Client(username=username,
                                         password=password,
                                         tenant_id=ctx.tenant_id,
                                         auth_url=auth_url,
                                         trust_id=trust_id)
    keystone.management_url = auth_url
    return keystone
def client():
    ctx = context.current()
    compute_url = base.url_for(ctx.service_catalog, 'compute')

    nova = nova_client.Client(ctx.username, ctx.token, ctx.tenant_id,
                              auth_url=compute_url)
    nova.client.auth_token = ctx.token
    nova.client.management_url = compute_url
    nova.images = images.SavannaImageManager(nova)
    if not hasattr(nova.keypairs, 'get'):
        nova.keypairs = keypairs.SavannaKeypairManager(nova)
    return nova
def client():
    ctx = context.current()
    compute_url = base.url_for(ctx.service_catalog, 'compute')

    nova = nova_client.Client(ctx.username, ctx.token, ctx.tenant_id,
                              auth_url=compute_url)
    nova.client.auth_token = ctx.token
    nova.client.management_url = compute_url
    nova.images = images.SavannaImageManager(nova)
    # Unconditionally patch 'get' until
    # https://bugs.launchpad.net/python-novaclient/+bug/1223934
    # is fixed for our use case and all the versions we support.
    nova.keypairs = keypairs.SavannaKeypairManager(nova)
    return nova
def client():
    headers = context.current().headers
    username = headers['X-User-Name']
    token = headers['X-Auth-Token']
    tenant = headers['X-Tenant-Id']
    compute_url = base.url_for(headers, 'compute')

    nova = nova_client.Client(username, token, tenant, auth_url=compute_url)
    nova.client.auth_token = token
    nova.client.management_url = compute_url
    nova.images = images.SavannaImageManager(nova)
    if not hasattr(nova.keypairs, 'get'):
        nova.keypairs = keypairs.SavannaKeypairManager(nova)
    return nova
def client():
    ctx = context.current()
    auth_url = base.retrieve_auth_url()

    if CONF.use_identity_api_v3:
        keystone = keystone_client_v3.Client(username=ctx.username,
                                             token=ctx.token,
                                             tenant_id=ctx.tenant_id,
                                             auth_url=auth_url)
        keystone.management_url = auth_url
    else:
        keystone = keystone_client.Client(username=ctx.username,
                                          token=ctx.token,
                                          tenant_id=ctx.tenant_id,
                                          auth_url=auth_url)

    return keystone
def execute_job(job_id, data):
    # Elements common to all job types
    cluster_id = data['cluster_id']
    configs = data.get('job_configs', {})

    # Squash args if it is a dict.
    # TODO(tmckay): remove this after bug #1269968 is fixed on the UI side
    # (tracked in bug #1270882)
    if "args" in configs and type(configs["args"]) is dict:
        configs["args"] = []

    ctx = context.current()
    cluster = conductor.cluster_get(ctx, cluster_id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    instance = plugin.get_oozie_server(cluster)

    extra = {}
    info = None
    if CONF.use_namespaces and not CONF.use_floating_ips:
        info = instance.remote().get_neutron_info()
        extra['neutron'] = info

    # Not in Java job types but present for all others
    input_id = data.get('input_id', None)
    output_id = data.get('output_id', None)

    # Present for Java job types
    main_class = data.get('main_class', '')
    java_opts = data.get('java_opts', '')

    # Since we will use a unified class in the database, we pass
    # a superset for all job types
    job_ex_dict = {'main_class': main_class,
                   'java_opts': java_opts,
                   'input_id': input_id, 'output_id': output_id,
                   'job_id': job_id, 'cluster_id': cluster_id,
                   'info': {'status': 'Pending'}, 'job_configs': configs,
                   'extra': extra}
    job_execution = conductor.job_execution_create(context.ctx(), job_ex_dict)

    context.spawn("Starting Job Execution %s" % job_execution.id,
                  manager.run_job, job_execution)
    return job_execution
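# A hypothetical example of the `data` dict execute_job() consumes; per the
# comments above, input_id/output_id are absent for Java job types, while
# main_class/java_opts are present only for them. All values are made up.
SAMPLE_JOB_DATA = {
    'cluster_id': 'cluster-uuid',
    'job_configs': {'configs': {}, 'args': []},
    'input_id': 'input-uuid',
    'output_id': 'output-uuid',
}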
def setUp(self):
    super(TestClusterCreateFlavorValidation, self).setUp()

    context.current().tenant_id = '1234'
    modules = [
        "savanna.service.validations.base.check_plugin_name_exists",
        "savanna.service.validations.base.check_plugin_supports_version",
        "savanna.service.validations.base._get_plugin_configs",
        "savanna.service.validations.base.check_node_processes",
        "savanna.utils.openstack.nova.client",
    ]
    self.patchers = []
    for module in modules:
        patch = mock.patch(module)
        patch.start()
        self.patchers.append(patch)

    nova_p = mock.patch("savanna.utils.openstack.nova.client")
    nova = nova_p.start()
    self.patchers.append(nova_p)
    nova().flavors.get.side_effect = u._get_flavor
def testCreateClusterTemplate(self):
    session = ctx.current().session
    with session.begin():
        c = m.ClusterTemplate('c-1', 't-1', 'p-1', 'hv-1',
                              cluster_configs=SAMPLE_CONFIGS)
        session.add(c)

    res = session.query(m.ClusterTemplate).filter_by().first()
    self.assertIsValidModelObject(res)
    self.assertEqual(SAMPLE_CONFIGS, res.cluster_configs)

    res_dict = self.get_clean_dict(res)

    self.assertEqual(res_dict, {
        'cluster_configs': SAMPLE_CONFIGS,
        'hadoop_version': 'hv-1',
        'name': 'c-1',
        'plugin_name': 'p-1',
        'node_groups': [],
        'default_image_id': None,
        'description': None,
        'anti_affinity': []
    })
def _get_service_address(service_type):
    ctx = context.current()
    identity_url = base.url_for(ctx.service_catalog, service_type)
    address_regexp = r"^\w+://(.+?)/"
    identity_host = re.search(address_regexp, identity_url).group(1)
    return identity_host
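# Illustration of the regex in _get_service_address(): it captures everything
# between the scheme and the first path separator, i.e. host[:port].
# Standalone sketch; the endpoint URL below is made up.
import re

assert re.search(r"^\w+://(.+?)/",
                 "http://192.168.1.1:5000/v2.0").group(1) == "192.168.1.1:5000"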
def client():
    ctx = context.current()
    heat_url = base.url_for(ctx.service_catalog, 'orchestration')
    return heat_client.Client('1', heat_url, token=ctx.token)
def _acquire_remote_semaphore():
    context.current().remote_semaphore.acquire()
    _global_remote_semaphore.acquire()
def _release_remote_semaphore():
    _global_remote_semaphore.release()
    context.current().remote_semaphore.release()
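# The two helpers above are assumed to be used as a matched pair around each
# remote operation; a hypothetical caller illustrating the intended pattern
# (not part of the module):
def _with_remote_semaphores(run):
    _acquire_remote_semaphore()
    try:
        return run()
    finally:
        _release_remote_semaphore()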
def terminate_cluster(cluster):
    with ctx.current().session.begin():
        ctx.current().session.delete(cluster)
def terminate_node_group_template(**args):
    with ctx.current().session.begin():
        ctx.current().session.delete(get_node_group_template(**args))
def terminate_cluster_template(**args):
    with ctx.current().session.begin():
        ctx.current().session.delete(get_cluster_template(**args))
def persist_cluster_template(cluster_template):
    session = ctx.current().session
    with session.begin():
        session.add(cluster_template)

    return cluster_template