def check_cluster_create(data, **kwargs):
    b.check_cluster_unique_name(data['name'])
    b.check_plugin_name_exists(data['plugin_name'])
    b.check_plugin_supports_version(data['plugin_name'],
                                    data['hadoop_version'])

    if data.get('cluster_template_id'):
        ct_id = data['cluster_template_id']
        b.check_cluster_template_exists(ct_id)
        if not data.get('node_groups'):
            b.check_node_groups_in_cluster_templates(data['plugin_name'],
                                                     data['hadoop_version'],
                                                     ct_id)

    if data.get('user_keypair_id'):
        b.check_keypair_exists(data['user_keypair_id'])

    default_image_id = _get_cluster_field(data, 'default_image_id')
    if default_image_id:
        b.check_image_registered(default_image_id)
        b.check_required_image_tags(data['plugin_name'],
                                    data['hadoop_version'],
                                    default_image_id)
    else:
        raise ex.NotFoundException('default_image_id',
                                   "'%s' field is not found")

    b.check_all_configurations(data)

    if data.get('anti_affinity'):
        b.check_node_processes(data['plugin_name'], data['hadoop_version'],
                               data['anti_affinity'])

    if data.get('node_groups'):
        b.check_network_config(data['node_groups'])

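# A minimal sketch of a request body that would pass the checks above; the
# plugin, version and id values are hypothetical and only illustrate the
# shape check_cluster_create() expects.
EXAMPLE_CLUSTER_CREATE = {
    'name': 'my-cluster',                    # must be unique
    'plugin_name': 'vanilla',                # must be a registered plugin
    'hadoop_version': '1.2.1',               # must be supported by the plugin
    'default_image_id': 'registered-image-id',  # must carry required tags
    'user_keypair_id': 'my-keypair',         # optional
    'node_groups': [],                       # network config checked if set
}
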
def node_group_update(context, node_group_id, values):
    session = get_session()
    with session.begin():
        node_group = _node_group_get(context, session, node_group_id)
        if not node_group:
            raise ex.NotFoundException(node_group_id,
                                       "Node Group id '%s' not found!")

        node_group.update(values)

def instance_update(context, instance_id, values):
    session = get_session()
    with session.begin():
        instance = _instance_get(context, session, instance_id)
        if not instance:
            raise ex.NotFoundException(instance_id,
                                       "Instance id '%s' not found!")

        instance.update(values)

def _add_to_streaming_element(self, element, path):
    if element not in ['mapper', 'reducer']:
        raise ex.NotFoundException(element,
                                   message='"%s" child cannot be added to '
                                           'streaming element')
    x.get_and_create_if_not_exist(self.doc, self.tag_name, 'streaming')
    x.add_text_element_to_tag(self.doc, 'streaming', element, path)

def remove_volume(context, instance_id, volume_id):
    session = get_session()
    with session.begin():
        instance = _instance_get(context, session, instance_id)
        if not instance:
            raise ex.NotFoundException(instance_id,
                                       "Instance id '%s' not found!")

        instance.volumes.remove(volume_id)

def job_execution_destroy(context, job_execution_id):
    session = get_session()
    with session.begin():
        job_ex = _job_execution_get(context, session, job_execution_id)
        if not job_ex:
            raise ex.NotFoundException(job_execution_id,
                                       "JobExecution id '%s' not found!")

        session.delete(job_ex)

def cluster_destroy(context, cluster_id):
    session = get_session()
    with session.begin():
        cluster = _cluster_get(context, session, cluster_id)
        if not cluster:
            raise ex.NotFoundException(cluster_id,
                                       "Cluster id '%s' not found!")

        session.delete(cluster)

def _add_to_prepare_element(self, element, paths):
    if element not in ['delete', 'mkdir']:
        raise ex.NotFoundException(element,
                                   message='"%s" child cannot be added to '
                                           'prepare element')
    prop = x.get_and_create_if_not_exist(self.doc, self.tag_name, 'prepare')
    for path in paths:
        elem = xml.parseString('<%s path="%s"/>' % (element, path))
        prop.appendChild(elem.firstChild)

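# For illustration: calling the method above with element='delete' and
# paths=['/tmp/a', '/tmp/b'] builds a prepare block of this shape inside
# the workflow action (delete/mkdir are the prepare children allowed by
# the Oozie workflow schema):
#
#   <prepare>
#       <delete path="/tmp/a"/>
#       <delete path="/tmp/b"/>
#   </prepare>
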
def node_group_remove(context, node_group_id):
    session = get_session()
    with session.begin():
        node_group = _node_group_get(context, session, node_group_id)
        if not node_group:
            raise ex.NotFoundException(node_group_id,
                                       "Node Group id '%s' not found!")

        session.delete(node_group)

def job_binary_internal_destroy(context, job_binary_internal_id):
    session = get_session()
    with session.begin():
        job_binary_internal = _job_binary_internal_get(
            context, session, job_binary_internal_id)
        if not job_binary_internal:
            raise ex.NotFoundException(job_binary_internal_id,
                                       "JobBinaryInternal id '%s' not found!")

        session.delete(job_binary_internal)

def job_update(context, job_id, values):
    session = get_session()
    with session.begin():
        job = _job_get(context, session, job_id)
        if not job:
            raise ex.NotFoundException(job_id, "Job id '%s' not found!")

        job.update(values)

    return job

def node_group_template_destroy(context, node_group_template_id):
    session = get_session()
    with session.begin():
        node_group_template = _node_group_template_get(
            context, session, node_group_template_id)
        if not node_group_template:
            raise ex.NotFoundException(
                node_group_template_id,
                "Node Group Template id '%s' not found!")

        session.delete(node_group_template)

def cluster_update(context, cluster_id, values):
    session = get_session()
    with session.begin():
        cluster = _cluster_get(context, session, cluster_id)
        if cluster is None:
            raise ex.NotFoundException(cluster_id,
                                       "Cluster id '%s' not found!")

        cluster.update(values)

    return cluster

def _read_swift_topology():
    topology = {}
    try:
        with open(CONF.swift_topology_file) as f:
            for line in f:
                (host, path) = line.split()
                topology[host] = path
    except IOError:
        raise ex.NotFoundException(
            CONF.swift_topology_file,
            "Unable to find file %s with Swift topology")
    return topology

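# The parser above expects one whitespace-separated "host rack-path" pair
# per line. A hypothetical swift_topology_file illustrating the format:
#
#   192.168.1.10 /rack1
#   192.168.1.11 /rack2
#
# which _read_swift_topology() turns into
# {'192.168.1.10': '/rack1', '192.168.1.11': '/rack2'}.
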
def job_destroy(context, job_id):
    session = get_session()
    try:
        with session.begin():
            job = _job_get(context, session, job_id)
            if not job:
                raise ex.NotFoundException(job_id, "Job id '%s' not found!")

            session.delete(job)
    except db_exc.DBError as e:
        msg = (" on foreign key constraint"
               if "foreign key constraint" in e.message else "")
        raise ex.DeletionFailed("Job deletion failed%s" % msg)

def job_binary_destroy(context, job_binary_id):
    session = get_session()
    with session.begin():
        job_binary = _job_binary_get(context, session, job_binary_id)
        if not job_binary:
            raise ex.NotFoundException(job_binary_id,
                                       "JobBinary id '%s' not found!")

        if _check_job_binary_referenced(context, session, job_binary_id):
            raise ex.DeletionFailed("JobBinary is referenced "
                                    "and cannot be deleted")

        session.delete(job_binary)

def data_source_destroy(context, data_source_id):
    session = get_session()
    try:
        with session.begin():
            data_source = _data_source_get(context, session, data_source_id)
            if not data_source:
                raise ex.NotFoundException(data_source_id,
                                           "Data Source id '%s' not found!")

            session.delete(data_source)
    except db_exc.DBError as e:
        msg = (" on foreign key constraint"
               if "foreign key constraint" in e.message else "")
        raise ex.DeletionFailed("Data Source deletion failed%s" % msg)

def instance_remove(context, instance_id):
    session = get_session()
    with session.begin():
        instance = _instance_get(context, session, instance_id)
        if not instance:
            raise ex.NotFoundException(instance_id,
                                       "Instance id '%s' not found!")

        session.delete(instance)

        node_group_id = instance.node_group_id
        node_group = _node_group_get(context, session, node_group_id)
        node_group.count -= 1

def node_group_add(context, cluster_id, values):
    session = get_session()
    with session.begin():
        cluster = _cluster_get(context, session, cluster_id)
        if not cluster:
            raise ex.NotFoundException(cluster_id,
                                       "Cluster id '%s' not found!")

        node_group = m.NodeGroup()
        node_group.update({"cluster_id": cluster_id})
        node_group.update(values)
        session.add(node_group)

    return node_group.id

def instance_add(context, node_group_id, values):
    session = get_session()
    with session.begin():
        node_group = _node_group_get(context, session, node_group_id)
        if not node_group:
            raise ex.NotFoundException(node_group_id,
                                       "Node Group id '%s' not found!")

        instance = m.Instance()
        instance.update({"node_group_id": node_group_id})
        instance.update(values)
        session.add(instance)

        # node_group is already bound to this session, so there is no need
        # to re-fetch it before bumping the instance count.
        node_group.count += 1

    return instance.id

def _read_compute_topology():
    ctx = context.ctx()
    tenant_id = str(ctx.tenant_id)
    topology = {}
    try:
        with open(CONF.compute_topology_file) as f:
            for line in f:
                (host, path) = line.split()
                # Calculate the host id from the tenant id and host name,
                # using the same algorithm as nova; see
                # nova/api/openstack/compute/views/servers.py,
                # def _get_host_id(instance).
                sha_hash = hashlib.sha224(tenant_id + host)
                topology[sha_hash.hexdigest()] = path
    except IOError:
        raise ex.NotFoundException(
            CONF.compute_topology_file,
            "Unable to find file %s with compute topology")
    return topology

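# The compute topology file uses the same "host rack-path" line format as
# the Swift topology file above, but the resulting dict is keyed by nova's
# anonymized host id. A hypothetical lookup against a known host name:
#
#   host_id = hashlib.sha224(tenant_id + 'compute-1').hexdigest()
#   rack = topology.get(host_id)  # e.g. '/rack1' if compute-1 is listed
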
def handler(*args, **kwargs):
    if id_prop and not get_args:
        get_args['id'] = id_prop[0]

    get_kwargs = {}
    for get_arg in get_args:
        get_kwargs[get_arg] = kwargs[get_args[get_arg]]

    obj = None
    try:
        obj = get_func(**get_kwargs)
    except Exception as e:
        if 'notfound' not in e.__class__.__name__.lower():
            raise e
    if obj is None:
        e = ex.NotFoundException(get_kwargs, 'Object with %s not found')
        return u.not_found(e)

    return func(*args, **kwargs)

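# handler() references id_prop, get_args, get_func and func as free
# variables, so it reads as the inner function of a decorator factory.
# A minimal sketch of such a wrapper, with hypothetical names
# (check_exists, get_cluster, clusters_get):
#
#   def check_exists(get_func, *id_prop, **get_args):
#       def decorator(func):
#           # ... the handler above, closing over these arguments ...
#           return handler
#       return decorator
#
#   @check_exists(get_cluster, 'cluster_id')
#   def clusters_get(cluster_id):
#       ...
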
def generate_topology_map(cluster, is_node_awareness):
    mapping = _read_compute_topology()
    nova_client = nova.client()
    topology_mapping = {}
    for ng in cluster.node_groups:
        for i in ng.instances:
            # TODO(alazarev) get all servers info with one request
            ni = nova_client.servers.get(i.instance_id)
            hostId = ni.hostId
            if hostId not in mapping:
                raise ex.NotFoundException(
                    i.instance_id,
                    "Was not able to find compute node "
                    "topology for VM %s")
            rack = mapping[hostId]
            if is_node_awareness:
                rack += "/" + hostId
            topology_mapping[i.instance_name] = rack
            topology_mapping[i.management_ip] = rack
            topology_mapping[i.internal_ip] = rack

    topology_mapping.update(_read_swift_topology())
    return topology_mapping

def _check_binaries(values):
    for job_binary in values:
        if not api.get_job_binary(job_binary):
            raise e.NotFoundException(job_binary,
                                      "Job binary '%s' does not exist")