def validate(cls, data, instance=None):
    """Validate a network template against the NETWORK_TEMPLATE schema.

    Beyond schema validation, cross-checks each node group: every
    template referenced from ``templates_for_node_role`` must be defined
    in that group's ``network_scheme``, and every group must define at
    least one template.

    :param data: raw template payload to parse and validate
    :param instance: unused; kept for validator interface compatibility
    :returns: parsed template dict
    :raises errors.InvalidData: on schema or cross-reference violations
    """
    parsed = super(NetworkTemplateValidator, cls).validate(data)
    cls.validate_schema(parsed, NETWORK_TEMPLATE)
    # Ensure templates requested in templates_for_node_role are
    # present in network_scheme
    if not parsed['adv_net_template']:
        raise errors.InvalidData("No node groups are defined")
    for ng_name, node_group in six.iteritems(parsed['adv_net_template']):
        defined_templates = set(six.iterkeys(node_group['network_scheme']))
        not_found = set()
        for templates_by_role in six.itervalues(
                node_group['templates_for_node_role']):
            for template in templates_by_role:
                if template not in defined_templates:
                    not_found.add(template)
        if not_found:
            raise errors.InvalidData(
                "Requested templates {0} were not found for node "
                "group {1}".format(', '.join(not_found), ng_name))
        if not defined_templates:
            raise errors.InvalidData(
                "No templates are defined for node group {0}".format(
                    ng_name))
    return parsed
def validate_collection_update(cls, data):
    """Validate a bulk node-update payload.

    :param data: raw JSON string; must decode to a list of node dicts
    :returns: the parsed list (with each 'meta' section normalized)
    :raises errors.InvalidData: when an entry is not addressable by MAC
        or ID, or references a node that does not exist
    """
    d = cls.validate_json(data)
    if not isinstance(d, list):
        raise errors.InvalidData("Invalid json list",
                                 log_message=True)
    q = db().query(Node)
    for nd in d:
        # Every entry must be addressable by MAC or by ID.
        if not nd.get("mac") and not nd.get("id"):
            raise errors.InvalidData("Neither MAC nor ID is specified",
                                     log_message=True)
        # An explicitly null MAC is rejected outright.
        if "mac" in nd and not nd["mac"]:
            raise errors.InvalidData("Null MAC is specified",
                                     log_message=True)
        else:
            if nd.get("mac"):
                # A MAC is valid if a node row matches it directly or
                # the project-level MAC fallback check accepts it.
                existent_node = q.filter_by(mac=nd["mac"]).first() \
                    or cls.validate_existent_node_mac_update(nd)
                if not existent_node:
                    raise errors.InvalidData("Invalid MAC specified",
                                             log_message=True)
            if nd.get("id"):
                existent_node = q.get(nd["id"])
                if not existent_node:
                    raise errors.InvalidData("Invalid ID specified",
                                             log_message=True)
        if 'roles' in nd:
            # existent_node is guaranteed bound here: the mac/id
            # presence check above raised otherwise.
            cls.validate_roles(nd, existent_node)
        if 'meta' in nd:
            nd['meta'] = MetaValidator.validate_update(nd['meta'])
    return d
def _validate_env_before_execute(cls, filters): """Validate environment before execute configuration update""" # We can not pass cluster object here from handler because cluster_id # is passed in request data force = filters.get('force', False) cluster = objects.Cluster.get_by_uid(filters['cluster_id'], fail_if_not_found=True) if not force and cluster.status != consts.CLUSTER_STATUSES.operational: raise errors.InvalidData("Cluster should be in the status " "'operational'") target_nodes = objects.Cluster.get_nodes_to_update_config( cluster, filters.get('node_id'), filters.get('node_role'), only_ready_nodes=False) ready_target_nodes_uids = set( node.uid for node in target_nodes if node.status == consts.NODE_STATUSES.ready) if not ready_target_nodes_uids: raise errors.InvalidData("No nodes in status 'ready'") invalid_target_nodes_uids = set( node.uid for node in target_nodes if node.status != consts.NODE_STATUSES.ready) if not force and invalid_target_nodes_uids: raise errors.InvalidData( "Nodes '{0}' are not in status 'ready' and can not be updated " "directly." "".format(', '.join(invalid_target_nodes_uids)))
def validate(cls, data):
    """Validate release-creation data.

    Requires 'name' and 'version', enforces their uniqueness, checks
    each network entry's 'name'/'access' fields, and validates the
    attributes fixture.  Missing metadata sections are defaulted.

    :param data: raw JSON string with the release description
    :returns: parsed release dict
    :raises errors.InvalidData: on missing/invalid fields
    :raises errors.AlreadyExists: when a release with the same name and
        version already exists
    """
    d = cls.validate_json(data)
    # PEP 8 idiom: "x not in d" rather than "not x in d" (matches the
    # newer release validator in this file).
    if "name" not in d:
        raise errors.InvalidData("No release name specified",
                                 log_message=True)
    if "version" not in d:
        raise errors.InvalidData("No release version specified",
                                 log_message=True)
    if db().query(Release).filter_by(name=d["name"],
                                     version=d["version"]).first():
        raise errors.AlreadyExists("Release with the same name and version "
                                   "already exists", log_message=True)
    if "networks_metadata" in d:
        for network in d["networks_metadata"]:
            if "name" not in network or "access" not in network:
                raise errors.InvalidData("Invalid network data: %s" %
                                         str(network), log_message=True)
            if network["access"] not in settings.NETWORK_POOLS:
                raise errors.InvalidData("Invalid access mode for network",
                                         log_message=True)
    else:
        d["networks_metadata"] = []
    if "attributes_metadata" not in d:
        d["attributes_metadata"] = {}
    else:
        try:
            Attributes.validate_fixture(d["attributes_metadata"])
        # A bare ``except:`` would also swallow SystemExit and
        # KeyboardInterrupt; catch Exception only.
        except Exception:
            raise errors.InvalidData(
                "Invalid logical structure of attributes metadata",
                log_message=True)
    return d
def _validate_common(cls, data, instance=None):
    """Validate cluster data shared between create and update flows.

    Checks that the referenced release exists and is deployable, and
    that any pending release is a legal upgrade target of the current
    release.

    :param data: raw JSON string with cluster data
    :param instance: existing cluster (updates only); supplies the
        current release when only ``pending_release_id`` is sent
    :returns: parsed data dict
    :raises errors.InvalidData: on invalid release references
    :raises errors.NotAllowed: when the release is not deployable
    """
    d = cls.validate_json(data)
    # "release" and "release_id" are accepted as synonyms.
    release_id = d.get("release", d.get("release_id"))
    if release_id:
        release = objects.Release.get_by_uid(release_id)
        if not release:
            raise errors.InvalidData(
                "Invalid release ID", log_message=True)
        if not objects.Release.is_deployable(release):
            raise errors.NotAllowed(
                "Release with ID '{0}' is not deployable.".format(
                    release_id), log_message=True)
        cls._validate_mode(d, release)
    pend_release_id = d.get("pending_release_id")
    if pend_release_id:
        pend_release = objects.Release.get_by_uid(pend_release_id,
                                                  fail_if_not_found=True)
        if not release_id:
            # Fall back to the instance's current release.
            if not instance:
                raise errors.InvalidData(
                    "Cannot set pending release when "
                    "there is no current release",
                    log_message=True)
            release_id = instance.release_id
        curr_release = objects.Release.get_by_uid(release_id)
        if not cls._can_update_release(curr_release, pend_release):
            raise errors.InvalidData(
                "Cannot set pending release as "
                "it cannot update current release",
                log_message=True)
    return d
def validate(cls, data):
    """Parse and validate release data for creation.

    :param data: raw JSON string with the release description
    :returns: parsed release dict with metadata sections defaulted
    :raises errors.InvalidData: on missing fields or a broken
        attributes fixture
    :raises errors.AlreadyExists: for a duplicate name/version pair
    """
    parsed = cls.validate_json(data)
    # Both fields are mandatory; report the first one that is absent.
    required = (("name", "No release name specified"),
                ("version", "No release version specified"))
    for field, message in required:
        if field not in parsed:
            raise errors.InvalidData(message, log_message=True)
    duplicate = db().query(Release).filter_by(
        name=parsed["name"], version=parsed["version"]).first()
    if duplicate:
        raise errors.AlreadyExists(
            "Release with the same name and version "
            "already exists", log_message=True)
    if "networks_metadata" in parsed:
        # TODO(enchantner): additional validation
        for net in parsed["networks_metadata"]:
            if "name" in net:
                continue
            raise errors.InvalidData("Invalid network data: %s" % str(net),
                                     log_message=True)
    else:
        parsed["networks_metadata"] = []
    if "attributes_metadata" in parsed:
        try:
            Attributes.validate_fixture(parsed["attributes_metadata"])
        except Exception:
            raise errors.InvalidData(
                "Invalid logical structure of attributes metadata",
                log_message=True)
    else:
        parsed["attributes_metadata"] = {}
    return parsed
def check_nova_compute_duplicate_and_empty_values(cls, attributes):
    """Check 'nova_computes' attributes for empty and duplicate values."""
    # Track every value seen so far, per checked attribute.
    seen = {
        'vsphere_cluster': set(),
        'service_name': set(),
        'target_node': set()
    }
    for nova_compute in attributes:
        for attr_name, seen_values in six.iteritems(seen):
            if attr_name == 'target_node':
                value = cls._get_target_node_id(nova_compute)
                # 'controllers' is a shared pseudo-target, so repeats
                # of it are expected and allowed.
                if value == 'controllers':
                    continue
            else:
                value = nova_compute.get(attr_name)
            if not value:
                raise errors.InvalidData(
                    "Empty value for attribute '{0}' is not allowed".
                    format(attr_name),
                    log_message=True
                )
            if value in seen_values:
                raise errors.InvalidData(
                    "Duplicate value '{0}' for attribute '{1}' is "
                    "not allowed".format(value, attr_name),
                    log_message=True
                )
            seen_values.add(value)
def _validate_common(cls, data, instance=None):
    """Validate cluster data shared by create and update flows.

    :param data: raw JSON string with cluster data
    :param instance: existing cluster (updates only); supplies the
        current release when only ``pending_release_id`` is sent
    :returns: parsed data dict
    :raises errors.InvalidData: on invalid release references or an
        impossible pending-release transition
    """
    d = cls.validate_json(data)
    # "release" and "release_id" are accepted as synonyms.
    release_id = d.get("release", d.get("release_id"))
    if release_id:
        if not Release.get_by_uid(release_id):
            raise errors.InvalidData(
                "Invalid release ID", log_message=True)
    pend_release_id = d.get("pending_release_id")
    if pend_release_id:
        pend_release = Release.get_by_uid(pend_release_id,
                                          fail_if_not_found=True)
        if not release_id:
            # Fall back to the instance's current release.
            if not instance:
                raise errors.InvalidData(
                    "Cannot set pending release when "
                    "there is no current release",
                    log_message=True)
            release_id = instance.release_id
        curr_release = Release.get_by_uid(release_id)
        if not cls._can_update_release(curr_release, pend_release):
            raise errors.InvalidData(
                "Cannot set pending release as "
                "it cannot update current release",
                log_message=True)
    return d
def validate(cls, data):
    """Validate node-creation data.

    The MAC is mandatory and must not collide with any existing node's
    primary MAC or any of its NIC interfaces' MACs.

    :param data: raw JSON string describing the node
    :returns: parsed node dict
    :raises errors.InvalidData: on malformed payload
    :raises errors.AlreadyExists: when a node with this MAC exists
    """
    d = cls.validate_json(data)
    if not isinstance(d, dict):
        raise errors.InvalidData(
            "Node data must be dict",
            log_message=True
        )
    if "mac" not in d:
        raise errors.InvalidData(
            "No mac address specified",
            log_message=True
        )
    else:
        q = db().query(Node)
        # Collision check covers both the node's own MAC column and the
        # MACs of joined NIC interfaces.
        if q.filter(Node.mac == d["mac"]).first() or q.join(
                NodeNICInterface, Node.nic_interfaces).filter(
                NodeNICInterface.mac == d["mac"]).first():
            raise errors.AlreadyExists(
                "Node with mac {0} already "
                "exists - doing nothing".format(d["mac"]),
                log_level="info"
            )
        # Secondary project-level MAC existence check.
        if cls.validate_existent_node_mac_create(d):
            raise errors.AlreadyExists(
                "Node with mac {0} already "
                "exists - doing nothing".format(d["mac"]),
                log_level="info"
            )
    if 'meta' in d:
        MetaValidator.validate_create(d['meta'])
    return d
def get_attributes(cls, instance, all_plugins_versions=False):
    """Get attributes for current Cluster instance.

    :param instance: Cluster instance
    :param all_plugins_versions: Get attributes of all versions of plugins
    :returns: dict
    :raises errors.InvalidData: when the cluster has zero or multiple
        attribute rows stored
    """
    try:
        # Exactly one Attributes row is expected per cluster; .one()
        # raises for both the zero- and many-rows cases handled below.
        attrs = db().query(models.Attributes).filter(
            models.Attributes.cluster_id == instance.id
        ).one()
    except MultipleResultsFound:
        raise errors.InvalidData(
            u"Multiple rows with attributes were found for cluster '{0}'"
            .format(instance.name)
        )
    except NoResultFound:
        raise errors.InvalidData(
            u"No attributes were found for cluster '{0}'"
            .format(instance.name)
        )
    attrs = dict(attrs)
    # Merge plugins attributes into editable ones
    plugin_attrs = PluginManager.get_plugins_attributes(
        instance, all_versions=all_plugins_versions)
    # presumably expands AttributesGenerator callbacks embedded in the
    # plugin attributes tree — confirm against traverse()'s contract.
    plugin_attrs = traverse(plugin_attrs, AttributesGenerator, {
        'cluster': instance,
        'settings': settings,
    })
    attrs['editable'].update(plugin_attrs)
    return attrs
def validate(cls, data):
    """Parse release data and validate the fields required for creation.

    :param data: raw JSON string with the release description
    :returns: parsed release dict with metadata sections defaulted
    :raises errors.InvalidData: when a mandatory field is absent
    :raises errors.AlreadyExists: for a duplicate name/version pair
    """
    parsed = cls.validate_json(data)
    # All four fields are mandatory; report the first absent one.
    required = (
        ("name", "No release name specified"),
        ("version", "No release version specified"),
        ("operating_system", "No release operating system specified"),
        ("orchestrator_data", 'No orchestrator_data specified'),
    )
    for field, message in required:
        if field not in parsed:
            raise errors.InvalidData(message, log_message=True)
    duplicate = db().query(Release).filter_by(
        name=parsed["name"], version=parsed["version"]).first()
    if duplicate:
        raise errors.AlreadyExists(
            "Release with the same name and version "
            "already exists", log_message=True)
    cls._validate_common(parsed)
    # Default the optional metadata sections to empty mappings.
    parsed.setdefault("networks_metadata", {})
    parsed.setdefault("attributes_metadata", {})
    return parsed
def validate(cls, data, cluster=None):
    """Validate a cluster-attributes update payload.

    :param data: raw JSON string with attributes
    :param cluster: cluster the attributes belong to
    :returns: parsed attributes dict as sent by the client
    :raises errors.InvalidData: on read-only sections, malformed
        'editable' data, or a forbidden provisioning method
    """
    d = cls.validate_json(data)
    if "generated" in d:
        raise errors.InvalidData(
            "It is not allowed to update generated attributes",
            log_message=True)
    if "editable" in d and not isinstance(d["editable"], dict):
        raise errors.InvalidData(
            "Editable attributes should be a dictionary",
            log_message=True)
    attrs = d
    if cluster is not None:
        # Validate against the merged (DB + payload) editable state.
        attrs = objects.Cluster.get_updated_editable_attributes(cluster, d)
    cls._validate_net_provider(attrs, cluster)
    # NOTE(agordeev): disable classic provisioning for 7.0 or higher
    # NOTE(review): the check below dereferences ``cluster.release``
    # unconditionally, so the ``cluster=None`` default would crash
    # here — presumably callers always pass a cluster; confirm.
    if StrictVersion(cluster.release.environment_version) >= \
            StrictVersion(consts.FUEL_IMAGE_BASED_ONLY):
        provision_data = attrs['editable'].get('provision')
        if provision_data:
            if provision_data['method']['value'] != \
                    consts.PROVISION_METHODS.image:
                raise errors.InvalidData(
                    u"Cannot use classic provisioning for adding "
                    u"nodes to environment",
                    log_message=True)
        else:
            raise errors.InvalidData(
                u"Provisioning method is not set. Unable to continue",
                log_message=True)
    cls.validate_editable_attributes(attrs)
    return d
def validate(cls, data):
    """Validate Red Hat license/account data.

    :param data: raw JSON string
    :returns: parsed dict
    :raises errors.InvalidData: when a required field is missing or the
        license type is unknown
    """
    d = cls.validate_json(data)
    # PEP 8 idiom: "x not in d" rather than "not x in d".
    if "release_id" not in d:
        raise errors.InvalidData(
            "No Release ID specified",
        )
    if "license_type" not in d:
        raise errors.InvalidData(
            "No License Type specified"
        )
    if d["license_type"] not in ["rhsm", "rhn"]:
        raise errors.InvalidData(
            "Invalid License Type"
        )
    if "username" not in d or "password" not in d:
        raise errors.InvalidData(
            "Username or password not specified"
        )
    if d["license_type"] == "rhn":
        # RHN additionally requires a satellite host and activation key.
        if "satellite" not in d or "activation_key" not in d:
            raise errors.InvalidData(
                "Satellite hostname or activation key not specified",
            )
    return d
def check(self):
    """Verify the task graph is well-formed.

    The graph must be acyclic, and every node must carry an 'id' — a
    node without one was created implicitly by an edge referencing a
    task that was never defined.

    :raises errors.InvalidData: on a cycle or undefined task references
    """
    if not self.graph.is_acyclic():
        raise errors.InvalidData(
            "Tasks can not be processed because it contains cycles in it.")
    non_existing_tasks = []
    invalid_tasks = []
    for node_key, node_value in six.iteritems(self.graph.node):
        if not node_value.get('id'):
            # Every neighbor (in either direction) references this
            # undefined task and is therefore reported as invalid.
            successors = self.graph.successors(node_key)
            predecessors = self.graph.predecessors(node_key)
            neighbors = successors + predecessors
            non_existing_tasks.append(node_key)
            invalid_tasks.extend(neighbors)
    if non_existing_tasks:
        raise errors.InvalidData(
            "Tasks '{non_existing_tasks}' can't be in requires"
            "|required_for|groups|tasks for [{invalid_tasks}]"
            " because they don't exist in the graph".format(
                non_existing_tasks=', '.join(
                    str(x) for x in sorted(non_existing_tasks)),
                invalid_tasks=', '.join(
                    str(x) for x in sorted(set(invalid_tasks)))))
def validate_collection_update(cls, data):
    """Validate bulk notification status updates.

    Only 'id' and 'status' are kept per entry; everything else in the
    payload is dropped from the result.

    :param data: raw JSON string, expected to decode to a list of dicts
    :returns: list of {'id', 'status'} dicts
    :raises errors.InvalidData: on a non-list payload, missing fields,
        or an unknown notification id
    """
    d = cls.validate_json(data)
    if not isinstance(d, list):
        raise errors.InvalidData("Invalid json list",
                                 log_message=True)
    q = db().query(Notification)
    valid_d = []
    for nd in d:
        valid_nd = {}
        if "id" not in nd:
            raise errors.InvalidData("ID is not set correctly",
                                     log_message=True)
        if "status" not in nd:
            # Fixed: this branch used to raise the copy-pasted
            # "ID is not set correctly" message, hiding which field
            # was actually missing.
            raise errors.InvalidData("Status is not set correctly",
                                     log_message=True)
        if not q.get(nd["id"]):
            raise errors.InvalidData("Invalid ID specified",
                                     log_message=True)
        valid_nd["id"] = nd["id"]
        valid_nd["status"] = nd["status"]
        valid_d.append(valid_nd)
    return valid_d
def validate_networks_update(cls, data, cluster):
    """Validate an update of a cluster's network groups.

    Every referenced network must exist in the DB.  For each one, the
    effective parameters are taken from the request when present and
    from the DB row otherwise; notation-specific requirements
    (IP ranges / CIDR / gateway) are then enforced.  For a deployed
    (locked) cluster the new ranges must not conflict with already
    allocated IPs.

    :param data: raw networks payload
    :param cluster: cluster owning the networks
    :returns: validated payload dict
    :raises errors.InvalidData: on any violation
    """
    data = cls.base_validation(data)
    cls.validate_schema(data, network_schema.NETWORKS)
    net_ids = [ng['id'] for ng in data['networks']]
    ng_db_by_id = dict(
        (ng.id, ng) for ng in db().query(NetworkGroup).filter(
            NetworkGroup.id.in_(net_ids)))
    missing_ids = set(net_ids).difference(ng_db_by_id)
    if missing_ids:
        raise errors.InvalidData(
            u"Networks with ID's [{0}] are not present in the "
            "database".format(', '.join(map(str, sorted(missing_ids)))))
    for network in data['networks']:
        net_id = network['id']
        ng_db = ng_db_by_id[net_id]
        cidr = network['cidr'] if 'cidr' in network else ng_db.cidr
        ip_ranges = network['ip_ranges'] if 'ip_ranges' in network else [
            (r.first, r.last) for r in ng_db.ip_ranges
        ]
        # values are always taken either from request or from DB
        meta = network.get('meta', {})
        notation = meta.get('notation', ng_db.meta.get('notation'))
        use_gateway = meta.get('use_gateway',
                               ng_db.meta.get('use_gateway', False))
        # NOTE(review): other DB fields use attribute access
        # (ng_db.cidr, ng_db.ip_ranges) while this uses a dict-style
        # .get on the model — confirm NetworkGroup actually exposes
        # .get, otherwise this raises AttributeError.
        gateway = network.get('gateway', ng_db.get('gateway'))
        if use_gateway and not gateway:
            raise errors.InvalidData(
                "Flag 'use_gateway' cannot be provided without gateway")
        # Depending on notation required parameters must be either in
        # the request or DB
        if notation == consts.NETWORK_NOTATION.ip_ranges:
            if not ip_ranges and not ng_db.ip_ranges:
                raise errors.InvalidData(
                    "No IP ranges were specified for network "
                    "{0}".format(net_id))
        if notation in [
                consts.NETWORK_NOTATION.cidr,
                consts.NETWORK_NOTATION.ip_ranges]:
            if not cidr and not ng_db.cidr:
                raise errors.InvalidData(
                    "No CIDR was specified for network "
                    "{0}".format(net_id))
        if cluster.is_locked and cls._check_for_ip_conflicts(
                network, cluster, notation, use_gateway):
            raise errors.InvalidData(
                "New IP ranges for network '{0}' conflict "
                "with already allocated IPs.".format(network['name']))
    return data
def validate_delete(cls, data, instance, force=False):
    """Forbid deletion of networks that must never be removed.

    :param data: unused; kept for validator interface compatibility
    :param instance: NetworkGroup being deleted
    :param force: unused; kept for validator interface compatibility
    :raises errors.InvalidData: for the Admin-pxe network or any
        network of an already-deployed cluster
    """
    # Only the default Admin-pxe network lacks a group_id; it cannot
    # be deleted.
    if not instance.group_id:
        raise errors.InvalidData(
            "Default Admin-pxe network cannot be deleted")
    if instance.nodegroup.cluster.is_locked:
        raise errors.InvalidData(
            "Networks cannot be deleted after deployment")
def validate(cls, data):
    """Validate an attributes payload.

    The 'generated' section is read-only and may not appear at all;
    'editable', when present, must be a dictionary.

    :param data: raw JSON string
    :returns: parsed attributes dict
    :raises errors.InvalidData: on a read-only or malformed section
    """
    parsed = cls.validate_json(data)
    if "generated" in parsed:
        raise errors.InvalidData(
            "It is not allowed to update generated attributes",
            log_message=True)
    has_editable = "editable" in parsed
    if has_editable and not isinstance(parsed["editable"], dict):
        raise errors.InvalidData(
            "Editable attributes should be a dictionary",
            log_message=True)
    return parsed
def check_operational_controllers_settings(cls, input_nova_computes,
                                           db_nova_computes):
    """Check deployed nova computes settings with target = controllers.

    Raise InvalidData exception if any deployed nova computes clusters
    with target 'controllers' were added, removed or modified.

    :param input_nova_computes: new nova_compute settings
    :type input_nova_computes: list of dicts
    :param db_nova_computes: nova_computes settings stored in db
    :type db_nova_computes: list of dicts
    """
    # Index incoming 'controllers'-targeted instances by vSphere
    # cluster name for O(1) lookup below.
    input_computes_by_vsphere_name = dict(
        (nc['vsphere_cluster'], nc) for nc in input_nova_computes
        if cls._get_target_node_id(nc) == 'controllers'
    )
    db_clusters_names = set()
    for db_nova_compute in db_nova_computes:
        target_name = cls._get_target_node_id(db_nova_compute)
        if target_name == 'controllers':
            vsphere_name = db_nova_compute['vsphere_cluster']
            input_nova_compute = \
                input_computes_by_vsphere_name.get(vsphere_name)
            # Deployed instance missing from input => it was deleted.
            if not input_nova_compute:
                raise errors.InvalidData(
                    "Nova compute instance with target 'controllers' and "
                    "vSphere cluster {0} couldn't be deleted from "
                    "operational environment.".format(vsphere_name),
                    log_message=True
                )
            # Every stored attribute must be unchanged in the input;
            # 'target_node' is compared via its resolved id.
            for attr, db_value in six.iteritems(db_nova_compute):
                input_value = input_nova_compute.get(attr)
                if attr == 'target_node':
                    db_value = cls._get_target_node_id(db_nova_compute)
                    input_value = cls._get_target_node_id(
                        input_nova_compute)
                if db_value != input_value:
                    raise errors.InvalidData(
                        "Parameter '{0}' of nova compute instance with "
                        "vSphere cluster name '{1}' couldn't be changed".
                        format(attr, vsphere_name),
                        log_message=True
                    )
            db_clusters_names.add(vsphere_name)
    # Any input name not matched against the DB is an addition.
    input_clusters_names = set(input_computes_by_vsphere_name)
    if input_clusters_names - db_clusters_names:
        raise errors.InvalidData(
            "Nova compute instances with target 'controllers' couldn't be "
            "added to operational environment. Check nova compute "
            "instances with the following vSphere cluster names: {0}".
            format(', '.join(
                sorted(input_clusters_names - db_clusters_names))),
            log_message=True
        )
def validate_update(cls, data, instance=None):
    """Validate a single node-update payload.

    :param data: raw JSON string or an already-parsed dict
    :param instance: existing node object when the caller has already
        resolved it; otherwise the node is looked up by MAC/ID
    :returns: parsed/validated dict
    :raises errors.InvalidData: on any violation
    """
    # Accept both raw JSON strings and pre-parsed dicts.
    # six.string_types covers str/unicode on Python 2 and plain str on
    # Python 3, unlike the previous (str, unicode) tuple which is a
    # NameError on Python 3; the rest of this module already uses six.
    if isinstance(data, six.string_types):
        d = cls.validate_json(data)
    else:
        d = data
    if "status" in d and d["status"] not in consts.NODE_STATUSES:
        raise errors.InvalidData(
            "Invalid status for node",
            log_message=True
        )
    # Must be addressable by MAC, ID, or a caller-supplied instance.
    if not d.get("mac") and not d.get("id") and not instance:
        raise errors.InvalidData(
            "Neither MAC nor ID is specified",
            log_message=True
        )
    q = db().query(Node)
    if "mac" in d:
        if not d["mac"]:
            raise errors.InvalidData(
                "Null MAC is specified",
                log_message=True
            )
        else:
            # A MAC is valid if a node row matches it directly or the
            # project-level MAC fallback check accepts it.
            existent_node = q.filter_by(mac=d["mac"]).first() \
                or cls.validate_existent_node_mac_update(d)
            if not existent_node:
                raise errors.InvalidData(
                    "Invalid MAC is specified",
                    log_message=True
                )
    if "id" in d and d["id"]:
        existent_node = q.get(d["id"])
        if not existent_node:
            raise errors.InvalidData(
                "Invalid ID specified",
                log_message=True
            )
    if "roles" in d:
        # Prefer the caller-supplied instance; otherwise resolve the
        # node by MAC or ID for the role check.
        if instance:
            node = instance
        else:
            node = objects.Node.get_by_mac_or_uid(
                mac=d.get("mac"), node_uid=d.get("id")
            )
        cls.validate_roles(d, node)
    if 'meta' in d:
        d['meta'] = MetaValidator.validate_update(d['meta'])
    return d
def _check_attribute(cls, metadata, attributes, new_attributes): """Check new_attributes is equal with attributes except editable fields :param metadata: dict describes structure and properties of attributes :param attributes: attributes which is the basis for comparison :param new_attributes: attributes with modifications to check """ if type(attributes) != type(new_attributes): raise errors.InvalidData( "Value type of '{0}' attribute couldn't be changed.". format(metadata.get('label') or metadata.get('name')), log_message=True ) # if metadata field contains editable_for_deployed = True, attribute # and all its childs may be changed too. No need to check it. if metadata.get('editable_for_deployed'): return # no 'fields' in metadata means that attribute has no any childs(leaf) if 'fields' not in metadata: if attributes != new_attributes: raise errors.InvalidData( "Value of '{0}' attribute couldn't be changed.". format(metadata.get('label') or metadata.get('name')), log_message=True ) return fields_sort_functions = { 'availability_zones': lambda x: x['az_name'], 'nova_computes': lambda x: x['vsphere_cluster'] } field_name = metadata['name'] if isinstance(attributes, (list, tuple)): if len(attributes) != len(new_attributes): raise errors.InvalidData( "Value of '{0}' attribute couldn't be changed.". format(metadata.get('label') or metadata.get('name')), log_message=True ) attributes = sorted( attributes, key=fields_sort_functions.get(field_name)) new_attributes = sorted( new_attributes, key=fields_sort_functions.get(field_name)) for item, new_item in six.moves.zip(attributes, new_attributes): for field_metadata in metadata['fields']: cls._check_attribute(field_metadata, item.get(field_metadata['name']), new_item.get(field_metadata['name'])) elif isinstance(attributes, dict): for field_metadata in metadata['fields']: cls._check_attribute(field_metadata, attributes.get(field_name), new_attributes.get(field_name))
def validate_json(cls, data):
    """Parse *data* as JSON, translating failures into InvalidData.

    :param data: raw request body
    :returns: decoded JSON value
    :raises errors.InvalidData: for an empty body or malformed JSON
    """
    # Reject an empty body before attempting to parse anything.
    if not data:
        raise errors.InvalidData("Empty request received",
                                 log_message=True)
    try:
        return jsonutils.loads(data)
    except Exception:
        raise errors.InvalidData("Invalid json received",
                                 log_message=True)
def check_roles_requirement(cls, roles, roles_metadata, settings):
    """Check that every role's dependency conditions hold.

    :param roles: role names to check
    :param roles_metadata: metadata dict keyed by role name; a role may
        declare "depends" with a list of condition entries
    :param settings: settings tree searched via cls._search_in_settings
    :raises errors.InvalidData: when a condition path is malformed or a
        condition is not satisfied
    """
    for role in roles:
        if "depends" in roles_metadata[role]:
            depends = roles_metadata[role]['depends']
            for condition in depends:
                # NOTE: .keys()[0]/.values()[0] indexing only works on
                # Python 2 (dict views are not indexable on Python 3).
                search_key = condition['condition'].keys()[0]
                if not search_key.startswith('settings:'):
                    # BUG FIX: the exception was constructed but never
                    # raised, so malformed paths went unreported.
                    raise errors.InvalidData('Incorrect settings path')
                setting_path = search_key[search_key.find(':') + 1:]
                setting = cls._search_in_settings(settings, setting_path)
                if setting != condition['condition'].values()[0]:
                    raise errors.InvalidData(condition['warning'])
def validate_attribute(cls, attr_name, attr):
    """Validates a single attribute from settings.yaml.

    Dict is of this form:
        description: <description>
        label: <label>
        restrictions:
            - <restriction>
            - <restriction>
            - ...
        type: <type>
        value: <value>
        weight: <weight>
        regex:
            error: <error message>
            source: <regexp source>

    We validate that 'value' corresponds to 'type' according to
    attribute_type_schemas mapping in json_schema/cluster.py.
    If regex is present, we additionally check that the provided string
    value matches the regexp.

    :param attr_name: Name of the attribute being checked
    :param attr: attribute value
    :return: attribute or raise InvalidData exception
    """
    if not isinstance(attr, dict):
        return attr
    if 'type' not in attr and 'value' not in attr:
        return attr
    # Extend the generic attribute schema with the value schema that
    # matches this attribute's declared type, if one is registered.
    schema = copy.deepcopy(cluster_schema.attribute_schema)
    type_ = attr.get('type')
    if type_:
        value_schema = cluster_schema.attribute_type_schemas.get(type_)
        if value_schema:
            schema['properties'].update(value_schema)
    try:
        cls.validate_schema(attr, schema)
    except errors.InvalidData as e:
        # Prefix schema errors with the attribute name for context.
        raise errors.InvalidData('[{0}] {1}'.format(attr_name, e.message))
    # Validate regexp only if some value is present
    # Otherwise regexp might be invalid
    # NOTE(review): assumes 'value' exists whenever this point is
    # reached; an attr carrying only 'type' would raise KeyError here —
    # confirm inputs always include 'value'.
    if attr['value']:
        regex_err = restrictions.AttributesRestriction.validate_regex(attr)
        if regex_err is not None:
            raise errors.InvalidData('[{0}] {1}'.format(
                attr_name, regex_err))
def validate_release_upgrade(cls, orig_release, new_release):
    """Ensure *new_release* is a legal upgrade target for *orig_release*.

    :param orig_release: release of the original cluster
    :param new_release: proposed upgrade target
    :raises errors.InvalidData: when the target is deprecated or not
        strictly newer than the original
    """
    # Deprecated (non-deployable) releases cannot be installed at all.
    deprecated = not new_release.is_deployable
    if deprecated:
        message = (
            "Upgrade to the given release ({0}) is not possible because "
            "this release is deprecated and cannot be installed.")
        raise errors.InvalidData(message.format(new_release.id),
                                 log_message=True)
    # Only strictly newer releases qualify as an upgrade.
    not_newer = orig_release >= new_release
    if not_newer:
        message = (
            "Upgrade to the given release ({0}) is not possible because "
            "this release is equal or lower than the release of the "
            "original cluster.")
        raise errors.InvalidData(message.format(new_release.id),
                                 log_message=True)
def _validate_common(cls, d):
    """Validate metadata sections shared by release create/update.

    :param d: parsed release data dict
    :raises errors.InvalidData: for a network entry without a 'name' or
        an attributes fixture that fails validation
    """
    if "networks_metadata" in d:
        # TODO(enchantner): additional validation
        # NOTE(review): assumes a "nova_network" section is always
        # present when networks_metadata is given (KeyError otherwise)
        # — confirm with callers.
        meta = d["networks_metadata"]["nova_network"]
        for network in meta["networks"]:
            if "name" not in network:
                raise errors.InvalidData(
                    "Invalid network data: {0}".format(network),
                    log_message=True)
    if "attributes_metadata" in d:
        try:
            AttributesValidator.validate_fixture(d["attributes_metadata"])
        except Exception as exc:
            # Surface the fixture validator's own message.
            raise errors.InvalidData(str(exc), log_message=True)
def validate_dns_servers_update(cls, data):
    """Validate an update of the DNS nameserver list.

    'dns_nameservers'/'nameservers' must be a list carrying at least
    two entries.

    :param data: raw JSON string
    :returns: parsed payload dict
    :raises errors.InvalidData: on a non-list value or fewer than two
        nameservers
    """
    parsed = cls.validate_json(data)
    nameservers = parsed['dns_nameservers'].get("nameservers", [])
    if not isinstance(nameservers, list):
        raise errors.InvalidData(
            "It's expected to receive array of DNS servers, "
            "not a single object", log_message=True)
    if len(nameservers) < 2:
        raise errors.InvalidData(
            "There should be at least two DNS servers", log_message=True)
    return parsed
def validate(cls, node):
    """Validate one node's interface/network assignment structure.

    :param node: dict with 'id' and an 'interfaces' list; every
        interface carries 'id' and an 'assigned_networks' list of
        {'id': ...} dicts; a network may be assigned at most once
        per node
    :returns: the validated node dict (unchanged)
    :raises errors.InvalidData: on any structural violation
    """
    if not isinstance(node, dict):
        raise errors.InvalidData("Each node should be dict",
                                 log_message=True)
    if 'id' not in node:
        raise errors.InvalidData("Each node should have ID",
                                 log_message=True)
    if 'interfaces' not in node or \
            not isinstance(node['interfaces'], list):
        raise errors.InvalidData(
            "There is no 'interfaces' list in node '%d'" % node['id'],
            log_message=True)
    net_ids = set()
    for iface in node['interfaces']:
        if not isinstance(iface, dict):
            raise errors.InvalidData(
                "Node '%d': each interface should be dict (got '%s')" %
                (node['id'], str(iface)),
                log_message=True)
        if 'id' not in iface:
            raise errors.InvalidData(
                "Node '%d': each interface should have ID" % node['id'],
                log_message=True)
        if 'assigned_networks' not in iface or \
                not isinstance(iface['assigned_networks'], list):
            raise errors.InvalidData("There is no 'assigned_networks' list"
                                     " in interface '%d' in node '%d'" %
                                     (iface['id'], node['id']),
                                     log_message=True)
        for net in iface['assigned_networks']:
            if not isinstance(net, dict):
                # BUG FIX: the placeholders read "Node '%d',
                # interface '%d'", so the arguments must be
                # (node id, interface id); they were swapped.
                raise errors.InvalidData(
                    "Node '%d', interface '%d':"
                    " each assigned network should be dict" %
                    (node['id'], iface['id']),
                    log_message=True)
            if 'id' not in net:
                # BUG FIX: same swapped-argument problem as above.
                raise errors.InvalidData(
                    "Node '%d', interface '%d':"
                    " each assigned network should have ID" %
                    (node['id'], iface['id']),
                    log_message=True)
            if net['id'] in net_ids:
                raise errors.InvalidData(
                    "Assigned networks for node '%d' have"
                    " a duplicate network '%d' (second"
                    " occurrence in interface '%d')" %
                    (node['id'], net['id'], iface['id']),
                    log_message=True)
            net_ids.add(net['id'])
    return node
def validate_networks_update(cls, data):
    """Validate a networks-update payload.

    The payload must be a non-empty JSON object whose 'networks' key is
    a list of dicts, each carrying an 'id'.

    :param data: raw JSON string
    :returns: parsed payload dict
    :raises errors.InvalidData: on any structural violation
    """
    parsed = cls.validate_json(data)
    if not parsed:
        raise errors.InvalidData("No valid data received",
                                 log_message=True)
    networks = parsed.get('networks')
    if not isinstance(networks, list):
        raise errors.InvalidData("'networks' is expected to be an array",
                                 log_message=True)
    # Report the first network entry that lacks an 'id'.
    missing_id = [net for net in networks if 'id' not in net]
    if missing_id:
        raise errors.InvalidData(
            "No 'id' param presents for '{0}' network".format(
                missing_id[0]),
            log_message=True)
    return parsed
def check_all_nodes(nodes, node_ids):
    """Raise InvalidData unless every requested id matched a node.

    :param nodes: node objects that were actually found
    :param node_ids: ids that were requested
    :raises errors.InvalidData: listing the ids with no matching node
    """
    found_ids = set(node.id for node in nodes)
    missing = set(node_ids) - found_ids
    if missing:
        raise errors.InvalidData(
            u"Nodes with ids {0} were not found.".format(",".join(
                map(str, missing))),
            log_message=True)