def generate_public_cache_authfile(global_data: GlobalData, fqdn=None, legacy=True, suppress_errors=True) -> str:
    """Generate the Xrootd authfile needed for public caches.

    Grants world ("u *") read access to every namespace marked PUBLIC in any
    VO's StashCache data.  If *fqdn* is given, only namespaces that the cache
    resource at that FQDN is allowed to serve are included; "" is returned
    when no cache resource matches the FQDN.
    """
    # Seed the output; the legacy format grants /user/ligo up front.
    # Each entry ends with a line-continuation backslash; the last one is
    # stripped at the end.
    if legacy:
        pieces = ["u * /user/ligo -rl \\\n"]
    else:
        pieces = ["u * \\\n"]

    # Restrict to a single cache resource when an FQDN was supplied.
    resource = None
    if fqdn:
        groups = global_data.get_topology().get_resource_group_list()
        resource = _get_cache_resource(fqdn, groups, suppress_errors)
        if not resource:
            return ""

    # Gather every namespace marked PUBLIC across all StashCache VOs.
    public_dirs = set()
    for vo_name, vo_details in global_data.get_vos_data().vos.items():
        stashcache_data = vo_details.get('DataFederations', {}).get('StashCache')
        if not stashcache_data:
            continue
        # Skip VOs this particular cache is not allowed to serve.
        if resource and not _cache_is_allowed(resource, vo_name,
                                              stashcache_data, True,
                                              suppress_errors):
            continue
        for dirname, authz_list in stashcache_data.get("Namespaces", {}).items():
            if "PUBLIC" in authz_list:
                public_dirs.add(dirname)

    pieces.extend(" {} rl \\\n".format(dirname) for dirname in sorted(public_dirs))
    authfile = "".join(pieces)
    # Drop the continuation backslash from the final line.
    if authfile.endswith("\\\n"):
        authfile = authfile[:-2] + "\n"
    return authfile
def generate_cache_authfile(global_data: GlobalData, fqdn=None, legacy=True, suppress_errors=True) -> str:
    """Generate the Xrootd authfile needed by a StashCache cache server.

    Maps each authorized identity (FQAN group or hashed DN) to the set of
    namespaces it may read.  VOs whose namespaces are all PUBLIC are skipped
    (they are covered by the public-cache authfile).

    Args:
        global_data: topology/VO data source; also supplies LIGO LDAP
            connection info when *legacy* is True.
        fqdn: if given, only include namespaces the cache resource at this
            FQDN is allowed to serve; returns "" if no resource matches.
        legacy: if True, additionally grant /user/ligo to DNs pulled from
            the LIGO LDAP server.
        suppress_errors: if True, skip malformed VO entries instead of
            raising DataError.

    Returns:
        The authfile contents, one line per identity.

    Raises:
        DataError: when a VO lacks a Namespaces list or a namespace lacks
            authorizations and *suppress_errors* is False.
    """
    id_to_dir = defaultdict(set)

    resource = None
    if fqdn:
        resource_groups = global_data.get_topology().get_resource_group_list()
        resource = _get_cache_resource(fqdn, resource_groups, suppress_errors)
        if not resource:
            return ""

    vo_data = global_data.get_vos_data()
    for vo_name, vo_details in vo_data.vos.items():
        stashcache_data = vo_details.get('DataFederations', {}).get('StashCache')
        if not stashcache_data:
            continue
        namespaces = stashcache_data.get("Namespaces")
        if not namespaces:
            if suppress_errors:
                continue
            else:
                raise DataError("VO {} in StashCache does not provide a Namespaces list.".format(vo_name))

        # A VO whose every namespace is exactly ["PUBLIC"] needs no
        # per-identity authorization here.
        needs_authz = False
        for namespace, authz_list in namespaces.items():
            if not authz_list:
                if suppress_errors:
                    continue
                else:
                    raise DataError("Namespace {} (VO {}) does not provide any authorizations.".format(namespace, vo_name))
            if authz_list != ["PUBLIC"]:
                needs_authz = True
                break
        if not needs_authz:
            continue

        if resource and not _cache_is_allowed(resource, vo_name,
                                              stashcache_data, False,
                                              suppress_errors):
            continue

        for namespace, authz_list in namespaces.items():
            for authz in authz_list:
                if not isinstance(authz, str):
                    continue
                if authz.startswith("FQAN:"):
                    id_to_dir["g {}".format(authz[5:])].add(namespace)
                elif authz.startswith("DN:"):
                    # renamed from `hash`: don't shadow the builtin
                    dn_hash = _generate_dn_hash(authz[3:])
                    id_to_dir["u {}".format(dn_hash)].add(namespace)

    if legacy:
        ldappass = readfile(global_data.ligo_ldap_passfile, log)
        for dn in _generate_ligo_dns(global_data.ligo_ldap_url,
                                     global_data.ligo_ldap_user, ldappass):
            dn_hash = _generate_dn_hash(dn)
            id_to_dir["u {}".format(dn_hash)].add("/user/ligo")

    # Build with a list + join instead of repeated string += (quadratic).
    lines = []
    for authz_id, dir_list in id_to_dir.items():  # renamed from `id`: builtin
        if dir_list:
            lines.append("{} {}\n".format(
                authz_id, " ".join(d + " rl" for d in sorted(dir_list))))
    return "".join(lines)
class Validation:
    """Validates topology project files against loaded topology/VO data.

    Loads VO data, resource groups, and campus grid IDs once at
    construction; project files themselves are loaded one at a time by
    validate_project_file() so each file's errors are reported separately.
    """
    global_data: GlobalData
    resource_groups: List[ResourceGroup]
    resource_group_names: Set[str]
    vos_data: VOsData
    campus_grid_ids: Dict[str, int]
    project_filenames: List[str]

    def __init__(self, topdir):
        """Constructor; loads VO and Resource Data and campus grid IDs.

        Does not load the project data, only gets a list of filenames; for
        validation, we want to load the files ourselves, one at a time.
        """
        self.global_data = GlobalData(config={"TOPOLOGY_DATA_DIR": topdir}, strict=True)
        self.resource_groups = self.global_data.get_topology().get_resource_group_list()
        self.resource_group_names = {x.name for x in self.resource_groups}
        self.vos_data = self.global_data.get_vos_data()
        projects_dir = self.global_data.projects_dir
        self.campus_grid_ids = project_reader.get_campus_grid_ids(projects_dir)
        # "[!_]*.yaml" skips underscore-prefixed (non-project) YAML files.
        self.project_filenames = glob.glob(os.path.join(projects_dir, "[!_]*.yaml"))

    def validate_project_file(self, project_filename: str) -> List[str]:
        """Validate one project file, returning a list of error messages for
        problems found with that file.

        Current validations are for ResourceAllocations:
        1. Check ResourceGroups are in the topology tree
        2. Check AllowedSchedds are Resources with "Submit Node" services
        """
        project_filebn = os.path.basename(project_filename)
        try:
            project = project_reader.get_one_project(
                project_filename, self.campus_grid_ids, self.vos_data
            )
        except Exception as err:
            return [f"{project_filebn}: exception while reading: {err!r}"]

        errors = []
        if not is_null(project, "ResourceAllocations", "ResourceAllocation"):
            for resource_allocation in project["ResourceAllocations"]["ResourceAllocation"]:
                # Check 1: every ExecuteResourceGroup must name a known group
                if not is_null(resource_allocation, "ExecuteResourceGroups", "ExecuteResourceGroup"):
                    project_ergs = resource_allocation["ExecuteResourceGroups"]["ExecuteResourceGroup"]
                    project_erg_names = {x["GroupName"] for x in project_ergs}
                    errors.extend([
                        f"{project_filebn}: ExecuteResourceGroup '{missing}' not found in topology"
                        for missing in (project_erg_names - self.resource_group_names)
                    ])
                # Check 2: every SubmitResource must exist and run a Submit Node
                if not is_null(resource_allocation, "SubmitResources", "SubmitResource"):
                    project_schedd_names = resource_allocation["SubmitResources"]["SubmitResource"]
                    for sn in project_schedd_names:
                        resource = self._get_resource_by_name(sn)
                        if not resource:
                            errors.append(
                                f"{project_filebn}: SubmitResource '{sn}' not found in topology"
                            )
                        elif "Submit Node" not in resource.service_names:
                            errors.append(
                                f"{project_filebn}: SubmitResource '{sn}' does not provide a Submit Node"
                            )
        return errors

    def _get_resource_by_name(self, name: str) -> Optional[Resource]:
        """Return the Resource with the given name, searching every resource
        group; None if no group contains it."""
        for group in self.resource_groups:
            if name in group.resources_by_name:
                return group.resources_by_name[name]
        return None