def event_match(self, event_type):
    for e in self._ensure_list(self.cfg.get('event_create', [])):
        if fnmatch.fnmatch(event_type, e):
            return EVENT_CREATE
    for e in self._ensure_list(self.cfg.get('event_delete', [])):
        if fnmatch.fnmatch(event_type, e):
            return EVENT_DELETE
    for e in self._ensure_list(self.cfg.get('event_update', [])):
        if fnmatch.fnmatch(event_type, e):
            return EVENT_UPDATE
def is_supported(dataset, data_name):
    # Support wildcards like storage.* and !disk.*
    # Start with negations; we consider that the order is deny, then allow
    if any(fnmatch.fnmatch(data_name, datapoint[1:])
           for datapoint in dataset
           if datapoint[0] == "!"):
        return False

    if any(fnmatch.fnmatch(data_name, datapoint)
           for datapoint in dataset
           if datapoint[0] != "!"):
        return True

    # If we only have negations, we assume the default is allow
    return all(datapoint.startswith("!") for datapoint in dataset)
def is_supported(dataset, data_name):
    # Support wildcards like storage.* and !disk.*
    # Start with negations; we consider that the order is deny, then allow
    if any(fnmatch.fnmatch(data_name, datapoint[1:])
           for datapoint in dataset
           if datapoint[0] == '!'):
        return False

    if any(fnmatch.fnmatch(data_name, datapoint)
           for datapoint in dataset
           if datapoint[0] != '!'):
        return True

    # If we only have negations, we assume the default is allow
    return all(datapoint.startswith('!') for datapoint in dataset)
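# A minimal, self-contained usage sketch of the deny-then-allow matching
# implemented by is_supported above. The sample datasets and metric names
# below are illustrative, not taken from any real configuration.
import fnmatch


def _is_supported_demo(dataset, data_name):
    # Same logic as is_supported, repeated here so the sketch runs standalone.
    if any(fnmatch.fnmatch(data_name, datapoint[1:])
           for datapoint in dataset if datapoint[0] == '!'):
        return False
    if any(fnmatch.fnmatch(data_name, datapoint)
           for datapoint in dataset if datapoint[0] != '!'):
        return True
    return all(datapoint.startswith('!') for datapoint in dataset)


# Negations are evaluated first, then positive patterns.
assert _is_supported_demo(['storage.*', '!disk.*'], 'storage.usage') is True
assert _is_supported_demo(['storage.*', '!disk.*'], 'disk.read.bytes') is False
# A dataset made only of negations defaults to allow for everything else.
assert _is_supported_demo(['!disk.*'], 'cpu.util') is True
# A dataset with positive patterns denies anything that matches none of them.
assert _is_supported_demo(['storage.*'], 'cpu.util') is False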
def _remove_from_vol_cache(self, cache_file_path, ref_suffix, volume):
    """Removes a reference and possibly a volume from the volume cache.

    This method removes the ref_id reference (soft link) from the cache.
    If no other references exist, the cached volume itself is removed, too.

    :param cache_file_path: file path to the volume in the cache
    :param ref_suffix: the id-based suffix of the cache file reference
    :param volume: the volume whose share defines the cache to address
    """
    # NOTE(kaisers): As the cache_file_path may be a relative path we use
    # cache dir and file name to ensure absolute paths in all operations.
    cache_path = os.path.join(self._local_volume_dir(volume),
                              self.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME)
    cache_file_name = os.path.basename(cache_file_path)

    # Delete the reference
    LOG.debug("Deleting cache reference %(cfp)s%(rs)s",
              {"cfp": cache_file_path, "rs": ref_suffix})
    fileutils.delete_if_exists(os.path.join(cache_path,
                                            cache_file_name + ref_suffix))

    # If no other reference exists, remove the cache entry.
    for file in os.listdir(cache_path):
        if fnmatch.fnmatch(file, cache_file_name + ".*"):
            # Found another reference file, keep the cache entry
            LOG.debug("Cached volume %(file)s still has at least one "
                      "reference: %(ref)s",
                      {"file": cache_file_name, "ref": file})
            return
    # No other reference found, remove the cache entry
    LOG.debug("Removing cached volume %(cvol)s as no more references for "
              "this cached volume exist.",
              {"cvol": os.path.join(cache_path, cache_file_name)})
    fileutils.delete_if_exists(os.path.join(cache_path, cache_file_name))
def get_archive_policy_for_metric(self, metric_name):
    """Helper to get the archive policy according to the archive policy rules."""
    rules = self.list_archive_policy_rules()
    for rule in rules:
        if fnmatch.fnmatch(metric_name or "", rule.metric_pattern):
            return self.get_archive_policy(rule.archive_policy_name)
    raise NoArchivePolicyRuleMatch(metric_name)
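# A rough, standalone sketch of the first-match rule lookup used by
# get_archive_policy_for_metric above. ArchivePolicyRule is a stand-in
# namedtuple and the patterns/policy names are invented for illustration;
# the real client returns its own rule and policy objects.
import collections
import fnmatch

ArchivePolicyRule = collections.namedtuple(
    'ArchivePolicyRule', ['metric_pattern', 'archive_policy_name'])

RULES = [
    ArchivePolicyRule('disk.*', 'high'),
    ArchivePolicyRule('*', 'low'),
]


def archive_policy_name_for_metric(metric_name):
    # The first rule whose pattern matches wins; a catch-all '*' rule
    # typically comes last in the list.
    for rule in RULES:
        if fnmatch.fnmatch(metric_name or '', rule.metric_pattern):
            return rule.archive_policy_name
    raise LookupError(metric_name)


assert archive_policy_name_for_metric('disk.read.bytes') == 'high'
assert archive_policy_name_for_metric('cpu.util') == 'low'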
def _get_drivers_for_message(self, group, event_type, priority):
    """Which drivers should be called for this event_type or priority."""
    accepted_drivers = set()

    for driver, rules in six.iteritems(group):
        checks = []
        for key, patterns in six.iteritems(rules):
            if key == 'accepted_events':
                c = [fnmatch.fnmatch(event_type, p) for p in patterns]
                checks.append(any(c))
            if key == 'accepted_priorities':
                c = [fnmatch.fnmatch(priority, p.lower()) for p in patterns]
                checks.append(any(c))
        if all(checks):
            accepted_drivers.add(driver)

    return list(accepted_drivers)
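# A self-contained sketch of the accepted_events / accepted_priorities
# filtering performed by _get_drivers_for_message above. The group dict is
# an invented example, and dict.items() stands in for six.iteritems to keep
# the sketch dependency-free on Python 3.
import fnmatch


def drivers_for_message(group, event_type, priority):
    accepted = set()
    for driver, rules in group.items():
        checks = []
        for key, patterns in rules.items():
            if key == 'accepted_events':
                checks.append(any(fnmatch.fnmatch(event_type, p)
                                  for p in patterns))
            if key == 'accepted_priorities':
                checks.append(any(fnmatch.fnmatch(priority, p.lower())
                                  for p in patterns))
        # A driver is selected only if every rule it declares matches.
        if all(checks):
            accepted.add(driver)
    return list(accepted)


group = {
    'log': {'accepted_priorities': ['info', 'warn']},
    'mq': {'accepted_events': ['compute.instance.*']},
}
matched = drivers_for_message(group, 'compute.instance.create', 'info')
assert set(matched) == {'log', 'mq'}
assert drivers_for_message(group, 'image.upload', 'error') == []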
def _reload_previously_created_subcas(self):
    for file in os.listdir(self.subca_directory):
        if fnmatch.fnmatch(file, '*.key'):
            ca_id, _ext = os.path.splitext(file)
            self.cas[ca_id] = SnakeoilCA(
                cert_path=os.path.join(self.subca_directory,
                                       ca_id + ".cert"),
                key_path=os.path.join(self.subca_directory, file),
                chain_path=os.path.join(self.subca_directory,
                                        ca_id + ".chain"),
                pkcs7_path=os.path.join(self.subca_directory,
                                        ca_id + ".p7b"))
def _reload_previously_created_subcas(self):
    for file in os.listdir(self.subca_directory):
        if fnmatch.fnmatch(file, '*.key'):
            ca_id, _ext = os.path.splitext(file)
            self.cas[ca_id] = SnakeoilCA(
                cert_path=os.path.join(self.subca_directory,
                                       ca_id + ".cert"),
                key_path=os.path.join(self.subca_directory, file),
                chain_path=os.path.join(self.subca_directory,
                                        ca_id + ".chain"),
                pkcs7_path=os.path.join(self.subca_directory,
                                        ca_id + ".p7b")
            )
def _remove_admin_fields(self, document):
    """Prior to indexing, remove any fields that shouldn't be indexed
    and made available to users who do not have administrative privileges.
    Returns a copy of the document even if there's nothing to remove.
    """
    sanitized_document = {}
    for k, v in document.items():
        # Only keep a field if it doesn't have ANY matches against
        # admin_only_fields
        if not any(fnmatch.fnmatch(k, field)
                   for field in self.plugin.admin_only_fields):
            sanitized_document[k] = v

    return sanitized_document
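# A self-contained sketch of the glob-based field sanitization done by
# _remove_admin_fields above. The admin-only patterns and the sample
# document are hypothetical, not the plugin's real configuration.
import fnmatch

ADMIN_ONLY_FIELDS = ['OS-EXT-SRV-ATTR:*', 'host']


def remove_admin_fields(document, admin_only_fields=ADMIN_ONLY_FIELDS):
    # Keep a field only if it matches none of the admin-only patterns.
    return {k: v for k, v in document.items()
            if not any(fnmatch.fnmatch(k, field)
                       for field in admin_only_fields)}


doc = {'id': '42', 'host': 'compute-1', 'OS-EXT-SRV-ATTR:hypervisor': 'hv0'}
assert remove_admin_fields(doc) == {'id': '42'}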
def match_type(self, event_type):
    for t in self.event_types:
        if fnmatch.fnmatch(event_type, t):
            return True
    return False
def match_type(self, meter_name):
    for t in self._event_type:
        if fnmatch.fnmatch(meter_name, t):
            return True
    return False
def match(self, metric_name):
    for t in self.cfg['metrics']:
        if fnmatch.fnmatch(metric_name, t):
            return True
    return False
def _match(pollster):
    """Find out if the pollster name matches one of the listed patterns."""
    return any(fnmatch.fnmatch(pollster.name, pattern)
               for pattern in pollster_list)
def excluded_type(self, event_type):
    for t in self._excluded_types:
        if fnmatch.fnmatch(event_type, t):
            return True
    return False
def event_type_to_watch(self, event_type):
    return fnmatch.fnmatch(event_type, self.obj.rule['event_type'])
def _match(pollster):
    """Find out if the pollster name matches one of the listed patterns."""
    return any(
        fnmatch.fnmatch(pollster.name, pattern)
        for pattern in pollster_list)
def metric_match(self, metric_name):
    for t in self.cfg['metrics']:
        if fnmatch.fnmatch(metric_name, t):
            return True
    return False