def aggregate_and_send_metrics(url: str, app_name: str, instance_id: str,
                               custom_headers: dict, custom_options: dict,
                               features: dict, ondisk_cache: FileCache) -> None:
    """Collect per-feature yes/no counters, POST them to the Unleash server,
    and persist the time of this send as the start of the next bucket.

    :param url: Unleash server base URL.
    :param app_name: Registered application name.
    :param instance_id: Unique client instance identifier.
    :param custom_headers: Extra HTTP headers passed through to the request.
    :param custom_options: Extra ``requests`` keyword options.
    :param features: Mapping of feature name -> feature object exposing
        ``name``, ``yes_count``, ``no_count`` and ``reset_stats()``.
    :param ondisk_cache: FileCache holding METRIC_LAST_SENT_TIME.
    """
    feature_stats_list = []
    for feature in features.values():
        feature_stats_list.append(
            {feature.name: {"yes": feature.yes_count, "no": feature.no_count}}
        )
        # Zero the counters immediately so hits recorded from here on are
        # attributed to the next bucket.
        feature.reset_stats()

    # Capture the timestamp ONCE so the bucket's "stop" and the persisted
    # last-sent marker are identical.  The previous code called
    # datetime.now() twice, leaving a small window whose metrics belonged
    # to neither this bucket nor the next.
    now = datetime.now(timezone.utc)

    metrics_request = {
        "appName": app_name,
        "instanceId": instance_id,
        "bucket": {
            "start": ondisk_cache[METRIC_LAST_SENT_TIME].isoformat(),
            "stop": now.isoformat(),
            # ChainMap flattens the list of one-key dicts into one mapping.
            "toggles": dict(ChainMap(*feature_stats_list)),
        },
    }

    send_metrics(url, metrics_request, custom_headers, custom_options)

    ondisk_cache[METRIC_LAST_SENT_TIME] = now
    ondisk_cache.sync()
def cache_full():
    """Yield a throwaway FileCache seeded with the full mock feature
    provisioning; the cache is deleted on fixture teardown."""
    unique_name = 'pytest_%s' % uuid.uuid4()
    seeded_cache = FileCache(unique_name)
    seeded_cache[FEATURES_URL] = MOCK_ALL_FEATURES
    seeded_cache.sync()
    yield seeded_cache
    seeded_cache.delete()
def cache_custom():
    """Yield a throwaway FileCache seeded with the custom-strategy mock
    provisioning; the cache is deleted on fixture teardown."""
    unique_name = 'pytest_%s' % uuid.uuid4()
    seeded_cache = FileCache(unique_name)
    seeded_cache[FEATURES_URL] = MOCK_CUSTOM_STRATEGY
    seeded_cache.sync()
    yield seeded_cache
    seeded_cache.delete()
def get_stopwords(extra_stopwords=None):
    """Return the set of stopwords, optionally extended with extras.

    The base list is memoized in an on-disk FileCache named ``'stopwords'``;
    on a cold cache it is populated from ``default_stopwords()``.

    :param extra_stopwords: Optional iterable of additional stopwords.
    :return: Set of stopwords (base + extras).
    """
    # A None default avoids the shared-mutable-default-argument pitfall
    # (the old signature was ``extra_stopwords=[]``).
    extra_stopwords = [] if extra_stopwords is None else list(extra_stopwords)

    cache = FileCache('stopwords')
    stopwords = cache.get('stopwords', [])
    if not stopwords:
        # Cold cache: persist the defaults for subsequent calls.
        cache['stopwords'] = default_stopwords()
        cache.sync()
        stopwords = cache['stopwords']
    return set(stopwords + extra_stopwords)
def get_stemmed_stopwords(extra_stopwords=None):
    """Return the set of stemmed stopwords, optionally extended with extras.

    Stemmed forms of the base list are memoized in the ``'stopwords'``
    FileCache under the ``'stemmed_stopwords'`` key; extras are stemmed on
    every call.

    :param extra_stopwords: Optional iterable of additional (unstemmed)
        stopwords.
    :return: Set of stemmed stopwords (base + stemmed extras).
    """
    # A None default avoids the shared-mutable-default-argument pitfall
    # (the old signature was ``extra_stopwords=[]``).
    extra_stopwords = [] if extra_stopwords is None else list(extra_stopwords)

    cache = FileCache('stopwords')
    stemmed_stopwords = cache.get('stemmed_stopwords', [])
    # Deduplicate the stemmed extras before merging.
    stemmed_extra = list({stemmize(w) for w in extra_stopwords})
    if not stemmed_stopwords:
        # Cold cache: stem the base stopwords once and persist the result.
        stemmed_stopwords = list({stemmize(w) for w in get_stopwords()})
        cache['stemmed_stopwords'] = stemmed_stopwords
        cache.sync()
    return set(stemmed_stopwords + stemmed_extra)
def get_latlng(location):
    """Geocode *location* via Nominatim, memoizing results on disk.

    Cached entries (including a cached ``None`` for failed lookups) are
    returned without hitting the network.
    """
    cache = FileCache('uber-button-cache')
    if location in cache:
        return cache[location]

    geocoder = Nominatim(user_agent="Uber-Button")
    geocoder.timeout = 60
    # Throttle: stay under one request per second before calling out.
    time.sleep(1.1)
    cache[location] = geocoder.geocode(location)
    cache.sync()
    return cache[location]
def fetch_and_load_features(url: str, app_name: str, instance_id: str,
                            custom_headers: dict, cache: FileCache,
                            strategies: dict) -> None:
    """Fetch feature provisioning from the server and load it.

    On a successful fetch the provisioning is written to *cache* first; on
    failure the previously cached provisioning is used instead.  Either way
    ``load_features`` is invoked at the end.
    """
    provisioning = get_feature_toggles(url, app_name, instance_id,
                                       custom_headers)
    if not provisioning:
        LOGGER.info("Unable to get feature flag toggles, using cached values.")
    else:
        cache[FEATURES_URL] = provisioning
        cache.sync()

    load_features(cache, strategies)
def fetch_and_load_features(url: str, app_name: str, instance_id: str,
                            custom_headers: dict, custom_options: dict,
                            cache: FileCache, features: dict,
                            strategy_mapping: dict) -> None:
    """Fetch feature provisioning from the server and load it.

    On a successful fetch the provisioning is written to *cache* first; on
    failure a warning is logged and the previously cached provisioning is
    used.  Either way ``load_features`` is invoked at the end.
    """
    provisioning = get_feature_toggles(url, app_name, instance_id,
                                       custom_headers, custom_options)
    if not provisioning:
        LOGGER.warning(
            "Unable to get feature flag toggles, using cached provisioning.")
    else:
        cache[FEATURES_URL] = provisioning
        cache.sync()

    load_features(cache, features, strategy_mapping)
class SZFileBackend(CacheBackend):
    """Cache backend that persists entries in an fcache ``FileCache``."""

    def __init__(self, arguments):
        # Recognized options are popped out of ``arguments``; anything that
        # remains is ignored by this backend.
        self._cache = FileCache(arguments.pop("appname", None),
                                flag=arguments.pop("flag", "c"),
                                serialize=arguments.pop("serialize", True),
                                app_cache_dir=arguments.pop(
                                    "app_cache_dir", None))

    def get(self, key):
        """Return the cached value for ``key``, or ``NO_VALUE`` on a miss."""
        value = self._cache.get(key, NO_VALUE)
        return value

    def get_multi(self, keys):
        """Return a list of values for ``keys``; misses become ``NO_VALUE``."""
        ret = [self._cache.get(key, NO_VALUE) for key in keys]
        return ret

    def set(self, key, value):
        """Store ``value`` under ``key``."""
        self._cache[key] = value

    def set_multi(self, mapping):
        """Store every key/value pair of ``mapping``."""
        for key, value in mapping.items():
            self._cache[key] = value

    def delete(self, key):
        """Remove ``key`` if present; a miss is silently ignored."""
        self._cache.pop(key, None)

    def delete_multi(self, keys):
        """Remove every key in ``keys``; misses are silently ignored."""
        for key in keys:
            self._cache.pop(key, None)

    @property
    def all_filenames(self):
        # NOTE(review): relies on fcache's private ``_all_filenames`` API —
        # may break across fcache versions.
        return self._cache._all_filenames()

    def sync(self, force=False):
        """Flush buffered writes to disk.

        Syncs only when fcache's internal write buffer is non-empty, unless
        ``force`` is true.
        """
        if (hasattr(self._cache, "_buffer") and self._cache._buffer) or force:
            self._cache.sync()

    def clear(self):
        """Drop every cache entry and reset fcache's in-memory buffer."""
        self._cache.clear()
        # NOTE(review): pokes fcache internals — presumably resets the write
        # buffer to a clean, not-yet-synced state after clearing; confirm
        # this guard matches the fcache version in use.
        if not hasattr(self._cache, "_buffer") or self._cache._sync:
            self._cache._sync = False
            self._cache._buffer = {}
class SZFileBackend(CacheBackend):
    """Cache backend that persists entries in an fcache ``FileCache``."""

    def __init__(self, arguments):
        # Pull out the options this backend understands; leftovers in
        # ``arguments`` are ignored.
        appname = arguments.pop("appname", None)
        flag = arguments.pop("flag", "c")
        serialize = arguments.pop("serialize", True)
        app_cache_dir = arguments.pop("app_cache_dir", None)
        self._cache = FileCache(appname, flag=flag, serialize=serialize,
                                app_cache_dir=app_cache_dir)

    def get(self, key):
        """Return the cached value for ``key``, or ``NO_VALUE`` on a miss."""
        return self._cache.get(key, NO_VALUE)

    def get_multi(self, keys):
        """Return a list of values for ``keys``; misses become ``NO_VALUE``."""
        return [self._cache.get(k, NO_VALUE) for k in keys]

    def set(self, key, value):
        """Store ``value`` under ``key``."""
        self._cache[key] = value

    def set_multi(self, mapping):
        """Store every key/value pair of ``mapping``."""
        for k, v in mapping.items():
            self._cache[k] = v

    def delete(self, key):
        """Remove ``key`` if present; a miss is silently ignored."""
        self._cache.pop(key, None)

    def delete_multi(self, keys):
        """Remove every key in ``keys``; misses are silently ignored."""
        for k in keys:
            self._cache.pop(k, None)

    @property
    def all_filenames(self):
        # NOTE(review): relies on fcache's private ``_all_filenames`` API.
        return self._cache._all_filenames()

    def sync(self, force=False):
        """Flush buffered writes to disk when dirty (or when forced)."""
        dirty = hasattr(self._cache, "_buffer") and self._cache._buffer
        if dirty or force:
            self._cache.sync()

    def clear(self):
        """Drop every cache entry and reset fcache's in-memory buffer."""
        self._cache.clear()
        # NOTE(review): pokes fcache internals — presumably resets the write
        # buffer to a clean, not-yet-synced state; verify against the fcache
        # version in use.
        if not hasattr(self._cache, "_buffer") or self._cache._sync:
            self._cache._sync = False
            self._cache._buffer = {}
class UnleashClient:
    """Client implementation."""

    def __init__(self,
                 url: str,
                 app_name: str,
                 environment: str = "default",
                 instance_id: str = "unleash-client-python",
                 refresh_interval: int = 15,
                 metrics_interval: int = 60,
                 disable_metrics: bool = False,
                 disable_registration: bool = False,
                 custom_headers: Optional[dict] = None,
                 custom_options: Optional[dict] = None,
                 custom_strategies: Optional[dict] = None,
                 cache_directory: Optional[str] = None,
                 project_name: Optional[str] = None) -> None:
        """
        A client for the Unleash feature toggle system.

        :param url: URL of the unleash server, required.
        :param app_name: Name of the application using the unleash client, required.
        :param environment: Name of the environment using the unleash client,
            optional & defaults to "default".
        :param instance_id: Unique identifier for unleash client instance,
            optional & defaults to "unleash-client-python"
        :param refresh_interval: Provisioning refresh interval in seconds,
            optional & defaults to 15 seconds
        :param metrics_interval: Metrics refresh interval in seconds,
            optional & defaults to 60 seconds
        :param disable_metrics: Disables sending metrics to unleash server,
            optional & defaults to false.
        :param disable_registration: Disables registering the client with the
            unleash server, optional & defaults to false.
        :param custom_headers: Default headers to send to unleash server,
            optional & defaults to empty.
        :param custom_options: Default requests parameters, optional &
            defaults to empty.
        :param custom_strategies: Dictionary of custom strategy names :
            custom strategy objects
        :param cache_directory: Location of the cache directory.  When unset,
            FCache will determine the location.
        :param project_name: Unleash project to scope fetches to, optional.
        """
        # None defaults (normalized here) avoid the shared-mutable-default
        # pitfall while keeping the public interface unchanged.
        custom_headers = custom_headers or {}
        custom_options = custom_options or {}
        custom_strategies = custom_strategies or {}

        # Configuration
        # Normalize away trailing forward slashes so later URL joins don't
        # produce "//".  (The previous rstrip('\\') stripped backslashes,
        # which never end an HTTP URL, so trailing '/' was silently kept.)
        self.unleash_url = url.rstrip('/')
        self.unleash_app_name = app_name
        self.unleash_environment = environment
        self.unleash_instance_id = instance_id
        self.unleash_refresh_interval = refresh_interval
        self.unleash_metrics_interval = metrics_interval
        self.unleash_disable_metrics = disable_metrics
        self.unleash_disable_registration = disable_registration
        self.unleash_custom_headers = custom_headers
        self.unleash_custom_options = custom_options
        # Merged into every is_enabled()/get_variant() context.
        self.unleash_static_context = {
            "appName": self.unleash_app_name,
            "environment": self.unleash_environment
        }
        self.unleash_project_name = project_name

        # Class objects
        self.cache = FileCache(self.unleash_instance_id,
                               app_cache_dir=cache_directory)
        self.features: dict = {}
        self.scheduler = BackgroundScheduler()
        self.fl_job: Job = None
        self.metric_job: Job = None
        # Seed the metrics bucket start time so the first metrics payload
        # has a valid window.
        self.cache[METRIC_LAST_SENT_TIME] = datetime.now(timezone.utc)
        self.cache.sync()

        # Mappings
        default_strategy_mapping = {
            "applicationHostname": ApplicationHostname,
            "default": Default,
            "gradualRolloutRandom": GradualRolloutRandom,
            "gradualRolloutSessionId": GradualRolloutSessionId,
            "gradualRolloutUserId": GradualRolloutUserId,
            "remoteAddress": RemoteAddress,
            "userWithId": UserWithId,
            "flexibleRollout": FlexibleRollout
        }

        if custom_strategies:
            strategy_v2xx_deprecation_check(list(custom_strategies.values()))

        # Defaults are applied last, so they win any name collision with a
        # custom strategy (matches the original merge order).
        self.strategy_mapping = {
            **custom_strategies,
            **default_strategy_mapping
        }

        # Client status
        self.is_initialized = False

    def initialize_client(self) -> None:
        """
        Initializes client and starts communication with central unleash
        server(s).

        This kicks off:
        * Client registration
        * Provisioning poll
        * Stats poll

        :return:
        """
        # Setup: shared kwargs for the initial fetch and the periodic jobs.
        fl_args = {
            "url": self.unleash_url,
            "app_name": self.unleash_app_name,
            "instance_id": self.unleash_instance_id,
            "custom_headers": self.unleash_custom_headers,
            "custom_options": self.unleash_custom_options,
            "cache": self.cache,
            "features": self.features,
            "strategy_mapping": self.strategy_mapping,
            "project": self.unleash_project_name
        }

        metrics_args = {
            "url": self.unleash_url,
            "app_name": self.unleash_app_name,
            "instance_id": self.unleash_instance_id,
            "custom_headers": self.unleash_custom_headers,
            "custom_options": self.unleash_custom_options,
            "features": self.features,
            "ondisk_cache": self.cache
        }

        # Register app
        if not self.unleash_disable_registration:
            register_client(self.unleash_url, self.unleash_app_name,
                            self.unleash_instance_id,
                            self.unleash_metrics_interval,
                            self.unleash_custom_headers,
                            self.unleash_custom_options,
                            self.strategy_mapping)

        # Synchronous first fetch so flags are usable immediately.
        fetch_and_load_features(**fl_args)

        # Start periodic jobs
        self.scheduler.start()
        self.fl_job = self.scheduler.add_job(
            fetch_and_load_features,
            trigger=IntervalTrigger(
                seconds=int(self.unleash_refresh_interval)),
            kwargs=fl_args)
        if not self.unleash_disable_metrics:
            self.metric_job = self.scheduler.add_job(
                aggregate_and_send_metrics,
                trigger=IntervalTrigger(
                    seconds=int(self.unleash_metrics_interval)),
                kwargs=metrics_args)

        self.is_initialized = True

    def destroy(self):
        """
        Gracefully shuts down the Unleash client by stopping jobs, stopping
        the scheduler, and deleting the cache.

        You shouldn't need this too much!

        :return:
        """
        self.fl_job.remove()
        if self.metric_job:
            self.metric_job.remove()
        self.scheduler.shutdown()
        self.cache.delete()

    @staticmethod
    def _get_fallback_value(fallback_function: Callable,
                            feature_name: str,
                            context: dict) -> bool:
        # Default to False when no fallback function is supplied.
        if fallback_function:
            fallback_value = fallback_function(feature_name, context)
        else:
            fallback_value = False
        return fallback_value

    # pylint: disable=broad-except
    def is_enabled(self,
                   feature_name: str,
                   context: Optional[dict] = None,
                   fallback_function: Callable = None) -> bool:
        """
        Checks if a feature toggle is enabled.

        Notes:
        * If client hasn't been initialized yet or an error occurs, the flag
          will default to false.

        :param feature_name: Name of the feature
        :param context: Dictionary with context (e.g. IPs, email) for
            feature toggle.
        :param fallback_function: Allows users to provide a custom function
            to set the default value.
        :return: True/False
        """
        # Merge into a NEW dict (static context wins, matching the original
        # update() precedence) instead of mutating the caller's dictionary.
        context = {**(context or {}), **self.unleash_static_context}

        if self.is_initialized:
            try:
                return self.features[feature_name].is_enabled(context)
            except Exception as excep:
                LOGGER.warning("Returning default value for feature: %s",
                               feature_name)
                LOGGER.warning("Error checking feature flag: %s", excep)
                return self._get_fallback_value(fallback_function,
                                                feature_name, context)
        else:
            LOGGER.warning("Returning default value for feature: %s",
                           feature_name)
            LOGGER.warning(
                "Attempted to get feature_flag %s, but client wasn't initialized!",
                feature_name)
            return self._get_fallback_value(fallback_function, feature_name,
                                            context)

    # pylint: disable=broad-except
    def get_variant(self,
                    feature_name: str,
                    context: Optional[dict] = None) -> dict:
        """
        Checks if a feature toggle is enabled.  If so, return variant.

        Notes:
        * If client hasn't been initialized yet or an error occurs, the flag
          will default to false.

        :param feature_name: Name of the feature
        :param context: Dictionary with context (e.g. IPs, email) for
            feature toggle.
        :return: Dict with variant and feature flag status.
        """
        # Merge into a new dict; do not mutate the caller's dictionary.
        context = {**(context or {}), **self.unleash_static_context}

        if self.is_initialized:
            try:
                return self.features[feature_name].get_variant(context)
            except Exception as excep:
                LOGGER.warning(
                    "Returning default flag/variation for feature: %s",
                    feature_name)
                LOGGER.warning("Error checking feature flag variant: %s",
                               excep)
                return DISABLED_VARIATION
        else:
            LOGGER.warning("Returning default flag/variation for feature: %s",
                           feature_name)
            LOGGER.warning(
                "Attempted to get feature flag/variation %s, but client wasn't initialized!",
                feature_name)
            return DISABLED_VARIATION
class UnleashClient(object):
    """
    Client implementation.
    """

    def __init__(self,
                 url,
                 app_name,
                 instance_id="unleash-client-python",
                 refresh_interval=15,
                 metrics_interval=60,
                 disable_metrics=False,
                 disable_registration=False,
                 custom_headers=None,
                 custom_strategies=None,
                 cache_directory=None):
        """
        A client for the Unleash feature toggle system.

        :param url: URL of the unleash server, required.
        :param app_name: Name of the application using the unleash client, required.
        :param instance_id: Unique identifier for unleash client instance,
            optional & defaults to "unleash-client-python"
        :param refresh_interval: Provisioning refresh interval in seconds,
            optional & defaults to 15 seconds
        :param metrics_interval: Metrics refresh interval in seconds,
            optional & defaults to 60 seconds
        :param disable_metrics: Disables sending metrics to unleash server,
            optional & defaults to false.
        :param custom_headers: Default headers to send to unleash server,
            optional & defaults to empty.
        :param custom_strategies: Dictionary of custom strategy names :
            custom strategy objects
        :param cache_directory: Location of the cache directory.  When unset,
            FCache will determine the location.
        """
        # None defaults avoid the shared-mutable-default-argument pitfall
        # (the old signature used ``custom_headers={}`` and
        # ``custom_strategies={}``); explicit callers are unaffected.
        custom_headers = {} if custom_headers is None else custom_headers
        custom_strategies = \
            {} if custom_strategies is None else custom_strategies

        # Configuration
        # Normalize away trailing forward slashes so URL joins don't produce
        # "//".  (The previous rstrip('\\') stripped backslashes, which never
        # end an HTTP URL.)
        self.unleash_url = url.rstrip('/')
        self.unleash_app_name = app_name
        self.unleash_instance_id = instance_id
        self.unleash_refresh_interval = refresh_interval
        self.unleash_metrics_interval = metrics_interval
        self.unleash_disable_metrics = disable_metrics
        self.unleash_disable_registration = disable_registration
        self.unleash_custom_headers = custom_headers

        # Class objects
        self.cache = FileCache(self.unleash_instance_id,
                               app_cache_dir=cache_directory)
        self.features = {}  # type: Dict
        self.scheduler = BackgroundScheduler()
        self.fl_job = None  # type: Job
        self.metric_job = None  # type: Job
        # Seed the metrics bucket start time so the first metrics payload
        # has a valid window.
        self.cache[METRIC_LAST_SENT_TIME] = datetime.now(timezone.utc)
        self.cache.sync()

        # Mappings
        default_strategy_mapping = {
            "applicationHostname": ApplicationHostname,
            "default": Default,
            "gradualRolloutRandom": GradualRolloutRandom,
            "gradualRolloutSessionId": GradualRolloutSessionId,
            "gradualRolloutUserId": GradualRolloutUserId,
            "remoteAddress": RemoteAddress,
            "userWithId": UserWithId
        }

        # Custom strategies override same-named defaults.
        self.strategy_mapping = default_strategy_mapping.copy()
        self.strategy_mapping.update(custom_strategies)

        # Client status
        self.is_initialized = False

    def initialize_client(self):
        """
        Initializes client and starts communication with central unleash
        server(s).

        This kicks off:
        * Client registration
        * Provisioning poll
        * Stats poll

        :return:
        """
        # Setup: shared kwargs for the initial fetch and the periodic jobs.
        fl_args = {
            "url": self.unleash_url,
            "app_name": self.unleash_app_name,
            "instance_id": self.unleash_instance_id,
            "custom_headers": self.unleash_custom_headers,
            "cache": self.cache,
            "features": self.features,
            "strategy_mapping": self.strategy_mapping
        }

        metrics_args = {
            "url": self.unleash_url,
            "app_name": self.unleash_app_name,
            "instance_id": self.unleash_instance_id,
            "custom_headers": self.unleash_custom_headers,
            "features": self.features,
            "ondisk_cache": self.cache
        }

        # Register app
        if not self.unleash_disable_registration:
            register_client(self.unleash_url, self.unleash_app_name,
                            self.unleash_instance_id,
                            self.unleash_metrics_interval,
                            self.unleash_custom_headers,
                            self.strategy_mapping)

        # Synchronous first fetch so flags are usable immediately.
        fetch_and_load_features(**fl_args)

        # Start periodic jobs
        self.scheduler.start()
        self.fl_job = self.scheduler.add_job(
            fetch_and_load_features,
            trigger=IntervalTrigger(
                seconds=int(self.unleash_refresh_interval)),
            kwargs=fl_args)
        if not self.unleash_disable_metrics:
            self.metric_job = self.scheduler.add_job(
                aggregate_and_send_metrics,
                trigger=IntervalTrigger(
                    seconds=int(self.unleash_metrics_interval)),
                kwargs=metrics_args)

        self.is_initialized = True

    def destroy(self):
        """
        Gracefully shuts down the Unleash client by stopping jobs, stopping
        the scheduler, and deleting the cache.

        You shouldn't need this too much!

        :return:
        """
        self.fl_job.remove()
        if self.metric_job:
            self.metric_job.remove()
        self.scheduler.shutdown()
        self.cache.delete()

    # pylint: disable=broad-except
    def is_enabled(self, feature_name, context=None, default_value=False):
        """
        Checks if a feature toggle is enabled.

        Notes:
        * If client hasn't been initialized yet or an error occurs, the flag
          will default to false.

        :param feature_name: Name of the feature
        :param context: Dictionary with context (e.g. IPs, email) for
            feature toggle.
        :param default_value: Allows override of default value.
        :return: True/False
        """
        # None default avoids a shared mutable default dict.
        context = {} if context is None else context

        if self.is_initialized:
            try:
                return self.features[feature_name].is_enabled(
                    context, default_value)
            except Exception as excep:
                LOGGER.warning("Returning default value for feature: %s",
                               feature_name)
                LOGGER.warning("Error checking feature flag: %s", excep)
                return default_value
        else:
            LOGGER.warning("Returning default value for feature: %s",
                           feature_name)
            LOGGER.warning(
                "Attempted to get feature_flag %s, but client wasn't initialized!",
                feature_name)
            return default_value
query = { 'grant_type': 'authorization_code', 'code': YANDEX_CODE, 'client_id': YANDEX_ID, 'client_secret': YANDEX_PASS } r = requests.post("https://oauth.yandex.ru/token", data=query, headers=YANDEX_REQUEST) if (r.status_code == 200): data = r.json() access_token = data['access_token'] mycache['access_token'] = access_token mycache.sync() else: print('todo: sentry here') exit() else: access_token = mycache["access_token"] print(access_token) query = {'oauth_token': access_token} r = requests.get("https://api-metrica.yandex.com/management/v1/counters", params=query, headers=YANDEX_REQUEST) if r.status_code == 200: content = r.text.encode("UTF-8").decode("UTF-8").strip()
class Cache(object):
    """Thread-safe, file-backed cache of manifests keyed by origin."""

    _cache = {}
    _cachedir = None
    _cache_lock = None

    @property
    def cache_lock(self):
        """The lock guarding all cache mutations."""
        return self._cache_lock

    def __init__(self, environment):
        """
        Initializes the cache so that kraft does not have to constantly
        retrieve informational lists about unikraft, its available
        architectures, platforms, libraries and supported applications.
        """
        self._cachedir = environment.get('UK_CACHEDIR')

        # Initiaize a cache instance
        self._cache = FileCache(
            app_cache_dir=self._cachedir,
            appname=__program__,
            flag='cs'
        )

        self._cache_lock = threading.Lock()

    @property
    def cache(self):
        """The underlying FileCache, read under the lock."""
        with self._cache_lock:
            return self._cache

    def get(self, origin=None):
        """Return the cached manifest for *origin*, or None if absent."""
        if not isinstance(origin, six.string_types) or origin not in self._cache:
            return None
        logger.debug("Retrieving %s from cache..." % origin)
        with self._cache_lock:
            return self._cache[origin]

    def find_item_by_name(self, type=None, name=None):
        """Search every cached manifest for an item named *name*,
        optionally restricted to items whose type shortname is *type*."""
        for origin in self._cache:
            for _, candidate in self._cache[origin].items():
                type_matches = type is None or candidate.type.shortname == type
                if type_matches and candidate.name == name:
                    return candidate
        return None

    def all(self):
        """Return the whole underlying cache."""
        return self.cache

    def save(self, origin, manifest):
        """Store *manifest* under *origin*, validating both arguments."""
        if not isinstance(origin, six.string_types):
            raise TypeError("origin is not string")
        if not isinstance(manifest, Manifest):
            raise TypeError("Invalid manifest")
        with self._cache_lock:
            logger.debug("Saving %s into cache..." % manifest)
            self._cache[origin] = manifest

    def sync(self):
        """Flush the cache to disk."""
        logger.debug("Synchronizing cache with filesystem...")
        with self._cache_lock:
            self._cache.sync()

    def purge(self):
        """Remove every entry from the cache."""
        logger.debug("Purging cache...")
        with self._cache_lock:
            self._cache.clear()

    def is_stale(self):
        """
        Determine if the list of remote repositories is stale.  Return a
        boolean value if at least one repository is marked as stale.
        """
        logger.debug("Checking cache for staleness...")
        return len(self.all()) == 0