def fetch_and_load_features(url: str, app_name: str, instance_id: str, custom_headers: dict, custom_options: dict, cache: BaseCache, features: dict, strategy_mapping: dict, project: str = None) -> None:
    """Fetch feature provisioning from the server and (re)load local features.

    Sends the cached ETag with the request so the server can answer with an
    empty body when nothing changed; in that case the cached provisioning is
    kept as-is.  Always ends by rebuilding the in-memory ``features`` dict
    from whatever the cache now holds.

    :param url: Base URL of the feature-toggle server.
    :param app_name: Application name reported to the server.
    :param instance_id: Instance identifier reported to the server.
    :param custom_headers: Extra HTTP headers for the request.
    :param custom_options: Extra options forwarded to the HTTP layer.
    :param cache: Cache backend holding provisioning and the last ETag.
    :param features: Mutable feature dict to be refreshed in place.
    :param strategy_mapping: Strategy-name -> implementation mapping.
    :param project: Optional project filter passed to the server.
    """
    provisioning, new_etag = get_feature_toggles(
        url,
        app_name,
        instance_id,
        custom_headers,
        custom_options,
        project,
        cache.get(ETAG),
    )

    if not provisioning:
        # Empty body (e.g. HTTP 304) — keep whatever is already cached.
        LOGGER.debug("No feature provisioning returned from server, using cached provisioning.")
    else:
        cache.set(FEATURES_URL, provisioning)

    if new_etag:
        cache.set(ETAG, new_etag)

    load_features(cache, features, strategy_mapping)
def aggregate_and_send_metrics(url: str, app_name: str, instance_id: str, custom_headers: dict, custom_options: dict, features: dict, cache: BaseCache) -> None:
    """Aggregate per-feature yes/no hit counters and submit them to the server.

    Features with no recorded hits are skipped and their counters are left
    untouched.  Counters of reported features are reset as they are collected.
    When nothing was hit since the last submission, no request is made and the
    last-sent timestamp is not advanced.

    :param url: Base URL of the metrics endpoint.
    :param app_name: Application name reported in the payload.
    :param instance_id: Instance identifier reported in the payload.
    :param custom_headers: Extra HTTP headers for the request.
    :param custom_options: Extra options forwarded to the HTTP layer.
    :param features: Feature dict whose values carry yes/no counters.
    :param cache: Cache backend holding the last-sent timestamp.
    """
    # Collect toggles with at least one hit, resetting each as it is reported.
    toggles = {}
    for feature in features.values():
        if not (feature.yes_count or feature.no_count):
            continue
        toggles[feature.name] = {
            "yes": feature.yes_count,
            "no": feature.no_count,
        }
        feature.reset_stats()

    if not toggles:
        LOGGER.debug(
            "No feature flags with metrics, skipping metrics submission.")
        return

    # Build the request only when there is something to send; the original
    # built it unconditionally, which did wasted work and would fail on
    # .isoformat() if METRIC_LAST_SENT_TIME were unset on the no-op path.
    metrics_request = {
        "appName": app_name,
        "instanceId": instance_id,
        "bucket": {
            "start": cache.get(METRIC_LAST_SENT_TIME).isoformat(),
            "stop": datetime.now(timezone.utc).isoformat(),
            "toggles": toggles,
        },
    }

    send_metrics(url, metrics_request, custom_headers, custom_options)
    cache.set(METRIC_LAST_SENT_TIME, datetime.now(timezone.utc))