def parse_config(self, config_file_path):
    """Parse the main logfeeder configuration plus the app-specific config.

    Args:
        config_file_path (string): path to the configuration file
    """
    # Load the top-level logfeeder configuration into staticconf's namespace.
    staticconf.YamlConfiguration(config_file_path)
    self.aws_config_filepath = staticconf.read('logfeeder.aws_config_filepath')
    self.domain = staticconf.read('logfeeder.domain')
    app_file = staticconf.read('{0}.file'.format(self.APP_NAME))

    # Layer the app-specific configuration file on top.
    contents = staticconf.YamlConfiguration(app_file)
    self.api_creds_filepath = staticconf.read('api_creds_filepath')

    # Rate limiting is optional; only configure it when the app asks for it.
    if 'rate_limiter_num_calls_per_timeunit' in contents:
        self.rate_limiter = RateLimiter(
            calls_per_timeunit=staticconf.read_int('rate_limiter_num_calls_per_timeunit'),
            seconds_per_timeunit=staticconf.read_int('rate_limiter_num_seconds_per_timeunit'),
        )

    # Every 'enable_<name>' key toggles a sub-API on or off.
    self.sub_apis = {
        key.split('enable_', 1)[1]: staticconf.read_bool(key)
        for key in contents
        if key.startswith('enable_')
    }

    # If an API doesn't have any sub_apis, set its APP_NAME in self.sub_apis
    # for code compatibility.
    if not self.sub_apis:
        self.sub_apis = {self.APP_NAME: True}
def get_twitter_client():
    """Build a Twython client from configuration.

    Returns None when Twitter integration is disabled (or not configured).
    """
    twitter_enabled = staticconf.read_bool('twitter.enabled', default=False)
    if not twitter_enabled:
        return None
    # All four OAuth credentials come from the 'twitter.*' config keys.
    credentials = {
        'app_key': staticconf.read('twitter.api_key'),
        'app_secret': staticconf.read('twitter.api_secret'),
        'oauth_token': staticconf.read('twitter.access_token'),
        'oauth_token_secret': staticconf.read('twitter.access_token_secret'),
    }
    return Twython(**credentials)
def search_log_source_by_keyword(request_body):
    """POST a keyword search to the log finder service and return its JSON body.

    Returns an empty result set (no network call) when the log finder service
    is disabled in configuration.  Raises an HTTPError on a bad HTTP status.
    """
    if staticconf.read_bool('disable_logfinder_service'):
        return {'logs': []}

    # send HTTP request
    search_endpoint = staticconf.read_string('log_finder_search_end_point')
    response = requests.post(search_endpoint, request_body)
    # if we get a bad HTTP status, raise an exception
    response.raise_for_status()
    return response.json()
def get_log_meta_data(bucket_name, log_name):
    """Fetch metadata for one log from the log finder service.

    Returns None when either identifier is missing or when the log finder
    service is disabled.  Raises an HTTPError on a bad HTTP status.
    """
    if bucket_name is None or log_name is None:
        return None
    if staticconf.read_bool('disable_logfinder_service'):
        return None

    # send HTTP request
    base_endpoint = staticconf.read_string('log_finder_buckets_end_point')
    endpoint = '{0}/{1}/{2}'.format(base_endpoint, bucket_name, log_name)
    response = requests.get(endpoint)
    # if we get a bad HTTP status, raise an exception
    response.raise_for_status()
    return response.json()
def ask_for_confirmation(prompt: str, default: str = 'y'):
    """Prompt the user on stdin for a yes/no confirmation.

    :param prompt: the question to display
    :param default: 'y' or 'n' (case-insensitive); the answer assumed when the
        user just presses Enter.  The default choice is shown capitalized in
        the "[Y/n]" / "[y/N]" hint.
    :returns: True for yes, False for no
    """
    yes = 'Y' if default.lower() == 'y' else 'y'
    no = 'n' if default.lower() == 'y' else 'N'
    while True:
        sys.stdout.write(f'{prompt} [{yes}/{no}] ')
        sys.stdout.flush()
        # A config-level 'yes' flag short-circuits the prompt (non-interactive runs).
        if staticconf.read_bool('yes', default=False):
            return True
        inp = sys.stdin.readline().strip()
        if inp.lower() in {'y', 'yes'}:
            return True
        elif inp.lower() in {'n', 'no'}:
            return False
        elif inp == '':
            # BUG FIX: compare case-insensitively, matching how `default` is
            # interpreted when the prompt is built.  Previously default='Y'
            # displayed "[Y/n]" but an empty response returned False.
            return default.lower() == 'y'
        else:
            print('Unrecognized response; please enter "yes" or "no"')
def setup_signals_environment(pool: str, scheduler: str) -> Tuple[int, int]:
    """Export the CMAN_* environment variables describing which autoscaling
    signals (the default signal plus the pool's own, if configured) to fetch.

    :param pool: the pool being autoscaled
    :param scheduler: the scheduler type for the pool
    :returns: (number of distinct signal versions to fetch, number of signals)
    """
    app_namespace = POOL_NAMESPACE.format(pool=pool, scheduler=scheduler)
    names, versions, namespaces, apps = [], [], [], []

    # The default signal only needs to be fetched when it is not internal.
    if not staticconf.read_bool('autoscale_signal.internal', default=False):
        names.append(staticconf.read_string('autoscale_signal.name'))
        versions.append(staticconf.read_string('autoscale_signal.branch_or_tag'))
        namespaces.append(staticconf.read_string('autoscaling.default_signal_role'))
        apps.append('__default__')

    # Add the pool's own signal when one is configured in its namespace.
    app_signal_name = staticconf.read_string(
        'autoscale_signal.name',
        namespace=app_namespace,
        default=None,
    )
    if app_signal_name:
        names.append(app_signal_name)
        versions.append(staticconf.read_string(
            'autoscale_signal.branch_or_tag',
            namespace=app_namespace,
            default=pool,
        ))
        namespaces.append(staticconf.read_string(
            'autoscale_signal.namespace',
            namespace=app_namespace,
            default=pool,
        ))
        apps.append(pool)

    distinct_versions = set(versions)
    os.environ.update({
        'CMAN_VERSIONS_TO_FETCH': ' '.join(distinct_versions),
        'CMAN_SIGNAL_VERSIONS': ' '.join(versions),
        'CMAN_SIGNAL_NAMESPACES': ' '.join(namespaces),
        'CMAN_SIGNAL_NAMES': ' '.join(names),
        'CMAN_SIGNAL_APPS': ' '.join(apps),
        'CMAN_NUM_VERSIONS': str(len(distinct_versions)),
        'CMAN_NUM_SIGNALS': str(len(versions)),
        'CMAN_SIGNALS_BUCKET': staticconf.read_string('aws.signals_bucket', default=DEFAULT_SIGNALS_BUCKET),
    })

    return len(distinct_versions), len(versions)
def _get_pods_by_ip_or_pending(
    self
) -> Tuple[Mapping[str, List[KubernetesPod]], List[KubernetesPod], Mapping[
        str, List[KubernetesPod]]]:
    """Partition this pool's pods into three groups: running pods keyed by
    host IP, pending pods, and excluded daemonset pods keyed by host IP.
    """
    running_by_ip: Mapping[str, List[KubernetesPod]] = defaultdict(list)
    pending: List[KubernetesPod] = []
    excluded_by_ip: Mapping[str, List[KubernetesPod]] = defaultdict(list)

    # The pool-level setting takes precedence; the global flag is the fallback.
    skip_daemonset_pods = self.pool_config.read_bool(
        'exclude_daemonset_pods',
        default=staticconf.read_bool('exclude_daemonset_pods', default=False),
    )

    for pod in self._core_api.list_pod_for_all_namespaces().items:
        if not self._pod_belongs_to_pool(pod):
            continue
        if skip_daemonset_pods and self._pod_belongs_to_daemonset(pod):
            excluded_by_ip[pod.status.host_ip].append(pod)
        elif pod.status.phase == 'Running':
            running_by_ip[pod.status.host_ip].append(pod)
        else:
            pending.append(pod)

    return running_by_ip, pending, excluded_by_ip
def test_readers(self):
    # Populate the default staticconf namespace from the fixture config.
    staticconf.DictConfiguration(self.config)
    # Each typed reader should coerce its stored value to the expected type.
    cases = [
        (staticconf.read_float, 'SomeClass.ratio', 7.7),
        (staticconf.read_bool, 'globals', False),
        (staticconf.read_list_of_int, 'options', [1, 7, 3, 9]),
    ]
    for reader, key, expected in cases:
        assert_equal(reader(key), expected)
def _is_custom_aws_instance():
    """Return whether this host is flagged as a custom AWS instance.

    Defaults to False when the key is absent from configuration.
    """
    # Pass the default by keyword for consistency with every other
    # staticconf.read_* call site in this codebase.
    return staticconf.read_bool('is_custom_aws_instance', default=False)
def __init__(
    self,
    cluster: str,
    pool: str,
    scheduler: str,
    apps: List[str],
    pool_manager: Optional[PoolManager] = None,
    metrics_client: Optional[ClustermanMetricsBotoClient] = None,
    monitoring_enabled: bool = True,
) -> None:
    """ Class containing the core logic for autoscaling a cluster

    :param cluster: the name of the cluster to autoscale
    :param pool: the name of the pool to autoscale
    :param scheduler: the scheduler type for the pool (used to build the pool's config namespace)
    :param apps: a list of apps running on the pool
    :param pool_manager: a PoolManager object (used for simulations)
    :param metrics_client: a ClustermanMetricsBotoClient object (used for simulations)
    :param monitoring_enabled: set to False to disable sensu alerts during scaling
    """
    self.cluster = cluster
    self.pool = pool
    self.scheduler = scheduler
    self.apps = apps
    self.monitoring_enabled = monitoring_enabled

    # TODO: handle multiple apps in the autoscaler (CLUSTERMAN-126)
    if len(self.apps) > 1:
        raise NotImplementedError(
            'Scaling multiple apps in a cluster is not yet supported')

    logger.info(
        f'Initializing autoscaler engine for {self.pool} in {self.cluster}...'
    )
    # One gauge for the overall target capacity, plus one gauge per signal
    # resource type (one per field of SignalResourceRequest).
    gauge_dimensions = {'cluster': cluster, 'pool': pool}
    monitoring_client = get_monitoring_client()
    self.target_capacity_gauge = monitoring_client.create_gauge(
        TARGET_CAPACITY_GAUGE_NAME, gauge_dimensions)
    self.resource_request_gauges: Dict[str, Any] = {}
    for resource in SignalResourceRequest._fields:
        self.resource_request_gauges[
            resource] = monitoring_client.create_gauge(
                RESOURCE_GAUGE_BASE_NAME.format(resource=resource),
                gauge_dimensions,
            )
    self.autoscaling_config = get_autoscaling_config(
        POOL_NAMESPACE.format(pool=self.pool, scheduler=self.scheduler),
    )
    # pool_manager and metrics_client may be injected (for simulations);
    # otherwise construct the real ones here.
    self.pool_manager = pool_manager or PoolManager(
        self.cluster, self.pool, self.scheduler)
    self.mesos_region = staticconf.read_string('aws.region')
    self.metrics_client = metrics_client or ClustermanMetricsBotoClient(
        self.mesos_region)

    # Choose the default signal implementation based on whether the
    # configured autoscale signal is internal (PendingPodsSignal, which
    # needs the Kubernetes cluster connector) or external.
    self.default_signal: Signal
    if staticconf.read_bool('autoscale_signal.internal', default=False):
        # we should never get here unless we're on Kubernetes; this assert makes mypy happy
        assert isinstance(self.pool_manager.cluster_connector,
                          KubernetesClusterConnector)
        self.default_signal = PendingPodsSignal(
            self.cluster,
            self.pool,
            self.scheduler,
            '__default__',
            DEFAULT_NAMESPACE,
            self.metrics_client,
            self.pool_manager.cluster_connector,
        )
    else:
        self.default_signal = ExternalSignal(
            self.cluster,
            self.pool,
            self.scheduler,
            '__default__',
            DEFAULT_NAMESPACE,
            self.metrics_client,
            signal_namespace=staticconf.read_string(
                'autoscaling.default_signal_role'),
        )
    # Only one app is supported today (see the check above), so load the
    # signal for the first (only) app.
    self.signal = self._get_signal_for_app(self.apps[0])
    logger.info('Initialization complete')
def _get_signal_for_app(self, app: str) -> Signal:
    """Load the signal object to use for autoscaling for a particular app

    :param app: the name of the app to load a Signal for
    :returns: the configured app signal, or the default signal in case of an error
    """
    logger.info(
        f'Loading autoscaling signal for {app} on {self.pool} in {self.cluster}'
    )

    # TODO (CLUSTERMAN-126, CLUSTERMAN-195) apps will eventually have separate namespaces from pools
    pool_namespace = POOL_NAMESPACE.format(pool=app, scheduler=self.scheduler)

    try:
        # see if the pool has set up a custom signal correctly; if not, fall back to the default signal
        is_internal = staticconf.read_bool(
            'autoscale_signal.internal',
            default=False,
            namespace=pool_namespace,
        )
        if is_internal:
            # we should never get here unless we're on Kubernetes; this assert makes mypy happy
            assert isinstance(self.pool_manager.cluster_connector, KubernetesClusterConnector)
            return PendingPodsSignal(
                self.cluster,
                self.pool,
                self.scheduler,
                app,
                pool_namespace,
                self.metrics_client,
                self.pool_manager.cluster_connector,
            )

        signal_namespace = staticconf.read_string(
            'autoscale_signal.namespace',
            default=app,
            namespace=pool_namespace,
        )
        return ExternalSignal(
            self.cluster,
            self.pool,
            self.scheduler,
            app,
            pool_namespace,
            self.metrics_client,
            signal_namespace=signal_namespace,
        )
    except NoSignalConfiguredException:
        logger.info(f'No signal configured for {app}, falling back to default')
        return self.default_signal
    except Exception:
        msg = f'WARNING: loading signal for {app} failed, falling back to default'
        logger.exception(msg)
        sensu_checkin(
            check_name=SIGNAL_LOAD_CHECK_NAME,
            status=Status.WARNING,
            output=msg,
            source=self.cluster,
            scheduler=self.scheduler,
            page=False,
            ttl=None,
            app=app,
            noop=not self.monitoring_enabled,
            pool=self.pool,
        )
        return self.default_signal