def prepare_session(policy_yaml):
    """Parse and resolve a policy, wire it into Prometheus, and read the
    current worker-node and service-replica counts so the session starts
    from the real state.

    :param policy_yaml: policy document as a YAML string
    :returns: the parsed policy dict
    """
    global log
    log = logging.getLogger('pk')
    cfg = pk_config.config()
    log.info('Received policy: \n{0}'.format(policy_yaml))
    policy_yaml = resolve_queries(policy_yaml)
    log.info('Resolved policy: \n{0}'.format(policy_yaml))
    policy = yaml.safe_load(policy_yaml)

    # Register the policy's exporters in the Prometheus config file.
    log.info('(C) Add exporters to prometheus configuration file starts')
    prom.add_exporters_to_prometheus_config(
        policy,
        cfg['prometheus_config_template'],
        cfg['prometheus_config_target'])

    # Generate alert rule files and tell Prometheus to pick them up.
    log.info('(C) Add alerts to prometheus, generating rule files starts')
    prom.deploy_alerts_under_prometheus(
        cfg['prometheus_rules_directory'],
        policy.get('data', dict()).get('alerts'),
        policy.get('stack', 'pk'))
    log.info('(C) Notify prometheus to reload config starts')
    prom.notify_to_reload_config(cfg['prometheus_endpoint'])

    # Seed the node counts from Occopus.
    log.info('(C) Querying number of target nodes from Occopus starts')
    for node in policy.get('scaling', dict()).get('nodes', []):
        count = occo.query_number_of_worker_nodes(
            endpoint=cfg['occopus_endpoint'],
            infra_name=cfg['occopus_infra_name'],
            worker_name=node['name'])
        log.info('(C) Setting m_node_count for {} to {}'.format(node['name'], count))
        set_worker_node_instance_number(node, count)

    # Seed the service replica counts from the cluster.
    log.info('(C) Querying number of service replicas from Swarm starts')
    for svc in policy.get('scaling', dict()).get('services', []):
        svc_name = svc.get('name', '')
        replicas = k8s.query_k8s_replicas(
            cfg['k8s_endpoint'], get_full_service_name(policy, svc_name))
        log.info('(C) Setting m_container_count for {0} to {1}'.format(svc_name, replicas))
        set_k8s_instance_number(policy, svc_name, replicas)
    return policy
def prepare_session(policy_yaml):
    """Parse the incoming policy and initialise every component of a
    scaling session.

    Steps performed:
      1. resolve query placeholders in the raw YAML,
      2. load the dry-run component flags from the policy constants,
      3. register exporters and alert rules with Prometheus and reload it,
      4. read the current worker-node counts from the cloud orchestrator,
      5. read the current service replica counts from Kubernetes,
      6. initialise the optimizer from the policy.

    :param policy_yaml: policy document as a YAML string
    :returns: the parsed policy dict (instance counts are stored via the
        set_* helpers)
    """
    global log
    log = logging.getLogger('pk')
    config = pk_config.config()
    log.info('Received policy: \n{0}'.format(policy_yaml))
    policy_yaml = resolve_queries(policy_yaml)
    log.info('Resolved policy: \n{0}'.format(policy_yaml))
    policy = yaml.safe_load(policy_yaml)

    # Set dryrun flags
    log.info('(C) Initializing dryrun settings from policy starts')
    pk_config.dryrun_set()  # called without args first, then per component below
    dryrun = policy.get('data', dict()).get('constants', dict()).get('m_dryrun', None)
    # isinstance() instead of "type(...) == list": idiomatic, and also
    # accepts list subclasses coming out of the YAML loader.
    if isinstance(dryrun, list):
        for comp in dryrun:
            if comp in pk_config.var_dryrun_components:
                pk_config.dryrun_set(comp, True)
    log.info('(C) Enable dryrun for the following components: {0}'.format(
        pk_config.dryrun_get()))

    # Initialize Prometheus: exporters, alert rule files, config reload
    log.info('(C) Add exporters to prometheus configuration file starts')
    config_tpl = config['prometheus_config_template']
    config_target = config['prometheus_config_target']
    prom.add_exporters_to_prometheus_config(policy, config_tpl, config_target)
    log.info('(C) Add alerts to prometheus, generating rule files starts')
    prom.deploy_alerts_under_prometheus(
        config['prometheus_rules_directory'],
        policy.get('data', dict()).get('alerts'),
        policy.get('stack', 'pk'))
    log.info('(C) Notify prometheus to reload config starts')
    prom.notify_to_reload_config(config['prometheus_endpoint'])

    # Initialise nodes through the cloud orchestrator (selected per node)
    log.info(
        '(C) Querying number of target nodes from Cloud Orchestrator starts')
    for onenode in policy.get('scaling', dict()).get('nodes', []):
        cloud = get_cloud_orchestrator(onenode)
        instances = cloud.query_number_of_worker_nodes(
            config, worker_name=onenode['name'])
        log.info('(C) Setting m_node_count for {} to {}'.format(
            onenode['name'], instances))
        set_worker_node_instance_number(onenode, instances)

    # Initialise services through K8S
    log.info('(C) Querying number of service replicas from K8s starts')
    for theservice in policy.get('scaling', dict()).get('services', []):
        service_name = theservice.get('name', '')
        full_service_name = get_full_service_name(policy, service_name)
        instances = k8s.query_k8s_replicas(config['k8s_endpoint'],
                                           full_service_name)
        log.info('(C) Setting m_container_count for {0} to {1}'.format(
            service_name, instances))
        set_k8s_instance_number(policy, service_name, instances)

    # Initialise Optimizer
    log.info('(O) Scanning the optimizer parameters starts...')
    optim.collect_init_params_and_variables(policy)
    log.info('(O) Initializing optimizer starts...')
    optim.calling_rest_api_init()
    return policy
def stop(policy_yaml):
    """Tear down the Prometheus wiring created for the given policy:
    drop its exporters from the config, delete its alert rule files and
    ask Prometheus to reload.

    :param policy_yaml: policy document as a YAML string
    """
    global log
    log = logging.getLogger('pk')
    cfg = pk_config.config()
    parsed = yaml.safe_load(policy_yaml)

    log.info('(C) Remove exporters from prometheus configuration file starts')
    prom.remove_exporters_from_prometheus_config(
        cfg['prometheus_config_template'],
        cfg['prometheus_config_target'])

    log.info('(C) Remove alerts from prometheus, deleting rule files starts')
    prom.remove_alerts_under_prometheus(
        cfg['prometheus_rules_directory'],
        parsed.get('data', dict()).get('alerts', dict()),
        parsed.get('stack', 'pk'))

    log.info('(C) Notify prometheus to reload config starts')
    prom.notify_to_reload_config(cfg['prometheus_endpoint'])
def prepare_session(policy_yaml):
    """Load a policy, resolve its embedded queries, and set up
    Prometheus for it: exporters, exporter-network attachment, alert
    rule files and a config reload.

    :param policy_yaml: policy document as a YAML string
    :returns: the parsed policy dict
    """
    global log
    log = logging.getLogger('pk')
    cfg = pk_config.config()
    log.info('Received policy: \n{0}'.format(policy_yaml))
    policy = yaml.safe_load(policy_yaml)
    # return value ignored here — presumably updates the policy dict in
    # place; TODO(review): confirm against resolve_queries()
    resolve_queries(policy)

    log.info('(C) Add exporters to prometheus configuration file starts')
    prom.add_exporters_to_prometheus_config(
        policy,
        cfg['prometheus_config_template'],
        cfg['prometheus_config_target'])

    log.info('(C) Attach prometheus to network of exporters starts')
    prom.attach_prometheus_to_exporters_network(policy, cfg['swarm_endpoint'])

    log.info('(C) Add alerts to prometheus, generating rule files starts')
    alerts = policy.get('data', dict()).get('alerts')
    stack = policy.get('stack', 'pk')
    prom.deploy_alerts_under_prometheus(
        cfg['prometheus_rules_directory'], alerts, stack)

    log.info('(C) Notify prometheus to reload config starts')
    prom.notify_to_reload_config(cfg['prometheus_endpoint'])
    return policy