def perform_one_session(policy, results=None):
    # Docker service variant: evaluates the policy for the worker nodes first,
    # then repeats the cycle for each service listed under scaling/services.
    global log
    log = logging.getLogger('pk')
    config = pk_config.config()
    log.info('--- session starts ---')

    # Worker nodes: collect inputs, evaluate queries/alerts, evaluate policy, scale.
    log.info('(I) Collecting inputs for nodes starts')
    inputs = collect_inputs_for_nodes(policy)
    set_policy_inputs_for_nodes(policy, inputs)
    for x in inputs.keys():
        log.info('(I) => "{0}": {1}'.format(x, inputs[x]))
    log.info('(Q) Evaluating queries and alerts for nodes starts')
    if results:
        queries, alerts = add_query_results_and_alerts_to_nodes(policy, results)
    else:
        queries, alerts = prom.evaluate_data_queries_and_alerts_for_nodes(
            config['prometheus_endpoint'], policy)
    for attrname, attrvalue in queries.iteritems():
        log.info('(Q) => "{0}" is "{1}".'.format(attrname, attrvalue))
    for attrname, attrvalue in alerts.iteritems():
        log.info('(A) => "{0}" is "{1}".'.format(attrname, attrvalue))
    log.info('(P) Policy evaluation for nodes starts')
    perform_policy_evaluation_on_worker_nodes(policy)
    log.info('(S) Scaling of nodes starts')
    perform_worker_node_scaling(policy)
    for attrname, attrvalue in alerts.iteritems():
        prom.alerts_remove(attrname)

    # Services: the same cycle for every service named in the policy.
    for oneservice in policy.get('scaling', dict()).get('services', dict()):
        service_name = oneservice.get('name')
        log.info('(I) Collecting inputs for service "{0}" starts'.format(service_name))
        inputs = collect_inputs_for_containers(policy, service_name)
        set_policy_inputs_for_containers(policy, service_name, inputs)
        for x in inputs.keys():
            log.info('(I) => "{0}": {1}'.format(x, inputs[x]))
        log.info('(Q) Evaluating queries and alerts for service "{0}" starts'.format(service_name))
        if results:
            queries, alerts = add_query_results_and_alerts_to_service(policy, results, service_name)
        else:
            queries, alerts = prom.evaluate_data_queries_and_alerts_for_a_service(
                config['prometheus_endpoint'], policy, service_name)
        for attrname, attrvalue in queries.iteritems():
            log.info('(Q) => "{0}" is "{1}".'.format(attrname, attrvalue))
        for attrname, attrvalue in alerts.iteritems():
            log.info('(A) => "{0}" is "{1}".'.format(attrname, attrvalue))
        log.info('(P) Policy evaluation for service "{0}" starts'.format(service_name))
        perform_policy_evaluation_on_a_docker_service(policy, service_name)
        log.info('(S) Scaling of service "{0}" starts'.format(service_name))
        perform_service_scaling(policy, service_name)
        for attrname, attrvalue in alerts.iteritems():
            prom.alerts_remove(attrname)

    log.info('--- session finished ---')
    return
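# --- Illustrative policy fragment (assumption, not part of the original module) ---
# Only the keys that perform_one_session() above reads directly are shown: a
# 'scaling' section with a list of services, each carrying a 'name'. Real
# policies also describe the queries, alerts and scaling rules consumed by the
# helper functions; those details are omitted here.
EXAMPLE_POLICY_FRAGMENT = {
    'scaling': {
        'services': [
            {'name': 'example-service'},
        ],
    },
}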
def perform_one_session(policy, results=None):
    # Kubernetes variant: maintains down worker nodes first, then evaluates the
    # policy and scales each node set and each Kubernetes deployment.
    global log
    log = logging.getLogger('pk')
    config = pk_config.config()
    log.info('--- session starts ---')
    log.info('(M) Maintaining worker nodes starts')
    k8s.down_nodes_maintenance(config['k8s_endpoint'],
                               config['docker_node_unreachable_timeout'])

    # Nodes: collect inputs, evaluate queries/alerts, evaluate policy, scale.
    for onenode in policy.get('scaling', dict()).get('nodes', []):
        node_name = onenode.get('name')
        log.info('(I) Collecting inputs for node {} starts'.format(node_name))
        inputs = collect_inputs_for_nodes(policy, onenode)
        set_policy_inputs_for_nodes(policy, inputs, onenode)
        for x in inputs.keys():
            log.info('(I) => "{0}": {1}'.format(x, inputs[x]))
        log.info('(Q) Evaluating queries and alerts for node {} starts'.format(node_name))
        if results:
            queries, alerts = add_query_results_and_alerts_to_nodes(policy, results, onenode)
        else:
            queries, alerts = prom.evaluate_data_queries_and_alerts_for_nodes(
                config['prometheus_endpoint'], policy, onenode)
        for attrname, attrvalue in queries.iteritems():
            log.info('(Q) => "{0}" is "{1}".'.format(attrname, attrvalue))
        for attrname, attrvalue in alerts.iteritems():
            log.info('(A) => "{0}" is "{1}".'.format(attrname, attrvalue))
        # Node sets whose scaling rule relies on optimizer advice also send it a sample.
        if 'm_opt_advice' in onenode.get('scaling_rule', ''):
            log.info('(O) Creating sample for the optimizer starts')
            sample = optim.generate_sample(queries, inputs)
            log.info('(O) Sending sample for the optimizer starts')
            optim.calling_rest_api_sample(sample)
        log.info('(P) Policy evaluation for nodes starts')
        perform_policy_evaluation_on_worker_nodes(policy, onenode)
        log.info('(S) Scaling of nodes starts')
        perform_worker_node_scaling(onenode)
        for attrname, attrvalue in alerts.iteritems():
            prom.alerts_remove(attrname)

    # Services: the same cycle for every Kubernetes deployment named in the policy.
    for oneservice in policy.get('scaling', dict()).get('services', []):
        service_name = oneservice.get('name')
        log.info('(I) Collecting inputs for service "{0}" starts'.format(service_name))
        inputs = collect_inputs_for_containers(policy, service_name)
        set_policy_inputs_for_containers(policy, service_name, inputs)
        for x in inputs.keys():
            log.info('(I) => "{0}": {1}'.format(x, inputs[x]))
        log.info('(Q) Evaluating queries and alerts for service "{0}" starts'.format(service_name))
        if results:
            queries, alerts = add_query_results_and_alerts_to_service(policy, results, service_name)
        else:
            queries, alerts = prom.evaluate_data_queries_and_alerts_for_a_service(
                config['prometheus_endpoint'], policy, service_name)
        for attrname, attrvalue in queries.iteritems():
            log.info('(Q) => "{0}" is "{1}".'.format(attrname, attrvalue))
        for attrname, attrvalue in alerts.iteritems():
            log.info('(A) => "{0}" is "{1}".'.format(attrname, attrvalue))
        log.info('(P) Policy evaluation for service "{0}" starts'.format(service_name))
        perform_policy_evaluation_on_a_k8s_deploy(policy, service_name)
        log.info('(S) Scaling of service "{0}" starts'.format(service_name))
        perform_service_scaling(policy, service_name)
        for attrname, attrvalue in alerts.iteritems():
            prom.alerts_remove(attrname)

    log.info('--- session finished ---')
    return
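# --- Usage sketch (not part of the original module) ---
# A minimal illustration of how perform_one_session() might be driven
# periodically; the 30-second interval and the threading.Event-based stop
# flag are assumptions made for this sketch only.
import threading
import time

def run_policy_keeping_loop(policy, stop_event=None, interval_seconds=30):
    # Re-evaluate the policy in a loop until the caller sets the stop event.
    stop_event = stop_event or threading.Event()
    while not stop_event.is_set():
        perform_one_session(policy)
        time.sleep(interval_seconds)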
def alerts_init():
    # Resets the alerts on an external request: the YAML request body is
    # parsed, then prom.alerts_remove(None) clears the registered alerts.
    alert = yaml.safe_load(request.stream)
    log.info('(A) Resetting alerts based on external request.')
    prom.alerts_remove(None)
    return jsonify(dict(response='OK'))
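# --- Usage sketch (not part of the original module) ---
# alerts_init() reads the request body via request.stream and replies with
# jsonify(), which suggests it is served by Flask; the application object,
# URL rule and HTTP method below are illustrative assumptions only.
from flask import Flask

_example_app = Flask(__name__)
_example_app.add_url_rule('/alerts/init', 'alerts_init', alerts_init,
                          methods=['POST'])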