class MyFlowService():
    """Demo flow service: runs two groups of shell-command child processes,
    each grouped under a "fake" parent process, and returns the aggregate
    result (FAILURE if any group failed)."""

    def __init__(self):
        self.client = OperetoClient()
        self.input = self.client.input
        # Set to True when any child-process group fails.
        self.failed = False

    def run(self):
        """Create two fake parent processes with shell-command children and
        close each parent with its children's aggregate status.

        Returns:
            self.client.SUCCESS if all child groups succeeded,
            self.client.FAILURE otherwise (including on unexpected exceptions).
        """
        try:
            def _end_fake_parent_process(parent_pid, child_process_pids=None):
                # Close the fake parent with the aggregate result of its
                # children. Bug fix: default is None instead of [] to avoid
                # the shared mutable-default-argument pitfall.
                if child_process_pids is None:
                    child_process_pids = []
                if self.client.is_success(child_process_pids):
                    self.client.stop_process(parent_pid, 'success')
                else:
                    self.failed = True
                    self.client.stop_process(parent_pid, 'failure')

            ## create a "fake" parent process
            parent_1_pid = self.client.create_process(
                'fake_parent_process', title='First commands group')

            ## attach some services to it as child processes
            cmd_1_pid = self.client.create_process('shell_command',
                                                   title='df -k',
                                                   command='df -k',
                                                   pflow_id=parent_1_pid)
            cmd_2_pid = self.client.create_process('shell_command',
                                                   title='netstat -rn',
                                                   command='netstat -rn',
                                                   pflow_id=parent_1_pid)
            _end_fake_parent_process(parent_1_pid, [cmd_1_pid, cmd_2_pid])
            time.sleep(10)

            ## create a second "fake" parent process
            parent_2_pid = self.client.create_process(
                'fake_parent_process', title='Second commands group')

            ## attach some services to it as child processes
            cmd_3_pid = self.client.create_process('shell_command',
                                                   title='df -k',
                                                   command='df -k',
                                                   pflow_id=parent_2_pid)
            cmd_4_pid = self.client.create_process('shell_command',
                                                   title='netstat -rn',
                                                   command='netstat -rn',
                                                   pflow_id=parent_2_pid)
            _end_fake_parent_process(parent_2_pid, [cmd_3_pid, cmd_4_pid])
            time.sleep(5)

            ## set final result
            if self.failed:
                return self.client.FAILURE
            return self.client.SUCCESS
        except Exception:
            # Bug fix: the original called traceback.format_exc() and discarded
            # the result; print it so the failure cause appears in the process
            # log. Also narrowed bare `except:` to `except Exception`.
            print(traceback.format_exc())
            return self.client.FAILURE
class ServiceRunner(ServiceTemplate):
    """Opereto service that creates, modifies or deletes a Kubernetes
    StatefulSet of worker pods and keeps the matching Opereto agents in
    sync (one agent per replica, named "<deployment_name>-<index>")."""

    def __init__(self, **kwargs):
        self.client = OperetoClient()
        ServiceTemplate.__init__(self, **kwargs)

    def validate_input(self):
        """Validate the service input against a JSON scheme and reject the
        reserved deployment name. Raises OperetoRuntimeError on invalid name."""
        input_scheme = {
            "type": "object",
            "properties": {
                "deployment_operation": {
                    "enum": [
                        'create_statefulset', 'modify_statefulset',
                        'delete_statefulset', 'update_worker_image'
                    ]
                },
                "deployment_name": {
                    # null is allowed: setup() falls back to the template name.
                    "type": ["null", "string"]
                },
                "agent_java_config": {
                    "type": "string",
                    "minLength": 1
                },
                "agent_log_level": {
                    "enum": ['info', 'warn', 'error', 'fatal', 'debug']
                },
                "worker_config": {
                    "type": "string",
                    "minLength": 1
                },
                "agent_properties": item_properties_scheme,
                # NOTE(review): "required" and "additionalProperties" are nested
                # inside "properties", so a standard JSON-schema validator treats
                # them as property names and the required-fields check is never
                # enforced. Moving them one level up would change which inputs
                # are accepted — confirm intent before fixing.
                "required": [
                    'deployment_operation', 'agent_java_config',
                    'agent_log_level', 'worker_config', 'agent_properties'
                ],
                "additionalProperties": True
            }
        }
        validator = JsonSchemeValidator(self.input, input_scheme)
        validator.validate()
        # 'opereto-worker-node' is reserved for Opereto's own standard workers.
        if self.input['deployment_name'] == 'opereto-worker-node':
            raise OperetoRuntimeError(
                error=
                'Deployment name is invalid, this name is used for Opereto standard workers. Please insert different name.'
            )
        ## post_operations
        if self.input['post_operations']:
            validator = JsonSchemeValidator(self.input['post_operations'],
                                            included_services_scheme)
            validator.validate()

    def process(self):
        """Perform the requested statefulset operation.

        Returns self.client.SUCCESS on success, self.client.FAILURE when a
        post-install service fails (after tearing the statefulset back down).
        Raises OperetoRuntimeError for an unknown operation.
        """

        def _get_agent_names():
            # One agent per replica: "<deployment_name>-0", "<deployment_name>-1", ...
            # (matches the pod names Kubernetes gives statefulset replicas).
            names = []
            for count in range(self.worker_replicas):
                names.append(self.deployment_name + '-' + str(count))
            return names

        def _modify_agent(agent_id):
            # Ensure the agent exists, then tag it as a shared worker of this set.
            try:
                self.client.get_agent(agent_id)
            except OperetoClientError:
                # Agent not found — create it and give the backend a moment to
                # register it before modifying its properties.
                self.client.create_agent(
                    agent_id=agent_id,
                    name=agent_id,
                    description=
                    'This agent worker is part of {} worker stateful set.'.
                    format(self.deployment_name))
                time.sleep(2)
            agent_properties = self.input['agent_properties']
            agent_properties.update({
                'opereto.shared': True,
                'worker.label': self.deployment_name
            })
            self.client.modify_agent_properties(agent_id, agent_properties)

        def _agents_status(online=True):
            # Block until every replica agent reports the requested online state,
            # polling every 5 seconds.
            # NOTE(review): an agent that does not exist yet (OperetoClientError)
            # is silently skipped, so a missing agent counts as "ok" even when
            # waiting for online=True — confirm this is intended.
            while (True):
                ok = True
                for agent_id in _get_agent_names():
                    try:
                        agent_attr = self.client.get_agent(agent_id)
                        if agent_attr['online'] != online:
                            ok = False
                            break
                    except OperetoClientError:
                        pass
                if ok:
                    break
                time.sleep(5)

        def _tearrdown_statefileset():
            # (sic: helper name typo kept — documentation-only change)
            # Delete the statefulset and wait for all worker agents to go offline.
            print 'Deleting worker stateful set..'
            self.deployment_info = self.kubernetes_api.delete_stateful_set(
                self.deployment_name)
            print 'Waiting that all worker pods will be offline (may take some time)..'
            _agents_status(online=False)

        if self.deployment_operation == 'create_statefulset':
            print 'Creating worker stateful set..'
            self.deployment_info = self.kubernetes_api.create_stateful_set(
                self.deployment_template)
            for agent_id in _get_agent_names():
                _modify_agent(agent_id)
            print 'Waiting that all worker pods will be online (may take some time)..'
            _agents_status(online=True)
            self.deployment_info = self.kubernetes_api.get_stateful_set(
                self.deployment_name)
            print self.deployment_info.status

            ## run post install services
            post_install_pids = []
            for agent_id in _get_agent_names():
                for service in self.input['post_operations']:
                    # NOTE: `input` shadows the builtin here (kept as-is).
                    input = service.get('input') or {}
                    agent_name = service.get('agents') or agent_id
                    pid = self.client.create_process(
                        service=service['service'],
                        agent=agent_name,
                        title=service.get('title'),
                        **input)
                    post_install_pids.append(pid)
            # If any post-install service failed, roll the statefulset back.
            if post_install_pids and not self.client.is_success(
                    post_install_pids):
                _tearrdown_statefileset()
                return self.client.FAILURE

        elif self.deployment_operation == 'modify_statefulset':
            print 'Modifying worker stateful set..'
            self.deployment_info = self.kubernetes_api.modify_stateful_set(
                self.deployment_name, self.deployment_template)
            for agent_id in _get_agent_names():
                _modify_agent(agent_id)
            print 'Waiting that all worker pods will be online (may take some time)..'
            _agents_status(online=True)
            self.deployment_info = self.kubernetes_api.get_stateful_set(
                self.deployment_name)
            print self.deployment_info.status

        elif self.deployment_operation == 'delete_statefulset':
            _tearrdown_statefileset()
        else:
            raise OperetoRuntimeError(error='Invalid operation: {}'.format(
                self.deployment_operation))
        return self.client.SUCCESS

    def setup(self):
        """Prepare the deployment template: stamp the deployment name into the
        relevant template fields and inject the worker-agent environment
        variables (credentials come from the `worker_config` secret)."""
        self.kubernetes_api = KubernetesAPI()
        self.deployment_name = self.input['deployment_name']
        self.deployment_operation = self.input['deployment_operation']
        self.deployment_info = {}
        self.deployment_template = self.input['deployment_template']
        self.worker_replicas = self.deployment_template["spec"]["replicas"]
        if self.deployment_operation in [
                'create_statefulset', 'modify_statefulset'
        ]:
            if self.deployment_name:
                # Explicit name provided: propagate it into the template.
                self.deployment_template["metadata"][
                    "name"] = self.deployment_name
                self.deployment_template["spec"]["template"]["spec"][
                    "containers"][0]["name"] = self.deployment_name + "-worker"
                # Best-effort label updates: bare except tolerates templates
                # that omit the selector/labels sections.
                try:
                    self.deployment_template["spec"]["selector"][
                        "matchLabels"][
                            "app"] = self.deployment_name + "-cluster"
                except:
                    pass
                try:
                    self.deployment_template["spec"]["template"]['metadata'][
                        "labels"]["app"] = self.deployment_name + "-cluster"
                except:
                    pass
            else:
                # No name provided: use the one already in the template.
                self.deployment_name = self.deployment_template["metadata"][
                    "name"]
            # Ensure the container has an env list, then append the variables
            # the worker agent needs to connect back to Opereto.
            if not self.deployment_template["spec"]["template"]["spec"][
                    "containers"][0].get('env'):
                self.deployment_template["spec"]["template"]["spec"][
                    "containers"][0]['env'] = []
            self.deployment_template["spec"]["template"]["spec"]["containers"][
                0]['env'] += [{
                    # Agent id = pod name (via the downward API).
                    "name": "agent_name",
                    "valueFrom": {
                        "fieldRef": {
                            "fieldPath": "metadata.name"
                        }
                    }
                }, {
                    "name": "opereto_host",
                    "value": self.input['opereto_host']
                }, {
                    # Credentials are pulled from the worker_config secret.
                    "name": "opereto_user",
                    "valueFrom": {
                        "secretKeyRef": {
                            "name": self.input['worker_config'],
                            "key": "OPERETO_USERNAME"
                        }
                    }
                }, {
                    "name": "opereto_password",
                    "valueFrom": {
                        "secretKeyRef": {
                            "name": self.input['worker_config'],
                            "key": "OPERETO_PASSWORD"
                        }
                    }
                }, {
                    "name": "javaParams",
                    "value": self.input['agent_java_config'],
                }, {
                    "name": "log_level",
                    "value": self.input['agent_log_level']
                }]
            print 'Deployment template:\n{}'.format(
                json.dumps(self.deployment_template, indent=4))

    def teardown(self):
        """Log the last known deployment info for diagnostics."""
        print self.deployment_info
class ServiceRunner(ServiceTemplate):
    """Opereto service that creates or deletes a single Kubernetes worker pod
    and registers/synchronizes the matching Opereto agent (agent id = pod
    name)."""

    def __init__(self, **kwargs):
        self.client = OperetoClient()
        ServiceTemplate.__init__(self, **kwargs)

    def validate_input(self):
        """Validate service input against a JSON scheme and reject invalid
        pod names. Raises OperetoRuntimeError on bad input."""
        input_scheme = {
            "type": "object",
            "properties": {
                "pod_operation": {
                    "enum": ['create_pod', 'delete_pod']
                },
                "pod_name": {
                    # null is allowed: setup() falls back to the template name.
                    "type": ["null", "string"]
                },
                "pod_template": {
                    "type": "object"
                },
                "agent_java_config": {
                    "type": "string",
                    "minLength": 1
                },
                "agent_log_level": {
                    "enum": ['info', 'warn', 'error', 'fatal', 'debug']
                },
                "worker_config": {
                    "type": "string",
                    "minLength": 1
                },
                "agent_properties": item_properties_scheme,
                # NOTE(review): "required" and "additionalProperties" are nested
                # inside "properties", so the required-fields check is never
                # enforced by a standard JSON-schema validator. Moving them up a
                # level would change accepted inputs — confirm before fixing.
                "required": [
                    'pod_operation', 'pod_template', 'agent_java_config',
                    'agent_log_level', 'worker_config', 'agent_properties'
                ],
                "additionalProperties": True
            }
        }
        validator = JsonSchemeValidator(self.input, input_scheme)
        validator.validate()
        if self.input['pod_operation'] == 'delete_pod' and not self.input.get(
                'pod_name'):
            raise OperetoRuntimeError(
                error='Pod name must be provided for this operation')
        # Bug fix: pod_name may be null (the name is then taken from the
        # template in setup()); guard before calling startswith to avoid an
        # AttributeError on None.
        if self.input.get('pod_name') and self.input['pod_name'].startswith(
                'opereto-worker-node'):
            raise OperetoRuntimeError(
                error=
                'Pod name is invalid, this name is used for Opereto standard elastic workers. Please select a different name.'
            )

        ## post_operations
        if self.input['post_operations']:
            validator = JsonSchemeValidator(self.input['post_operations'],
                                            included_services_scheme)
            validator.validate()

    def process(self):
        """Perform the requested pod operation.

        Returns self.client.SUCCESS on success, self.client.FAILURE when a
        post-install service fails (after deleting the pod again). Raises
        OperetoRuntimeError for an unknown operation.
        """

        def _modify_agent(agent_id):
            # Create the agent entry for this pod, wait briefly for it to
            # register, then tag it as a shared worker with this pod's label.
            self.client.create_agent(
                agent_id=agent_id,
                name=agent_id,
                description=
                'This agent worker is part of {} worker stateful set.'.format(
                    self.pod_name))
            time.sleep(2)
            agent_properties = self.input['agent_properties']
            agent_properties.update({
                'opereto.shared': True,
                'worker.label': self.pod_name
            })
            self.client.modify_agent_properties(agent_id, agent_properties)

        def _agents_status(online=True):
            # Poll every 5s until the pod's agent reports the requested online
            # state; a not-yet-existing agent is tolerated and re-polled.
            while (True):
                try:
                    agent_attr = self.client.get_agent(self.pod_name)
                    if agent_attr['online'] == online:
                        break
                except OperetoClientError:
                    pass
                time.sleep(5)

        def _teardown_pod():
            # Delete the pod and wait for its agent to go offline.
            # (renamed from the misspelled `_tearrdown_pod`; local helper only)
            print('Deleting worker pod..')
            self.pod_info = self.kubernetes_api.delete_pod(self.pod_name)
            print('Waiting that worker pod will be offline (may take some time)..')
            _agents_status(online=False)
            print('Agent {} is offline.'.format(self.pod_name))

        if self.pod_operation == 'create_pod':
            print('Creating worker pod..')
            _modify_agent(self.pod_name)
            self.kubernetes_api.create_pod(self.pod_template)
            print('Waiting that worker pod will be online (may take some time)..')
            _agents_status(online=True)
            print('Agent {} is online.'.format(self.pod_name))
            self.pod_info = self.kubernetes_api.get_pod(self.pod_name)
            print(self.pod_info.status)

            ## run post install services (renamed local `input` to avoid
            ## shadowing the builtin)
            for service in self.input['post_operations']:
                service_input = service.get('input') or {}
                agent = service.get('agents') or self.pod_name
                pid = self.client.create_process(service=service['service'],
                                                 agent=agent,
                                                 title=service.get('title'),
                                                 **service_input)
                # Any failing post-install service rolls the pod back.
                if not self.client.is_success(pid):
                    _teardown_pod()
                    return self.client.FAILURE

        elif self.pod_operation == 'delete_pod':
            _teardown_pod()
        else:
            raise OperetoRuntimeError(
                error='Invalid operation: {}'.format(self.pod_operation))
        return self.client.SUCCESS

    def setup(self):
        """Prepare the pod template: stamp the pod name into it and inject the
        worker-agent environment variables (credentials come from the
        `worker_config` secret)."""
        self.kubernetes_api = KubernetesAPI()
        self.pod_name = self.input['pod_name']
        self.pod_operation = self.input['pod_operation']
        self.pod_info = {}
        self.pod_template = self.input['pod_template']
        if self.pod_operation == 'create_pod':
            if self.pod_name:
                # Explicit name provided: propagate it into the template.
                self.pod_template["metadata"]["name"] = self.pod_name
                self.pod_template["spec"]["containers"][0][
                    "name"] = self.pod_name + "-worker"
            else:
                # No name provided: use the one already in the template.
                self.pod_name = self.pod_template["metadata"]["name"]
            # Ensure the container has an env list, then append the variables
            # the worker agent needs to connect back to Opereto.
            if not self.pod_template["spec"]["containers"][0].get('env'):
                self.pod_template["spec"]["containers"][0]['env'] = []
            self.pod_template["spec"]["containers"][0]['env'] += [{
                # Agent id = pod name (via the downward API).
                "name": "agent_name",
                "valueFrom": {
                    "fieldRef": {
                        "fieldPath": "metadata.name"
                    }
                }
            }, {
                "name": "opereto_host",
                "value": self.input['opereto_host']
            }, {
                # Credentials are pulled from the worker_config secret.
                "name": "opereto_user",
                "valueFrom": {
                    "secretKeyRef": {
                        "name": self.input['worker_config'],
                        "key": "OPERETO_USERNAME"
                    }
                }
            }, {
                "name": "opereto_password",
                "valueFrom": {
                    "secretKeyRef": {
                        "name": self.input['worker_config'],
                        "key": "OPERETO_PASSWORD"
                    }
                }
            }, {
                "name": "javaParams",
                "value": self.input['agent_java_config'],
            }, {
                "name": "log_level",
                "value": self.input['agent_log_level']
            }]
            print('Pod template:\n{}'.format(
                json.dumps(self.pod_template, indent=4)))

    def teardown(self):
        """Log the last known pod info for diagnostics."""
        print(self.pod_info)
class ServiceRunner(ServiceTemplate):
    """Opereto test listener: polls a results directory for a tests.json file
    plus per-test subdirectories (output.json / stdout.log / summary.txt) and
    mirrors their content into Opereto test-record processes until told to
    stop via the `stop_listener_code` process property."""

    def __init__(self, **kwargs):
        self.client = OperetoClient()
        ServiceTemplate.__init__(self, **kwargs)
        self.sflow_id = self.input['opereto_source_flow_id']
        self.remove_test_results_dir = False
        # Persisted listener state (survives restarts via _get_state/_save_state).
        self.op_state = self._get_state() or {}

    def validate_input(self):
        """Validate input and initialize per-run state: parent pid, results
        directory, debug flag and the scheme used to validate tests.json."""
        input_scheme = {
            "type": "object",
            "properties": {
                "test_results_path": {
                    "type": "string",
                    "minLength": 1
                },
                "parent_pid": {
                    "type": "string"
                },
                "listener_frequency": {
                    "type": "integer",
                    "minValue": 1
                },
                "debug_mode": {
                    "type": "boolean"
                }
            },
            "required": ['listener_frequency', 'test_results_path'],
            "additionalProperties": True
        }
        validator = JsonSchemeValidator(self.input, input_scheme)
        validator.validate()
        # Fall back to this process's own pid when no parent was given.
        self.parent_pid = self.input['parent_pid'] or self.input['pid']
        self.test_results_dir = self.input['test_results_path']
        self.debug_mode = self.input['debug_mode']
        self.result_keys = process_result_keys
        self.status_keys = process_status_keys
        # Scheme for the tests.json file produced by the test runner.
        self.tests_json_scheme = {
            "type": "object",
            "properties": {
                "test_suite": {
                    "type": "object",
                    "properties": {
                        "links": {
                            "type": "array"
                        },
                        "status": {
                            "enum": self.status_keys
                        }
                    }
                },
                "test_records": {
                    "type": "array",
                    "items": [{
                        "type": "object",
                        "properties": {
                            "testname": default_entity_name_scheme,
                            "status": {
                                "enum": self.status_keys
                            },
                            "title": default_entity_name_scheme,
                            "links": {
                                "type": "array",
                                "items": [{
                                    "type": "object",
                                    "properties": {
                                        "url": {
                                            "type": "string"
                                        },
                                        "name": {
                                            "type": "string"
                                        }
                                    }
                                }]
                            }
                        },
                        "required": ['testname', 'status'],
                        "additionalProperties": True
                    }]
                }
            },
            "additionalProperties": True
        }
        self.end_of_test_suite = None
        self.test_data = {}
        self.suite_links = []
        # Per-testname tracking: pid, last synced md5s, last log line, status.
        self._state = {}

    def _print_test_link(self, link):
        """Print a link dict ({'url','name'}) as an Opereto HTML log line."""
        print(
            '[OPERETO_HTML]<br><a href="{}"><font style="color: #222; font-weight: 600; font-size: 13px;">{}</font></a>'
            .format(link['url'], link['name']))

    def _append_to_process_log(self, pid, ppid, loglines, log_level='info'):
        """Append `loglines` to the log of process `pid` via the REST API.

        Each line gets a strictly increasing timestamp (now + line index) so
        log ordering is preserved server-side.
        """
        log_request_data = {
            'sflow_id': self.sflow_id,
            'pflow_id': ppid,
            'agent_id': self.input['opereto_agent'],
            'product_id': self.input['opereto_product_id'],
            'data': []
        }
        count = 1
        for line in loglines:
            try:
                millis = int(round(time.time() * 1000)) + count
                log_request_data['data'].append({
                    'level': log_level,
                    'text': line.strip(),
                    'timestamp': millis
                })
            except Exception as e:
                # Best-effort: a single bad line must not drop the whole batch.
                print(e)
            count += 1
        self.client._call_rest_api(
            'post',
            '/processes/{}/log'.format(pid),
            data=log_request_data,
            error='Failed to update test log (test pid = {})'.format(pid))

    def _modify_record(self, test_record):
        """Sync one tests.json record into its Opereto test-record process.

        Creates the process on first sight, then incrementally pushes
        output.json (by md5 diff), summary.txt (by md5 diff) and new
        stdout.log lines, and finally stops the process once the record
        carries a terminal status.
        """
        testname = test_record['testname']
        test_input = test_record.get('test_input') or {}
        title = test_record.get('title') or testname
        status = test_record['status']
        test_links = test_record.get('links') or []
        test_ppid = test_record.get('ppid') or self.parent_pid
        test_pid = test_record.get('pid')
        if testname not in self._state:
            # First time we see this test: create its record process unless the
            # record already carries a pid.
            if not test_pid:
                test_pid = self.client.create_process(
                    'opereto_test_listener_record',
                    testname=testname,
                    title=title,
                    test_input=test_input,
                    test_runner_id=test_ppid,
                    pflow_id=test_ppid)
                self.client.wait_to_start(test_pid)
            self._state[testname] = {
                'ppid': test_ppid,
                'pid': test_pid,
                'status': 'in_process',
                'title': title,
                'test_output_md5': '',
                'summary_md5': '',
                'last_log_line': 1
            }
        else:
            test_pid = self._state[testname]['pid']
        # Only sync tests that have not yet reached a terminal status.
        if self._state[testname]['status'] not in self.result_keys:
            if title != self._state[testname]['title']:
                ### TBD: add title change API call
                self._state[testname]['title'] = title
            results_dir = os.path.join(self.test_results_dir, testname)
            if os.path.exists(results_dir):
                output_json_file = os.path.join(results_dir, 'output.json')
                log_file = os.path.join(results_dir, 'stdout.log')
                summary_file = os.path.join(results_dir, 'summary.txt')
                # Push output.json only when its content changed (md5 diff).
                if os.path.exists(output_json_file):
                    output_json_md5 = get_file_md5sum(output_json_file)
                    if output_json_md5 != self._state[testname][
                            'test_output_md5']:
                        with open(output_json_file, 'r') as of:
                            output_json = json.load(of)
                        self.client.modify_process_property('test_output',
                                                            output_json,
                                                            pid=test_pid)
                        self._state[testname][
                            'test_output_md5'] = output_json_md5
                # Push summary.txt only when its content changed (md5 diff).
                if os.path.exists(summary_file):
                    summary_md5 = get_file_md5sum(summary_file)
                    if summary_md5 != self._state[testname]['summary_md5']:
                        with open(summary_file, 'r') as sf:
                            summary = sf.read()
                        self.client.modify_process_summary(test_pid, summary)
                        self._state[testname]['summary_md5'] = summary_md5
                # Stream only log lines not yet sent (tracked by last_log_line),
                # capped at MAX_LOG_LINES_PER_PROCESS with an HTML warning.
                if os.path.exists(log_file):
                    with open(log_file, 'r') as lf:
                        count = 1
                        loglines = []
                        for line in lf.readlines():
                            if count >= self._state[testname]['last_log_line']:
                                if count > MAX_LOG_LINES_PER_PROCESS:
                                    message = 'Test log is too long. Please save test log in remote storage and add a link to it in Opereto log. See service info to learn how to add links to your tests.json file.'
                                    loglines.append(
                                        '[OPERETO_HTML]<br><br><font style="width: 800px; padding: 15px; color: #222; font-weight: 400; border:2px solid red; background-color: #f8f8f8;">{}</font><br><br>'
                                        .format(message))
                                    break
                                loglines.append(line.strip())
                            count += 1
                        self._append_to_process_log(test_pid, test_ppid,
                                                    loglines)
                        self._state[testname]['last_log_line'] = count
            # Terminal status reached: post the record's links and close the
            # test-record process with that status.
            if status in self.result_keys:
                links = []
                for link in test_links:
                    html_link = '[OPERETO_HTML]<br><a href="{}"><font style="color: #222; font-weight: 600; font-size: 13px;">{}</font></a>'.format(
                        link['url'], link['name'])
                    links.append(html_link)
                self._append_to_process_log(test_pid, test_ppid, links)
                self.client.stop_process(test_pid, status=status)
                self._state[testname]['status'] = status

    def process(self):
        """Main listener loop: re-scan tests.json every `listener_frequency`
        seconds until the `stop_listener_code` process property is set."""

        def process_results():
            # One polling pass: parse and validate tests.json, sync each test
            # record, capture the suite status/links, persist state, and read
            # the stop flag (always, via the outer finally).
            try:
                tests_json = os.path.join(self.test_results_dir, 'tests.json')
                if os.path.exists(tests_json):
                    with open(tests_json, 'r') as tf:
                        try:
                            self.test_data = json.load(tf)
                            self.op_state['test_data'] = self.test_data
                            try:
                                validator = JsonSchemeValidator(
                                    self.test_data, self.tests_json_scheme)
                                validator.validate()
                            except Exception as e:
                                # Invalid file this round — skip syncing, state
                                # is still saved by the inner finally.
                                print('Invalid tests json file: {}'.format(e))
                                return
                            if 'test_records' in self.test_data:
                                for test_record in self.test_data[
                                        'test_records']:
                                    self._modify_record(test_record)
                            if 'test_suite' in self.test_data:
                                if 'status' in self.test_data['test_suite']:
                                    self.op_state[
                                        'test_suite_final_status'] = self.test_data[
                                            'test_suite']['status']
                                if 'links' in self.test_data['test_suite']:
                                    self.suite_links = self.test_data[
                                        'test_suite']['links']
                                    self.op_state[
                                        'test_suite'] = self.suite_links
                        finally:
                            # Persist listener state even on partial failure.
                            try:
                                self._save_state(self.op_state)
                            except Exception as e:
                                print(e)
            finally:
                # Check the stop flag on every pass, whatever happened above.
                self.end_of_test_suite = self.client.get_process_property(
                    name='stop_listener_code')
                if self.debug_mode:
                    print('[DEBUG] content of tests.json: {}'.format(
                        json.dumps(self.test_data)))

        while (True):
            process_results()
            time.sleep(self.client.input['listener_frequency'])
            if self.end_of_test_suite:
                print('Stopping listener process..')
                break

    def setup(self):
        """Create the results directory if needed and initialize persisted
        listener state (final status defaults to 'success')."""
        self._print_step_title('Start opereto test listener..')
        if not os.path.exists(self.input['test_results_path']):
            make_directory(self.input['test_results_path'])
        self.op_state = {
            'test_suite_final_status': 'success',
            'test_results_path': self.input['test_results_path'],
            'test_data': {},
            'suite_links': []
        }
        self._save_state(self.op_state)

    def teardown(self):
        """Clean up the results directory, print the final summary/links, and
        map the suite's final status to the service exit status."""
        if 'test_results_path' in self.op_state:
            remove_directory_if_exists(self.input['test_results_path'])
        self._print_step_title('Opereto test listener stopped.')
        # NOTE(review): indent=4 is passed to str.format (which silently
        # ignores unused kwargs), not to json.dumps — the intent was probably
        # json.dumps(..., indent=4). Behavior kept as-is.
        print('Final content of tests_json: {}'.format(json.dumps(
            self.op_state['test_data']), indent=4))
        suite_links = self.op_state.get('suite_links') or []
        for link in suite_links:
            self._print_test_link(link)
        print('Final listener status is {}'.format(
            self.op_state['test_suite_final_status']))
        if self.op_state['test_suite_final_status'] == 'success':
            return self.client.SUCCESS
        elif self.op_state['test_suite_final_status'] == 'failure':
            return self.client.FAILURE
        elif self.op_state['test_suite_final_status'] == 'warning':
            return self.client.WARNING
        else:
            return self.client.ERROR