def __init__(self, **kwargs):
    super(WFCurrent, self).__init__(**kwargs)
    self.workflow_name = kwargs.pop('workflow_name', '')
    self.spec = None
    self.workflow = None
    self.task_type = ''
    self.task = None
    self.pool = {}
    self.task_name = ''
    self.activity = ''
    self.lane_permissions = []
    self.lane_relations = ''
    self.old_lane = ''
    self.lane_owners = None
    self.lane_name = ''

    if 'token' in self.input:
        self.token = self.input['token']
        log.info("TOKEN INCOMING: %s" % self.token)
        self.new_token = False
    else:
        self.token = uuid4().hex
        self.new_token = True
        log.info("TOKEN NEW: %s" % self.token)

    self.wfcache = WFCache(self.token)
    log.debug("\n\nWF_CACHE: %s" % self.wfcache.get())
    self.set_client_cmds()
def handle_message(self, ch, method, properties, body):
    """
    pika basic_consume callback; handles client input, runs the
    appropriate workflow or view, and sends the output back.

    Args:
        ch: AMQP channel
        method: AMQP method
        properties: AMQP message properties
        body: message body
    """
    input = {}
    headers = {}
    # initialized up front so the generic error handler below can
    # reference them even when decoding fails early
    data = {}
    session = None
    try:
        self.sessid = method.routing_key
        input = json_decode(body)
        data = input['data']

        # since this comes as "path" we don't know whether it's a view
        # or a workflow yet
        # TODO: just a workaround till we modify the UI
        if 'path' in data:
            if data['path'] in VIEW_METHODS:
                data['view'] = data['path']
            else:
                data['wf'] = data['path']
        session = Session(self.sessid)

        headers = {'remote_ip': input['_zops_remote_ip'],
                   'source': input['_zops_source']}

        if 'wf' in data:
            output = self._handle_workflow(session, data, headers)
        elif 'job' in data:
            self._handle_job(session, data, headers)
            return
        else:
            output = self._handle_view(session, data, headers)

    except HTTPError as e:
        import sys
        if hasattr(sys, '_called_from_test'):
            raise
        output = {"cmd": "error",
                  "error": self._prepare_error_msg(e.message),
                  "code": e.code}
        log.exception("Http error occurred")
    except Exception:
        self.current = Current(session=session, input=data)
        self.current.headers = headers
        import sys
        if hasattr(sys, '_called_from_test'):
            raise
        err = traceback.format_exc()
        output = {"cmd": "error",
                  "error": self._prepare_error_msg(err),
                  "code": 500}
        log.exception("Worker error occurred with message body:\n%s" % body)

    if 'callbackID' in input:
        output['callbackID'] = input['callbackID']
    log.info("OUTPUT for %s: %s" % (self.sessid, output))
    output['reply_timestamp'] = time()
    self.send_output(output)
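# A minimal sketch of the JSON message body that handle_message() above
# expects. The field names come from the code; the values are illustrative
# assumptions only.
import json

example_body = json.dumps({
    "data": {"path": "login"},        # resolved to a view or a workflow
    "callbackID": "abc123",           # echoed back on the output message
    "_zops_remote_ip": "127.0.0.1",
    "_zops_source": "Remote",
})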
def process_request(self, req, resp):
    # req.stream corresponds to the WSGI wsgi.input environ variable,
    # and allows you to read bytes from the request body.
    #
    # See also: PEP 3333
    if req.content_length in (None, 0):
        # Nothing to do
        req.context['data'] = req.params.copy()
        req.context['result'] = {}
        return

    req.context['result'] = {}
    body = req.stream.read()
    if not body:
        raise falcon.HTTPBadRequest('Empty request body',
                                    'A valid JSON document is required.')
    try:
        json_data = body.decode('utf-8')
        req.context['data'] = json.loads(json_data)
        try:
            log.info("REQUEST DATA: %s" % json_data)
        except Exception:
            log.exception("ERR: REQUEST DATA CAN'T BE LOGGED")
    except (ValueError, UnicodeDecodeError):
        raise falcon.HTTPError(falcon.HTTP_753,
                               'Malformed JSON',
                               'Could not decode the request body. The '
                               'JSON was incorrect or not encoded as '
                               'UTF-8.')
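# A minimal sketch of plugging the middleware above into a falcon app.
# JSONTranslator is a hypothetical name for the class holding
# process_request()/process_response(); falcon.API(middleware=[...])
# matches the pre-2.0 falcon style used above.
import falcon

app = falcon.API(middleware=[JSONTranslator()])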
def __init__(self, queue_manager):
    log.info("ConcurrentTestCase class init with %s" % queue_manager)
    self.cmds = {}
    self.register_cmds()
    self.queue_manager = queue_manager
    self.clients = {}
    self.make_client('ulakbus')
    self.run_tests()
def exit(self, signal=None, frame=None):
    """
    Properly close the AMQP connections
    """
    self.input_channel.close()
    self.client_queue.close()
    self.connection.close()
    log.info("Worker exiting")
    sys.exit(0)
def start_engine(self, **kwargs):
    self.current = Current(**kwargs)
    self.check_for_authentication()
    self.check_for_permission()
    self.check_for_crud_permission()
    log.info("::::::::::: ENGINE STARTED :::::::::::\n"
             "\tCMD:%s\n"
             "\tSUBCMD:%s" % (self.current.input.get('cmd'),
                              self.current.input.get('subcmd')))
    self.workflow = self.load_or_create_workflow()
    self.current.workflow = self.workflow
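# A hedged usage sketch of driving the engine for one request. The ZEngine
# class name and the req/resp objects are assumptions; start_engine() only
# needs kwargs that Current(**kwargs) understands.
#
#   engine = ZEngine()
#   engine.start_engine(request=req, response=resp, workflow_name='login')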
def on_input_queue_declare(self, queue):
    """
    AMQP queue-declare callback. Chains to the parent handler,
    then kicks off the registered test class.

    Args:
        queue: the declared AMQP queue
    """
    log.info("input queue declared")
    super(TestQueueManager, self).on_input_queue_declare(queue)
    self.run_after_connection()
def clear_queue(self):
    """
    Clears out all messages from INPUT_QUEUE_NAME.
    """
    def remove_message(ch, method, properties, body):
        print("Removed message: %s" % body)

    self.input_channel.basic_consume(remove_message,
                                     queue=self.INPUT_QUEUE_NAME,
                                     no_ack=True)
    try:
        self.input_channel.start_consuming()
    except (KeyboardInterrupt, SystemExit):
        log.info(" Exiting")
        self.exit()
def terminate_existing_login(self):
    existing_sess_id = UserSessionID(self.current.user_id).get()
    if existing_sess_id and self.current.session.sess_id != existing_sess_id:
        if Session(existing_sess_id).delete():
            log.info("EXISTING LOGIN DETECTED, WE SHOULD LOG IT OUT FIRST")
            self.current.user.send_client_cmd({"cmd": "error",
                                               "error": "Login required",
                                               "code": 401},
                                              via_queue=existing_sess_id)
            self.current.user.unbind_private_channel(existing_sess_id)
def check_for_crud_permission(self):
    # TODO: this should be placed into CrudView
    if 'model' in self.current.input:
        if 'cmd' in self.current.input:
            permission = "%s.%s" % (self.current.input["model"],
                                    self.current.input['cmd'])
        else:
            permission = self.current.input["model"]
        log.info("CHECK CRUD PERM: %s" % permission)
        if permission in settings.ANONYMOUS_WORKFLOWS:
            return
        if not self.current.has_permission(permission):
            raise falcon.HTTPForbidden("Permission denied",
                                       "You don't have the required permission: %s" % permission)
def process_response(self, req, resp, resource):
    if 'result' not in req.context:
        return
    req.context['result']['is_login'] = '******' in req.env['session']
    # print(":::::body: %s\n\n:::::result: %s" % (resp.body, req.context['result']))
    if resp.body is None and req.context['result']:
        resp.body = json.dumps(req.context['result'])
    try:
        log.info("RESPONSE: %s" % resp.body)
    except Exception:
        log.exception("ERR: RESPONSE CAN'T BE LOGGED")
def run(self):
    """
    Actual consumption of incoming messages starts here.
    """
    self.input_channel.basic_consume(self.handle_message,
                                     queue=self.INPUT_QUEUE_NAME,
                                     no_ack=True)
    try:
        self.input_channel.start_consuming()
    except (KeyboardInterrupt, SystemExit):
        log.info(" Exiting")
        self.exit()
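# A minimal entry-point sketch, assuming the class holding connect()/run()
# above is named Worker (the name is hypothetical):
if __name__ == '__main__':
    worker = Worker()
    worker.run()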
def check_for_permission(self):
    # TODO: works, but not pretty; needs review!
    if self.current.task:
        permission = "%s.%s" % (self.current.workflow_name, self.current.name)
    else:
        permission = self.current.workflow_name
    log.info("CHECK PERM: %s" % permission)
    if (permission.startswith(tuple(settings.ANONYMOUS_WORKFLOWS)) or
            any('.' + perm in permission for perm in NO_PERM_TASKS)):
        return
    log.info("REQUIRE PERM: %s" % permission)
    if not self.current.has_permission(permission):
        raise falcon.HTTPForbidden("Permission denied",
                                   "You don't have the required permission: %s" % permission)
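# Worked example of the permission naming scheme used above
# (values are illustrative): with an active task the task name is appended,
# otherwise the bare workflow name is checked.
workflow_name, task_name = 'crud', 'EditForm'
permission = "%s.%s" % (workflow_name, task_name)   # -> "crud.EditForm"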
def connect(self):
    """
    Makes the AMQP connection, then creates the channel and the queue binding.
    """
    self.connection = pika.BlockingConnection(BLOCKING_MQ_PARAMS)
    self.client_queue = ClientQueue()
    self.input_channel = self.connection.channel()

    self.input_channel.exchange_declare(exchange=self.INPUT_EXCHANGE,
                                        type='topic',
                                        durable=True)
    self.input_channel.queue_declare(queue=self.INPUT_QUEUE_NAME)
    self.input_channel.queue_bind(exchange=self.INPUT_EXCHANGE,
                                  queue=self.INPUT_QUEUE_NAME)
    log.info("Bound to queue '%s' on exchange '%s'" %
             (self.INPUT_QUEUE_NAME, self.INPUT_EXCHANGE))
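# A hedged sketch of publishing a client message to the exchange declared
# above with plain pika. The 'input_exc' exchange name, the localhost broker
# and the session-id routing key are assumptions based on connect() and
# handle_message().
import json
import pika

conn = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
ch = conn.channel()
ch.basic_publish(exchange='input_exc',           # assumed INPUT_EXCHANGE value
                 routing_key='some-session-id',  # consumed as method.routing_key
                 body=json.dumps({"data": {"path": "login"},
                                  "_zops_remote_ip": "127.0.0.1",
                                  "_zops_source": "Remote"}))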
def log_wf_state(self):
    """
    Logs the current state of the workflow and its task data.
    """
    output = '\n- - - - - -\n'
    output += "WORKFLOW: %s" % self.current.workflow_name.upper()
    output += "\nTASK: %s ( %s )\n" % (self.current.name, self.current.task_type)
    output += "DATA:"
    for k, v in self.current.task_data.items():
        if v:
            output += "\n\t%s: %s" % (k, v)
    output += "\nCURRENT:"
    output += "\n\tACTIVITY: %s" % self.current.activity
    output += "\n\tTOKEN: %s" % self.current.token
    log.info(output + "\n= = = = = =\n")
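# Illustrative output of log_wf_state() (all values are made up):
#
#   - - - - - -
#   WORKFLOW: LOGIN
#   TASK: check_password ( UserTask )
#   DATA:
#       username: jdoe
#   CURRENT:
#       ACTIVITY: do_login
#       TOKEN: 3f2a9c...
#   = = = = = =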
def backend_to_client(self, body):
    """
    Routes a message coming from the backend to the matching client callback.
    """
    try:
        body = json_decode(body)
        if 'callbackID' in body:
            self.message_stack[body['callbackID']] = body
            self.message_callbacks[body['callbackID']](body)
        elif 'cmd' in body:
            self.message_callbacks[body['cmd']](body)
    except Exception:
        import traceback
        print("\nException BODY: %s \n" % pformat(body))
        traceback.print_exc()
    log.info("WRITE MESSAGE TO CLIENT:\n%s" % (pformat(body),))
def _prepare_post(self, wf_meta, data):
    """
    Prepares the post data dict.

    By default the data dict is encoded as JSON with the content type set to
    application/json. When form data is posted, the UI normally sends wf_meta
    info to the backend, but some tests exercise the missing-wf_meta scenario,
    so attaching it is optional via the wf_meta flag.

    :param bool wf_meta: whether fake wf_meta info should be attached
    :param dict data: post data
    :return: post data dict ready to be sent to the backend
    :rtype: dict
    """
    if 'token' not in data and self.token:
        data['token'] = self.token

    if self.response_wrapper:
        form_data = self.response_wrapper.form_data.copy()
    else:
        form_data = {}

    if self.path:
        data['path'] = self.path.replace('/', '')

    if 'form' in data:
        form_data.update(data['form'])
        data['form'] = form_data

    if wf_meta and hasattr(self, 'current') and hasattr(self.current, 'spec'):
        if self.current.task.parent.task_spec.__class__.__name__ == 'UserTask':
            data['wf_meta'] = {
                'name': self.current.workflow_name,
                'current_lane': self.current.task.parent.task_spec.lane,
                'current_step': self.current.task.parent.task_spec.name
            }

    post_data = {
        'data': data,
        '_zops_remote_ip': '127.0.0.1',
        '_zops_source': 'Remote',
    }
    log.info("PostData : %s" % post_data)
    print("PostData : %s" % post_data)
    return post_data
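# Illustrative shape of the dict _prepare_post() returns for a simple form
# submit (token and form values are made up):
#
#   {'data': {'form': {'username': 'jdoe'},
#             'path': 'login',
#             'token': 'b62d9c...'},
#    '_zops_remote_ip': '127.0.0.1',
#    '_zops_source': 'Remote'}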
def stc(self, response, request=None):
    """
    STC means Success Test Callback. Checks for a 200 or 201 response code.

    Args:
        response: response dict
        request: the request that produced the response
    """
    try:
        if response['code'] not in (200, 201):
            print("FAILED: Response not successful: \n")
            if not self.process_error_reponse(response):
                print("\nRESP:\n%s" % response)
            print("\nREQ:\n%s" % request)
        else:
            return True
    except Exception as e:
        log.exception(
            "\n===========>\nFAILED API REQUEST\n<===========\n%s\n" % e)
        log.info("Response: \n%s\n\n" % response)
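# A minimal usage sketch (inside a ConcurrentTestCase test method): passing
# stc as the callback makes a 200/201 response print PASS. The message
# content is an illustrative assumption.
#
#   self.client_to_backend({"path": "login"}, self.stc, "test_login")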
def client_to_backend(self, message, callback, caller_fn_name):
    """
    Sends a message from the client to the backend, registering a callback
    for the response.
    """
    cbid = uuid.uuid4().hex
    message = json_encode({"callbackID": cbid, "data": message})

    def cb(res):
        print("API Request: %s :: " % caller_fn_name, end='')
        result = callback(res, message)
        if ConcurrentTestCase.stc == callback and not result:
            FAIL = 'FAIL'
        else:
            FAIL = '--> %s' % callback.__name__
        print('PASS' if result else FAIL)

    # self.message_callbacks[cbid] = lambda res: callable(res, message)
    self.message_callbacks[cbid] = cb
    log.info("GOT MESSAGE FOR BACKEND %s: %s" % (self.sess_id, message))
    self.queue_manager.redirect_incoming_message(self.sess_id, message, self.request)
def __init__(self, **kwargs):
    self.workflow_name = kwargs.pop('workflow_name', '')
    self.request = kwargs.pop('request', {})
    self.response = kwargs.pop('response', {})
    try:
        self.session = self.request.env['session']
        self.input = self.request.context['data']
        self.output = self.request.context['result']
    except AttributeError:
        # when we want to use engine functions independently,
        # we need to create a fake current object
        self.session = {}
        self.input = {}
        self.output = {}

    self.spec = None
    self.user_id = None
    self.workflow = None
    self.task_type = ''
    self.task_data = {}
    self.task = None
    self.log = log
    self.name = ''
    self.activity = ''
    self.auth = lazy_object_proxy.Proxy(lambda: AuthBackend(self.session))
    self.user = lazy_object_proxy.Proxy(lambda: self.auth.get_user())

    if 'token' in self.input:
        self.token = self.input['token']
        log.info("TOKEN INCOMING: %s" % self.token)
        self.new_token = False
    else:
        self.token = uuid4().hex
        self.new_token = True
        log.info("TOKEN NEW: %s" % self.token)

    self.wfcache = Cache(key=self.token, json=True)
    log.info("\n\nWFCACHE: %s" % self.wfcache.get())
    self.set_task_data()
    self.permissions = []
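# A minimal sketch of the "fake current" fallback above: constructing
# Current without a request object yields empty session/input/output dicts,
# so engine helpers can be used standalone. Assumes the cache backend is
# reachable; the workflow name is illustrative.
current = Current(workflow_name='login')
assert current.session == {} and current.input == {}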
def run_workers(no_subprocess, watch_paths=None, is_background=False):
    """
    Subprocess handler: starts the worker subprocesses and restarts them
    when a watched path changes.
    """
    import atexit, os, subprocess, signal

    if watch_paths:
        from watchdog.observers import Observer
        # from watchdog.observers.fsevents import FSEventsObserver as Observer
        # from watchdog.observers.polling import PollingObserver as Observer
        from watchdog.events import FileSystemEventHandler

        def on_modified(event):
            if not is_background:
                print("Restarting worker due to change in %s" % event.src_path)
            log.info("modified %s" % event.src_path)
            try:
                kill_children()
                run_children()
            except Exception:
                log.exception("Error while restarting worker")

        handler = FileSystemEventHandler()
        handler.on_modified = on_modified

    # child_pids must live at module level so that run_children() and
    # kill_children() operate on the same list
    global child_pids
    child_pids = []
    log.info("starting %s workers" % no_subprocess)

    def run_children():
        global child_pids
        child_pids = []
        for i in range(int(no_subprocess)):
            proc = subprocess.Popen([sys.executable, __file__],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            child_pids.append(proc.pid)
            log.info("Started worker with pid %s" % proc.pid)

    def kill_children(sig=None, frame=None):
        """
        kill subprocesses on exit of manager (this) process

        The default args let this serve both as an atexit hook and as a
        signal handler (which is called with signum and frame).
        """
        log.info("Stopping worker(s)")
        for pid in child_pids:
            if pid is not None:
                os.kill(pid, signal.SIGTERM)

    run_children()
    atexit.register(kill_children)
    signal.signal(signal.SIGTERM, kill_children)

    if watch_paths:
        observer = Observer()
        for path in watch_paths:
            if not is_background:
                print("Watching for changes under %s" % path)
            observer.schedule(handler, path=path, recursive=True)
        observer.start()

    while True:
        try:
            sleep(1)
        except KeyboardInterrupt:
            log.info("Keyboard interrupt, exiting")
            if watch_paths:
                observer.stop()
                observer.join()
            sys.exit(0)
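# A minimal usage sketch, assuming this module is also the worker entry
# point: start three workers and restart them on changes under ./my_app
# (the path is illustrative).
run_workers(3, watch_paths=['./my_app'], is_background=False)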
def __init__(self, *args, **kwargs):
    super(TestQueueManager, self).__init__(*args, **kwargs)
    log.info("queue manager init")
    self.test_class = lambda qm: 1
def set_test_class(self, kls):
    log.info("test class set to %s" % kls)
    self.test_class = kls
def run_after_connection(self):
    log.info("run after connect")
    self.test_class(self)
def __init__(self):
    self.connect()
    signal.signal(signal.SIGTERM, self.exit)
    log.info("Worker starting")