def perform_search(self, query, wf_bundle):
    """
    Perform an Ariel search from a provided AQL query string, polling until the
    search completes, times out, or the calling workflow is terminated.

    :param query: AQL query string to perform the search
    :param wf_bundle: list containing two elements, the Resilient rest_client()
        and the Resilient workflow_id respectively; may be falsy to skip the
        workflow-termination check
    :return: search results, or None if the workflow is stopped prior to search
        completion
    :raises SearchJobFailure: if no search_id could be obtained for the query
    :raises SearchFailure: if QRadar returns an unexpected search status
    :raises SearchTimeout: if the search exceeds the configured search_timeout
    """
    search_id = self.get_search_id(query)

    # Guard clause: without a search_id there is nothing to poll.
    if not search_id:
        LOG.error("search_id is None")
        raise SearchJobFailure(query)

    start_time = time.time()  # store the start time for timeout tracking
    LOG.info("Ariel search started under search_id: %s", search_id)

    done = False
    while not done:
        status = self.check_status(search_id)

        # If the calling workflow has been terminated, cancel the remote
        # search and bail out with no result.
        if wf_bundle and get_workflow_status(wf_bundle[0], wf_bundle[1]).is_terminated:
            LOG.info("Workflow terminated. Canceling search...")
            self.cancel_search(search_id)
            return None

        if status == qradar_constants.SEARCH_STATUS_COMPLETED:
            done = True
        elif status == qradar_constants.SEARCH_STATUS_WAIT:
            done = False
        else:
            LOG.error("Unexpected search status returned of: %s", status)
            raise SearchFailure(search_id, status)

        if not done:
            # search_timeout defaults to 10 minutes. If the customer overrides
            # it to 0, the search never times out.
            if self.search_timeout != 0 and time.time() - start_time > self.search_timeout:
                self.cancel_search(search_id)  # cancel search on timeout
                # We could return the search results instead of raising an
                # exception, but they would be incomplete.
                raise SearchTimeout(search_id, status)
            time.sleep(self.polling_period)  # polling_period is defaulted to 5 sec

    return self.get_search_result(search_id)
def threaded_delete(datatable, workflow_id, row_id):
    """
    Wait for the workflow to complete before performing the delete row action.

    Args:
        datatable ([object]): datatable helper object; provides res_client and delete_row()
        workflow_id ([int]): workflow id to ensure it's complete before deleting row
        row_id ([int]): row to queue for delete
    Returns:
        None
    """
    MAX_SLEEP_UNTIL_WF_COMPLETES = 60  # no sleep time should exceed 60s
    MAX_LOOP = 60                      # roughly an hour of waiting
    sleep_time = 10

    # Poll the workflow status, sleeping with a capped exponential backoff
    # (10s, 20s, 40s, then 60s per check) while it is still running.
    wf = get_workflow_status(datatable.res_client, workflow_id)
    ndx = 0
    while wf.status == 'running' and ndx < MAX_LOOP:
        time.sleep(sleep_time)
        sleep_time = min(sleep_time * 2, MAX_SLEEP_UNTIL_WF_COMPLETES)
        wf = get_workflow_status(datatable.res_client, workflow_id)
        ndx += 1

    if wf.status != 'running':
        # Workflow finished (or was terminated) — perform the delete_row().
        result = datatable.delete_row(row_id)
        if 'error' in result:
            LOG.error("Queued delete failed for row_id: %s. Error: %s",
                      row_id, result['error'])
        else:
            LOG.debug("Queued delete succeeded for row_id: %s", row_id)
    else:
        # Gave up after MAX_LOOP polls; the workflow is still running.
        LOG.error("Unable to delete row_id: %s with workflow %s state: %s",
                  row_id, workflow_id, wf.status)
def _utilities_timer_function(self, event, *args, **kwargs):
    """
    This function implements a simple timer. A workflow using this function
    will sleep for the specified amount of time. The function takes as input
    utilities_time or utilities_epoch as input. The function periodically
    checks the status of the calling workflow and will end function execution
    if the workflow has been terminated.

    The utilities_time parameter is a string of format "time value"
    concatenated with a "time unit" character, where character is:
        's' for seconds
        'm' for minutes
        'h' for hours
        'd' for days
    For example: '30s' = 30 seconds; '40m' = 40 minutes.

    The utilities_epoch parameter is an epoch time value that specifies the
    time the timer should stop sleeping. The timer function computes the
    total sleep time needed.
    """
    try:
        # Initialize results payload
        rp = ResultPayload(CONFIG_DATA_SECTION, **kwargs)

        # Get the function parameters:
        utilities_time = kwargs.get("utilities_time")  # text
        utilities_epoch = kwargs.get("utilities_epoch")  # datetime picker

        log = logging.getLogger(__name__)
        log.info("utilities_time: %s", utilities_time)
        log.info("utilities_epoch: %s", utilities_epoch)

        # The two inputs are mutually exclusive: exactly one should be given.
        if utilities_time is not None and utilities_epoch is not None:
            raise ValueError(
                "Utilities timer function takes one parameter as input: utilities_time OR utilities_epoch."
            )

        # Get max timer to sleep from app.config setting and convert to seconds.
        max_timer = self.options.get("max_timer")
        if max_timer is None:
            # max_timer is not set in the app.config, so set a default and output a message.
            max_timer = "30d"
            yield StatusMessage(
                "Please specify [fn_utilities] max_timer in app.config. Setting default max_timer to '30d'."
            )
        max_timer_in_seconds = get_sleep_time_in_seconds(max_timer)

        # Compute the time to wait in seconds
        if utilities_epoch is not None:
            total_time_in_seconds = get_sleep_time_from_epoch(
                utilities_epoch)
        else:
            total_time_in_seconds = get_sleep_time_in_seconds(
                utilities_time)

        # Refuse sleep requests longer than the configured ceiling.
        if total_time_in_seconds > max_timer_in_seconds:
            raise ValueError(
                'Requested sleep timer {}s is greater than max_timer {}s set in app.config'
                .format(total_time_in_seconds, max_timer_in_seconds))

        # Compute the workflow check interval time based on the total time in seconds.
        wf_check_interval = compute_interval_time(total_time_in_seconds)

        # Get workflow instance ID so we can poll its status while sleeping.
        wf_instance_id = event.message["workflow_instance"][
            "workflow_instance_id"]
        res_client = self.rest_client()

        # Initialize before the while loop
        current_sleep_time = 0
        wf_status = get_workflow_status(res_client, wf_instance_id)

        # Loop and sleep till total time to sleep achieved and while workflow
        # is not terminated.
        while (current_sleep_time < total_time_in_seconds) and (
                total_time_in_seconds > 0) and not wf_status.is_terminated:
            yield StatusMessage(
                'Sleeping for {}s. {}/{}s complete.'.format(
                    wf_check_interval, current_sleep_time,
                    total_time_in_seconds))

            # Sleep interval time
            time.sleep(wf_check_interval)

            # Keep track of total sleep time
            current_sleep_time = current_sleep_time + wf_check_interval

            # Check the status of the workflow
            wf_status = get_workflow_status(res_client, wf_instance_id)

            # This case will be True where total_time_in_seconds is not an
            # exact multiple of the interval and it is the final time through
            # the while-loop. For example: total_time_in_seconds=5 will have
            # wf_check_interval=2; the last time through the loop the sleep
            # time should be 1.
            if (current_sleep_time +
                    wf_check_interval) > total_time_in_seconds:
                wf_check_interval = total_time_in_seconds - current_sleep_time

        if wf_status.is_terminated:
            yield StatusMessage('Workflow was terminated.')

        yield StatusMessage('Total sleep time {} seconds complete.'.format(
            current_sleep_time))

        # Return the workflow status
        results = rp.done(wf_status.is_terminated, wf_status.as_dict(),
                          wf_status.reason)
        log.debug("RESULTS: %s", results)
        log.info("> Complete")

        # Produce a FunctionResult with the results
        yield FunctionResult(results)
    except Exception as err:
        yield FunctionError(err)
def _twilio_receive_messages_function(self, event, *args, **kwargs):
    """Function: Receive messages based on a destination number and a timeframe"""
    try:
        # Get the workflow_instance_id so we can raise an error if the
        # workflow was terminated by the user.
        workflow_instance_id = event.message["workflow_instance"]["workflow_instance_id"]

        res_payload = ResultPayload(CONFIG_DATA_SECTION, **kwargs)

        # Get the function parameters:
        twilio_phone_number = kwargs.get("twilio_phone_number")  # text
        twilio_date_sent = kwargs.get("twilio_date_sent")  # text
        twilio_date_sent_ts = kwargs.get("twilio_date_sent_ts")  # number
        twilio_wait_timeout = kwargs.get("twilio_wait_timeout")  # text
        if not twilio_wait_timeout:
            twilio_wait_timeout = DEFAULT_WAIT_TIMEOUT

        log = logging.getLogger(__name__)
        log.info("twilio_phone_number: %s", twilio_phone_number)
        log.info("twilio_date_sent: %s", twilio_date_sent)
        log.info("twilio_date_sent_ts: %s", twilio_date_sent_ts)
        log.info("twilio_wait_timeout: %s", twilio_wait_timeout)

        # Get configs
        validate_fields(['twilio_account_sid', 'twilio_auth_token', 'twilio_src_address'], self.options)
        account_sid = self.options.get("twilio_account_sid")
        auth_token = self.options.get("twilio_auth_token")
        src_address = self.options.get("twilio_src_address")

        yield StatusMessage("starting...")
        phone_number = clean_phone_number(twilio_phone_number)

        # The timestamp is converted to a string only when no string version
        # was provided — i.e. the string date has precedence over the
        # timestamp when both are present.
        if twilio_date_sent_ts and not twilio_date_sent:
            twilio_date_sent = readable_datetime(twilio_date_sent_ts)

        client = Client(account_sid, auth_token)

        # calculate timeout value (absolute wall-clock deadline)
        wait_timeout = time.time() + get_interval(twilio_wait_timeout)

        continue_flg = True
        result_err_msg = None
        result_payload = []
        result_rc = True
        wf_status = None
        rest_client = self.rest_client()
        converted_date = parse(twilio_date_sent) if twilio_date_sent else None

        # Continue while the workflow is still active, no messages have been
        # received and the timeout period is active.
        while continue_flg and time.time() <= wait_timeout:
            # get the messages based on phone number and date sent
            messages = self.get_responses(client, converted_date, src_address, phone_number)
            if messages:
                continue_flg = False
                log.debug(str(messages))
                # format messages for payload return
                for message in messages:
                    entry = {
                        "phone_number": message.from_,
                        "messaging_service_sid": message.sid,
                        "date_created": str(message.date_created),
                        "date_created_ts": get_ts_from_datetime(message.date_created),
                        "direction": message.direction,
                        "message_body": message.body,
                        "status": message.status,
                        "error_message": message.error_message
                    }
                    result_payload.append(entry)

            # if no messages, delay and try again
            # NOTE(review): result_rc is set False on the first empty poll and
            # is never reset to True when messages arrive on a later
            # iteration — confirm whether a late success should report
            # result_rc=True.
            if not result_payload:
                result_rc = False
                time.sleep(SLEEP_TIME)
                # check to see if the workflow is still active
                wf_status = get_workflow_status(rest_client, workflow_instance_id)
                continue_flg = not wf_status.is_terminated

        if wf_status and wf_status.is_terminated:
            result_err_msg = u"Workflow was terminated: {}".format(wf_status.reason)
            yield StatusMessage(result_err_msg)
            log.warning(result_err_msg)
            result_rc = False
        elif not result_payload:
            # Loop exited on the deadline with nothing received.
            result_err_msg = u"Timeout waiting for responses for phone number: {} since {}".format(twilio_phone_number, twilio_date_sent)
            yield StatusMessage(result_err_msg)
            log.warning(result_err_msg)

        yield StatusMessage("done...")
        results = res_payload.done(result_rc, result_payload, reason=result_err_msg)

        # Produce a FunctionResult with the results
        yield FunctionResult(results)
    except Exception:
        yield FunctionError()