    def test_construct_case_update_message_no_subscriptions(self):
        message = WorkflowExecutionController._create_case_update_message(18, CaseControl.CREATE)
        self.assertEqual(message.type, CommunicationPacket.CASE)
        message = message.case_control_message
        self.assertEqual(message.id, 18)
        self.assertEqual(message.type, CaseControl.CREATE)
        self.assertEqual(len(message.subscriptions), 0)

    def test_update_case(self, mock_send):
        self.controller.update_case(14, self.subscriptions)
        expected_message = WorkflowExecutionController._create_case_update_message(
            14,
            CaseControl.UPDATE,
            subscriptions=self.subscriptions)
        expected_message = expected_message.SerializeToString()
        self.assert_message_sent(mock_send, expected_message)
    def test_set_arguments_for_proto(self):
        message = ExecuteWorkflowMessage()
        uid = uuid4()
        selection = [1, 'a', '32', 46]
        arguments = [
            Argument('name1', value=32), Argument('name2', reference=uid, selection=selection)]
        WorkflowExecutionController._set_arguments_for_proto(message, arguments)
        self.assertEqual(len(message.arguments), len(arguments))
        self.assertEqual(message.arguments[0].name, arguments[0].name)
        self.assertEqual(message.arguments[0].value, str(arguments[0].value))
        self.assertEqual(message.arguments[0].reference, '')
        self.assertEqual(message.arguments[0].selection, '')

        self.assertEqual(message.arguments[1].name, arguments[1].name)
        self.assertEqual(message.arguments[1].value, '')
        self.assertEqual(message.arguments[1].reference, str(uid))
        self.assertEqual(message.arguments[1].selection, json.dumps(selection))
    def test_construct_case_update_message(self):
        message = WorkflowExecutionController._create_case_update_message(
            18,
            CaseControl.CREATE,
            subscriptions=self.subscriptions)
        self.assertEqual(message.type, CommunicationPacket.CASE)
        message = message.case_control_message
        self.assertEqual(message.id, 18)
        self.assertEqual(message.type, CaseControl.CREATE)
        for i in range(2):
            self.assertEqual(message.subscriptions[i].id, self.subscriptions[i].id)
            self.assertEqual(message.subscriptions[i].events, self.subscriptions[i].events)
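
# For orientation, a minimal round-trip sketch of the case-control messages the
# tests above exercise. Illustrative only: it assumes the protobuf classes
# (CommunicationPacket, CaseControl) imported elsewhere in this file, and the
# case id 5 is made up for the example.
def _example_case_message_roundtrip():
    message = WorkflowExecutionController._create_case_update_message(5, CaseControl.CREATE)
    serialized = message.SerializeToString()  # standard protobuf serialization
    parsed = CommunicationPacket()
    parsed.ParseFromString(serialized)  # standard protobuf deserialization
    assert parsed.type == CommunicationPacket.CASE
    assert parsed.case_control_message.id == 5
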
class MultiprocessedExecutor(object):
    def __init__(self, cache, event_logger):
        """Initializes a multiprocessed executor, which will handle the execution of workflows.

        Args:
            cache: The cache passed to the WorkflowExecutionController
            event_logger: The logger used to record workflow execution events
        """
        self.threading_is_initialized = False
        self.id = "controller"
        self.pids = None
        self.workflows_executed = 0

        self.ctx = None  # TODO: Test if you can always use the singleton
        self.auth = None

        self.manager = None
        self.receiver = None
        self.receiver_thread = None
        self.cache = cache
        self.event_logger = event_logger

        self.execution_db = ExecutionDatabase.instance

    def initialize_threading(self, app, pids=None):
        """Initialize the multiprocessing communication threads, allowing for parallel execution of workflows.

        Args:
            app (FlaskApp): The current_app object
            pids (list[Process], optional): Optional list of spawned processes. Defaults to None

        """
        if not (os.path.exists(walkoff.config.Config.ZMQ_PUBLIC_KEYS_PATH) and
                os.path.exists(walkoff.config.Config.ZMQ_PRIVATE_KEYS_PATH)):
            logger.fatal(
                "Certificates are missing - run generate_certificates.py script first."
            )
            sys.exit(1)
        self.pids = pids
        self.ctx = zmq.Context.instance()
        self.auth = ThreadAuthenticator()
        self.auth.start()
        self.auth.allow('127.0.0.1')
        self.auth.configure_curve(
            domain='*', location=walkoff.config.Config.ZMQ_PUBLIC_KEYS_PATH)

        self.manager = WorkflowExecutionController(self.cache)
        self.receiver = Receiver(app)

        self.receiver_thread = threading.Thread(
            target=self.receiver.receive_results)
        self.receiver_thread.start()

        self.threading_is_initialized = True
        logger.debug('Controller threading initialized')
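
    # For context, the workers and receiver on the other end of this CURVE
    # setup load their own keypairs from ZMQ_PRIVATE_KEYS_PATH. A minimal
    # sketch of a CURVE server socket follows; the 'server.key_secret' file
    # name is an assumption for illustration, not necessarily what
    # generate_certificates.py produces:
    #
    #   public, secret = zmq.auth.load_certificate(os.path.join(
    #       walkoff.config.Config.ZMQ_PRIVATE_KEYS_PATH, 'server.key_secret'))
    #   socket = zmq.Context.instance().socket(zmq.PULL)
    #   socket.curve_publickey = public
    #   socket.curve_secretkey = secret
    #   socket.curve_server = True  # required to accept CURVE clients
    #   socket.bind('tcp://127.0.0.1:5556')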

    def wait_and_reset(self, num_workflows):
        """Waits for the given number of workflows to complete, then resets the completion counter.

        Args:
            num_workflows (int): The number of workflows to wait for
        """
        waited = 0
        deadline = 10  # seconds

        # Poll the receiver every 100 ms until the expected number of
        # workflows has completed or the deadline elapses.
        while waited < deadline:
            if self.receiver is not None and num_workflows == self.receiver.workflows_executed:
                break
            waited += 0.1
            gevent.sleep(0.1)
        assert num_workflows == self.receiver.workflows_executed
        self.receiver.workflows_executed = 0

    def shutdown_pool(self):
        """Shuts down the threadpool"""
        self.manager.send_exit_to_worker_comms()
        if self.pids:
            for p in self.pids:
                if p.is_alive():
                    logger.info(
                        'Multiprocessed executor shutting down process {}'.
                        format(p))
                    # Ask the worker to abort and give it a few seconds to
                    # exit; if it is still alive, force-kill it. SIGKILL does
                    # not exist on Windows, hence the AttributeError guard.
                    os.kill(p.pid, signal.SIGABRT)
                    p.join(timeout=3)
                    try:
                        os.kill(p.pid, signal.SIGKILL)
                    except (OSError, AttributeError):
                        pass
        if self.receiver_thread:
            self.receiver.thread_exit = True
            self.receiver_thread.join(timeout=1)
        self.threading_is_initialized = False
        logger.debug('Controller thread pool shutdown')

        if self.auth:
            self.auth.stop()
        if self.ctx:
            self.ctx.destroy()
        self.cleanup_threading()

    def cleanup_threading(self):
        """Once the threadpool has been shutdown, clear out all of the data structures used in the pool"""
        self.pids = []
        self.receiver_thread = None
        self.workflows_executed = 0
        self.threading_is_initialized = False
        self.manager = None
        self.receiver = None

    def execute_workflow(self,
                         workflow_id,
                         execution_id_in=None,
                         start=None,
                         start_arguments=None,
                         resume=False,
                         environment_variables=None):
        """Executes a workflow

        Args:
            workflow_id (UUID): The ID of the Workflow to be executed.
            execution_id_in (UUID, optional): The optional execution ID to provide for the workflow. Should only be
                used (and is required) when resuming a workflow. Must be a valid UUID4. Defaults to None.
            start (UUID, optional): The ID of the starting action. Defaults to None.
            start_arguments (list[Argument], optional): The arguments to the starting action of the workflow.
                Defaults to None.
            resume (bool, optional): Optional boolean to resume a previously paused workflow. Defaults to False.
            environment_variables (list[EnvironmentVariable], optional): Optional list of environment variables to
                pass into the workflow. These are not persisted. Defaults to None.

        Returns:
            (UUID): The execution ID of the Workflow, or None if the workflow does not exist.
        """
        workflow = self.execution_db.session.query(Workflow).filter_by(
            id=workflow_id).first()
        if not workflow:
            logger.error(
                'Attempted to execute workflow {} which does not exist'.format(
                    workflow_id))
            return None

        execution_id = execution_id_in if execution_id_in else str(
            uuid.uuid4())

        if start is not None:
            logger.info(
                'Executing workflow {0} (id={1}) with starting action {2}'
                .format(workflow.name, workflow.id, start))
        else:
            logger.info(
                'Executing workflow {0} (id={1}) with default starting action'.
                format(workflow.name, workflow.id))

        workflow_data = {
            'execution_id': execution_id,
            'id': str(workflow.id),
            'name': workflow.name
        }
        self._log_and_send_event(WalkoffEvent.WorkflowExecutionPending,
                                 sender=workflow_data)
        self.manager.add_workflow(workflow.id, execution_id, start,
                                  start_arguments, resume,
                                  environment_variables)

        self._log_and_send_event(WalkoffEvent.SchedulerJobExecuted)
        return execution_id
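
    # Typical call pattern (illustrative; assumes `executor` is an initialized
    # MultiprocessedExecutor and `wf_id` the id of an existing Workflow):
    #
    #   execution_id = executor.execute_workflow(wf_id)
    #   status = executor.get_workflow_status(execution_id)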

    def pause_workflow(self, execution_id):
        """Pauses a workflow that is currently executing.

        Args:
            execution_id (UUID): The execution id of the workflow.

        Returns:
            (bool): True if Workflow successfully paused, False otherwise
        """
        logger.info('Pausing workflow {}'.format(execution_id))
        workflow_status = self.execution_db.session.query(
            WorkflowStatus).filter_by(execution_id=execution_id).first()
        if workflow_status and workflow_status.status == WorkflowStatusEnum.running:
            self.manager.pause_workflow(execution_id)
            return True
        else:
            logger.warning(
                'Cannot pause workflow {0}. Invalid key, or workflow not running.'
                .format(execution_id))
            return False

    def resume_workflow(self, execution_id):
        """Resumes a workflow that is currently paused.

        Args:
            execution_id (UUID): The execution id of the workflow.

        Returns:
            (bool): True if workflow successfully resumed, False otherwise
        """
        logger.info('Resuming workflow {}'.format(execution_id))
        workflow_status = self.execution_db.session.query(
            WorkflowStatus).filter_by(execution_id=execution_id).first()

        if workflow_status and workflow_status.status == WorkflowStatusEnum.paused:
            saved_state = self.execution_db.session.query(
                SavedWorkflow).filter_by(
                    workflow_execution_id=execution_id).first()
            workflow = self.execution_db.session.query(Workflow).filter_by(
                id=workflow_status.workflow_id).first()
            workflow._execution_id = execution_id
            self._log_and_send_event(WalkoffEvent.WorkflowResumed,
                                     sender=workflow)

            start = saved_state.action_id if saved_state else workflow.start
            self.execute_workflow(workflow.id,
                                  execution_id_in=execution_id,
                                  start=start,
                                  resume=True)
            return True
        else:
            logger.warning(
                'Cannot resume workflow {0}. Invalid key, or workflow not paused.'
                .format(execution_id))
            return False

    def abort_workflow(self, execution_id):
        """Abort a workflow

        Args:
            execution_id (UUID): The execution id of the workflow.

        Returns:
            (bool): True if successfully aborted workflow, False otherwise
        """
        logger.info('Aborting workflow {}'.format(execution_id))
        workflow_status = self.execution_db.session.query(
            WorkflowStatus).filter_by(execution_id=execution_id).first()

        if workflow_status:
            if workflow_status.status in [
                    WorkflowStatusEnum.pending, WorkflowStatusEnum.paused,
                    WorkflowStatusEnum.awaiting_data
            ]:
                workflow = self.execution_db.session.query(Workflow).filter_by(
                    id=workflow_status.workflow_id).first()
                if workflow is not None:
                    self._log_and_send_event(WalkoffEvent.WorkflowAborted,
                                             sender={
                                                 'execution_id': execution_id,
                                                 'id':
                                                 workflow_status.workflow_id,
                                                 'name': workflow.name
                                             })
            elif workflow_status.status == WorkflowStatusEnum.running:
                self.manager.abort_workflow(execution_id)
            return True
        else:
            logger.warning(
                'Cannot abort workflow {0}. Invalid key, or workflow already shutdown.'
                .format(execution_id))
            return False

    def resume_trigger_step(self, execution_id, data_in, arguments=None):
        """Resumes a workflow awaiting trigger data, if the conditions are met.

        Args:
            execution_id (UUID): The execution ID of the workflow
            data_in (dict): The data to send to the trigger
            arguments (list[Argument], optional): Optional list of new Arguments for the trigger action.
                Defaults to None.

        Returns:
            (bool): True if successfully resumed trigger step, false otherwise
        """
        logger.info('Resuming workflow {} from trigger'.format(execution_id))
        saved_state = self.execution_db.session.query(SavedWorkflow).filter_by(
            workflow_execution_id=execution_id).first()
        workflow = self.execution_db.session.query(Workflow).filter_by(
            id=saved_state.workflow_id).first()
        workflow._execution_id = execution_id

        executed = False
        exec_action = None
        for action in workflow.actions:
            if action.id == saved_state.action_id:
                exec_action = action
                executed = action.execute_trigger(data_in,
                                                  saved_state.accumulator)
                break

        if executed:
            self._log_and_send_event(
                WalkoffEvent.TriggerActionTaken,
                sender=exec_action,
                data={'workflow_execution_id': execution_id})
            self.execute_workflow(workflow.id,
                                  execution_id_in=execution_id,
                                  start=str(saved_state.action_id),
                                  start_arguments=arguments,
                                  resume=True)
            return True
        else:
            self._log_and_send_event(
                WalkoffEvent.TriggerActionNotTaken,
                sender=exec_action,
                data={'workflow_execution_id': execution_id})
            return False

    def get_waiting_workflows(self):
        """Gets a list of the execution IDs of workflows currently awaiting data to be sent to a trigger.

        Returns:
            (list[UUID]): A list of execution IDs of workflows currently awaiting data to be sent to a trigger.
        """
        self.execution_db.session.expire_all()
        wf_statuses = self.execution_db.session.query(
            WorkflowStatus).filter_by(
                status=WorkflowStatusEnum.awaiting_data).all()
        return [str(wf_status.execution_id) for wf_status in wf_statuses]
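
    # Illustrative trigger flow (not part of the class API): a workflow blocked
    # on a trigger appears in get_waiting_workflows(), and sending it matching
    # data resumes it. The payload shape {'data': ...} is an assumption.
    #
    #   for execution_id in executor.get_waiting_workflows():
    #       executor.resume_trigger_step(execution_id, {'data': 'value'})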

    def get_workflow_status(self, execution_id):
        """Gets the current status of a workflow by its execution ID

        Args:
            execution_id (UUID): The execution ID of the workflow

        Returns:
            (WorkflowStatusEnum): The status of the workflow, or 0 if no status exists for the given execution ID
        """
        workflow_status = self.execution_db.session.query(
            WorkflowStatus).filter_by(execution_id=execution_id).first()
        if workflow_status:
            return workflow_status.status
        else:
            logger.error(
                "Workflow execution id {} does not exist in WorkflowStatus table."
                .format(execution_id))
            return 0

    def _log_and_send_event(self, event, sender=None, data=None):
        """Records the event with the event logger, then dispatches it to in-process subscribers."""
        sender = sender or self
        sender_id = sender.id if not isinstance(sender, dict) else sender['id']
        self.event_logger.log(event, sender_id, data=data)
        event.send(sender, data=data)

    def create_case(self, case_id, subscriptions):
        """Creates a Case

        Args:
            case_id (int): The ID of the Case
            subscriptions (list[Subscription]): List of Subscriptions to subscribe to
        """
        self.manager.create_case(case_id, subscriptions)

    def update_case(self, case_id, subscriptions):
        """Updates a Case

        Args:
            case_id (int): The ID of the Case
            subscriptions (list[Subscription]): List of Subscriptions to subscribe to
        """
        self.manager.update_case(case_id, subscriptions)

    def delete_case(self, case_id):
        """Deletes a Case

        Args:
            case_id (int): The ID of the Case to delete
        """
        self.manager.delete_case(case_id)
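
# Illustrative only: a hypothetical driver showing the intended pause/resume
# lifecycle of MultiprocessedExecutor. The sleep lengths are arbitrary, and the
# workflow is assumed to run long enough to still be running when paused.
def _example_pause_resume(executor, workflow_id):
    import time
    execution_id = executor.execute_workflow(workflow_id)
    time.sleep(1)  # give the workflow time to reach the running state
    if executor.pause_workflow(execution_id):  # succeeds only while running
        time.sleep(1)
        executor.resume_workflow(execution_id)  # succeeds only while paused
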
    def test_create_workflow_control_message(self):
        uid = str(uuid4())
        message = WorkflowExecutionController._create_workflow_control_message(WorkflowControl.PAUSE, uid)
        self.assertEqual(message.type, CommunicationPacket.WORKFLOW)
        self.assertEqual(message.workflow_control_message.type, WorkflowControl.PAUSE)
        self.assertEqual(message.workflow_control_message.workflow_execution_id, uid)

    def test_delete_case(self, mock_send):
        self.controller.delete_case(37)
        expected_message = WorkflowExecutionController._create_case_update_message(37, CaseControl.DELETE)
        expected_message = expected_message.SerializeToString()
        self.assert_message_sent(mock_send, expected_message)

    @classmethod
    def setUpClass(cls):
        initialize_test_config()
        cls.subscriptions = [Subscription(str(uuid4()), ['a', 'b', 'c']), Subscription(str(uuid4()), ['b'])]
        cls.cache = MockRedisCacheAdapter()
        cls.controller = WorkflowExecutionController(cls.cache)
        setup_dbs()

    def test_pause_workflow(self, mock_send):
        uid = str(uuid4())
        message = WorkflowExecutionController._create_workflow_control_message(WorkflowControl.PAUSE, uid)
        self.controller.pause_workflow(uid)
        expected_message = message.SerializeToString()
        self.assert_message_sent(mock_send, expected_message)
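
    # The tests above rely on a mock_send argument (suggesting a patch decorator
    # elided from this excerpt) and an assert_message_sent helper that is not
    # shown. A hypothetical reconstruction of that helper, assuming mock_send
    # patches the ZMQ socket's send method:
    #
    #   def assert_message_sent(self, mock_send, expected_message):
    #       mock_send.assert_called_once()
    #       self.assertEqual(mock_send.call_args[0][0], expected_message)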