def set_current_task(workflow_id, task_id):
    """Move the workflow token to the given task, resetting the token (and
    clearing downstream data) if the task was already completed.

    Raises ApiError(404) for an unknown workflow id, and an 'invalid_state'
    error unless the target task is COMPLETED or READY.
    """
    workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
    if workflow_model is None:
        # Consistent with update_task: fail fast with a 404 rather than
        # letting WorkflowProcessor fail on a missing record.
        raise ApiError("invalid_workflow_id",
                       "The given workflow id is not valid.",
                       status_code=404)
    processor = WorkflowProcessor(workflow_model)
    task_id = uuid.UUID(task_id)
    spiff_task = processor.bpmn_workflow.get_task(task_id)
    _verify_user_and_role(processor, spiff_task)
    user_uid = g.user.uid
    if spiff_task.state not in (spiff_task.COMPLETED, spiff_task.READY):
        raise ApiError(
            "invalid_state",
            "You may not move the token to a task whose state is not "
            "currently set to COMPLETE or READY.")
    # Only reset the token if the task doesn't already have it.
    if spiff_task.state == spiff_task.COMPLETED:
        # Don't try to copy the existing data back into this task.
        spiff_task.reset_token(reset_data=True)
    processor.save()
    WorkflowService.log_task_action(user_uid, processor, spiff_task,
                                    WorkflowService.TASK_ACTION_TOKEN_RESET)
    WorkflowService.update_task_assignments(processor)
    workflow_api_model = WorkflowService.processor_to_workflow_api(processor, spiff_task)
    return WorkflowApiSchema().dump(workflow_api_model)
def test_documentation_processing_handles_replacements(self):
    """Jinja placeholders in task documentation are rendered from task data."""
    self.load_example_data()
    workflow = self.create_workflow('random_fact')
    processor = WorkflowProcessor(workflow)
    processor.do_engine_steps()
    task = processor.next_task()

    # Plain text passes through untouched.
    task.task_spec.documentation = "Some simple docs"
    self.assertEqual("Some simple docs",
                     WorkflowService._process_documentation(task))

    # A bare {{ }} placeholder is replaced from task.data.
    task.data = {"replace_me": "new_thing"}
    task.task_spec.documentation = "{{replace_me}}"
    self.assertEqual("new_thing",
                     WorkflowService._process_documentation(task))

    # Replacement also works inside a larger markdown document.
    documentation = """
# Bigger Test

* bullet one
* bullet two has {{replace_me}}

# other stuff.
"""
    expected = """
# Bigger Test

* bullet one
* bullet two has new_thing

# other stuff.
"""
    task.task_spec.documentation = documentation
    self.assertEqual(expected, WorkflowService._process_documentation(task))
def test_study_sponsors_script_ensure_delete(self, mock_get):
    """After an associates-delete workflow runs, a user removed from the
    study's associates should no longer see the study.

    NOTE: the original ended with the token/PB_ENABLED/user_studies/assert
    sequence duplicated verbatim; the redundant second copy is removed.
    """
    mock_get.return_value.ok = True
    mock_get.return_value.text = self.protocol_builder_response('sponsors.json')
    flask.g.user = UserModel(uid='dhf8r')
    app.config['PB_ENABLED'] = True
    self.load_example_data()
    study = session.query(StudyModel).first()
    workflow_spec_model = self.load_test_spec("study_sponsors_associates_delete")
    workflow_model = StudyService._create_workflow_model(study, workflow_spec_model)
    WorkflowService.test_spec("study_sponsors_associates_delete")
    processor = WorkflowProcessor(workflow_model)
    processor.do_engine_steps()
    # change user and make sure we can access the study
    flask.g.user = UserModel(uid='lb3dp')
    flask.g.token = 'my spiffy token'
    app.config['PB_ENABLED'] = False
    output = user_studies()
    self.assertEqual(len(output), 0)
def validate_all(study_id, category=None, spec_id=None):
    """Step through all the local workflows and validate them, printing any
    errors.  This may take forever.  Please provide a real study id to use
    for validation; an optional category can be given to only validate that
    category, and you can further specify a specific spec, if needed."""
    from crc.models.workflow import WorkflowSpecModel
    from crc.services.workflow_service import WorkflowService
    from crc.api.common import ApiError
    from crc.models.study import StudyModel
    from crc.models.user import UserModel
    from flask import g

    study = session.query(StudyModel).filter(StudyModel.id == study_id).first()
    # Validation runs as the study's owner, with a throw-away token.
    g.user = session.query(UserModel).filter(UserModel.uid == study.user_uid).first()
    g.token = "anything_is_fine_just_need_something."

    for spec in session.query(WorkflowSpecModel).all():
        if spec_id and spec_id != spec.id:
            continue
        if category and (not spec.category or spec.category.display_name != category):
            continue
        try:
            WorkflowService.test_spec(spec.id, validate_study_id=study_id)
        except ApiError as e:
            if e.code == 'disabled_workflow':
                print(f"Skipping {spec.id} in category {spec.category.display_name}, it is disabled for this study.")
                continue  # disabled specs are skipped, not fatal
            print(f"API Error {e.code}, validate workflow {spec.id} in Category {spec.category.display_name}")
            return
        except WorkflowTaskExecException as e:
            # NOTE(review): WorkflowTaskExecException is not among the local
            # imports above — presumably imported at module level; verify.
            print(f"Workflow Error, {e}, in Task {e.task.name} validate workflow {spec.id} in Category {spec.category.display_name}")
            return
        except Exception as e:
            print(f"Unexpected Error, {e} validate workflow {spec.id} in Category {spec.category.display_name}")
            print(e)
            return
def test_get_dot_value(self):
    """A dotted path digs into nested dicts first; a literal dotted key only
    wins when the nested path is absent."""
    source = {"a": {"b": {"c": "abracadara"}}, "a.b.c": "garbage"}
    self.assertEqual("abracadara",
                     WorkflowService.get_dot_value("a.b.c", source))
    self.assertEqual("garbage",
                     WorkflowService.get_dot_value("a.b.c", {"a.b.c": "garbage"}))
def get_workflow(workflow_id, soft_reset=False, hard_reset=False):
    """Return the API representation of the given workflow, optionally
    applying a soft or hard reset while loading it."""
    model: WorkflowModel = session.query(WorkflowModel).filter_by(
        id=workflow_id).first()
    processor = WorkflowProcessor(model, soft_reset=soft_reset,
                                  hard_reset=hard_reset)
    api_model = WorkflowService.processor_to_workflow_api(processor)
    WorkflowService.update_task_assignments(processor)
    return WorkflowApiSchema().dump(api_model)
def test_random_data_populate_form_on_auto_complete(self):
    """Random form population should fill a search-enum field with a dict."""
    self.load_example_data()
    workflow = self.create_workflow('enum_options_with_search')
    processor = WorkflowProcessor(workflow)
    processor.do_engine_steps()

    next_task = processor.next_task()
    api_task = WorkflowService.spiff_task_to_api_task(next_task,
                                                      add_docs_and_forms=True)
    WorkflowService.populate_form_with_random_data(next_task, api_task,
                                                   required_only=False)
    self.assertTrue(isinstance(next_task.data["sponsor"], dict))
def test_validation_of_workflow_fails_if_workflow_does_not_define_user_for_lane(self):
    """Validation raises an 'invalid_role' ApiError when a lane's user
    cannot be resolved.

    Uses assertRaises instead of the original manual try/except/flag
    pattern, which is the idiomatic unittest way to assert an exception.
    """
    workflow = self.create_workflow('invalid_roles', as_user="******")
    with self.assertRaises(ApiError) as context:
        WorkflowService.test_spec(workflow.workflow_spec_id)
    self.assertEqual("invalid_role", context.exception.code)
def add_workflow_spec_category(body):
    """Create a new workflow-spec category, appending it to the end of the
    display order, and return its serialized form."""
    WorkflowService.cleanup_workflow_spec_category_display_order()
    # New categories go last: display_order == current category count.
    body['display_order'] = session.query(WorkflowSpecCategoryModel).count()
    schema = WorkflowSpecCategoryModelSchema()
    new_category: WorkflowSpecCategoryModel = schema.load(body, session=session)
    session.add(new_category)
    session.commit()
    return schema.dump(new_category)
def get_workflow_from_spec(spec_id):
    """Create (or fetch) a workflow for the current user from the given spec,
    run its engine steps, and return the API representation."""
    model = WorkflowService.get_workflow_from_spec(spec_id, g.user)
    processor = WorkflowProcessor(model)
    processor.do_engine_steps()
    processor.save()
    WorkflowService.update_task_assignments(processor)
    return WorkflowApiSchema().dump(
        WorkflowService.processor_to_workflow_api(processor))
def process_root_directory(self, root_directory):
    """Import categories from the json files at the top level, then recurse
    into each sub-directory."""
    files, directories = self.process_directory(root_directory)

    for entry in files:
        if entry.name.endswith('.json'):
            self.process_category(entry, root_directory)
    # Normalize category display order once the json files are processed.
    WorkflowService.cleanup_workflow_spec_category_display_order()

    for sub_dir in directories:
        self.process_category_directory(os.path.join(root_directory, sub_dir))
def test_enum_options_from_file(self):
    """Enum options loaded from a spreadsheet file populate the form field
    (dict-style options in this version of the API)."""
    self.load_example_data()
    workflow = self.create_workflow('enum_options_from_file')
    processor = WorkflowProcessor(workflow)
    processor.do_engine_steps()
    task = processor.next_task()

    field = task.task_spec.form.fields[0]
    WorkflowService.process_options(task, field)
    options = field.options
    self.assertEqual(28, len(options))
    first = options[0]
    self.assertEqual('1000', first['id'])
    self.assertEqual("UVA - INTERNAL - GM USE ONLY", first['name'])
def test_enum_options_from_file(self):
    """Enum options loaded from a spreadsheet file populate the form field
    (attribute-style options in this version of the API)."""
    self.load_example_data()
    workflow = self.create_workflow('enum_options_from_file')
    processor = WorkflowProcessor(workflow)
    processor.do_engine_steps()
    task = processor.next_task()

    field = task.task_spec.form.fields[0]
    WorkflowService.process_options(task, field)
    options = field.options
    self.assertEqual(29, len(options))
    first = options[0]
    self.assertEqual('0', first.id)
    self.assertEqual("Other", first.name)
def test_study_sponsors_script_validation(self, mock_get):
    """The study_sponsors_data_store spec should validate cleanly with the
    Protocol Builder mocked out."""
    mock_get.return_value.ok = True
    mock_get.return_value.text = self.protocol_builder_response('sponsors.json')
    app.config['PB_ENABLED'] = True
    flask.g.user = UserModel(uid='dhf8r')
    self.load_example_data()
    # study_info script complains if irb_documents.xls is not loaded
    # during the validate phase I'm going to assume that we will never
    # have a case where irb_documents.xls is not loaded ??
    self.load_test_spec("study_sponsors_data_store")
    # This would raise errors if it didn't validate
    WorkflowService.test_spec("study_sponsors_data_store")
def test_waiting_task_error(self):
    """A waiting workflow whose engine steps raise should be moved into the
    'erroring' status by do_waiting()."""
    workflow = self.create_workflow('raise_error')
    workflow.status = WorkflowStatus.waiting
    session.commit()

    def current_status():
        # Read the status straight from the database, not a cached object.
        return session.query(WorkflowModel.status).filter(
            WorkflowModel.id == workflow.id).scalar()

    before = current_status()
    WorkflowService.do_waiting()
    after = current_status()

    self.assertEqual('waiting', before.value)
    self.assertEqual('erroring', after.value)
def test_default_values_for_enum_as_checkbox(self):
    """A checkbox-style enum defaults to an empty list; a radio-style enum
    defaults to None."""
    self.load_test_spec('enum_results')
    workflow = self.create_workflow('enum_results')
    processor = WorkflowProcessor(workflow)
    processor.do_engine_steps()
    task = processor.next_task()

    service = WorkflowService()
    checkbox_field, radio_field = task.task_spec.form.fields[:2]
    self.assertEqual([], service.get_default_value(checkbox_field, task))
    self.assertEqual(None, service.get_default_value(radio_field, task))
def test_required_fields(self):
    """With required_only=True, validation fills required fields but skips
    the optional ones."""
    self.load_example_data()
    spec_model = self.load_test_spec('required_fields')

    # Default run: every field is populated.
    all_fields = WorkflowService.test_spec(spec_model.id)
    self.assertIsNotNone(all_fields)
    self.assertIn('string_required', all_fields)
    self.assertIn('string_not_required', all_fields)

    # Required-only run: optional fields are left out.
    required_fields = WorkflowService.test_spec(spec_model.id, required_only=True)
    self.assertIsNotNone(required_fields)
    self.assertIn('string_required', required_fields)
    self.assertNotIn('string_not_required', required_fields)
def update_task(workflow_id, task_id, body, terminate_loop=None, update_all=False):
    """Complete a form submission for a single task (or, with update_all,
    every remaining instance of a multi-instance task) and return the
    updated workflow API model.

    Raises ApiError(404) for an unknown workflow or task id, and an
    'invalid_state' error when the task is not in the READY state.
    """
    workflow_model = session.query(WorkflowModel).filter_by(id=workflow_id).first()
    if workflow_model is None:
        raise ApiError("invalid_workflow_id",
                       "The given workflow id is not valid.",
                       status_code=404)
    processor = WorkflowProcessor(workflow_model)
    task_id = uuid.UUID(task_id)
    spiff_task = processor.bpmn_workflow.get_task(task_id)
    # BUG FIX: check for a missing task *before* using it — the original
    # called _verify_user_and_role on a possibly-None task, defeating the
    # friendly 404 below.
    if not spiff_task:
        raise ApiError("empty_task", "Processor failed to obtain task.",
                       status_code=404)
    _verify_user_and_role(processor, spiff_task)
    user = UserService.current_user(
        allow_admin_impersonate=False)  # Always log as the real user.

    if spiff_task.state != spiff_task.READY:
        raise ApiError(
            "invalid_state",
            "You may not update a task unless it is in the READY state. "
            "Consider calling a token reset to make this task Ready.")

    if terminate_loop and spiff_task.is_looping():
        spiff_task.terminate_loop()

    # Extract the details specific to the form submitted
    form_data = WorkflowService().extract_form_data(body, spiff_task)

    # Update the task
    __update_task(processor, spiff_task, form_data, user)

    # If we need to update all tasks, then get the next ready task and if it a multi-instance with the same
    # task spec, complete that form as well.
    if update_all:
        last_index = spiff_task.task_info()["mi_index"]
        next_task = processor.next_task()
        while next_task and next_task.task_info()["mi_index"] > last_index:
            __update_task(processor, next_task, form_data, user)
            last_index = next_task.task_info()["mi_index"]
            next_task = processor.next_task()

    WorkflowService.update_task_assignments(processor)
    workflow_api_model = WorkflowService.processor_to_workflow_api(processor)
    return WorkflowApiSchema().dump(workflow_api_model)
def delete_workflow_specification(spec_id):
    """Delete a workflow specification and everything attached to it
    (workflows, files, task events), then re-pack the display order of the
    remaining specs in its category.

    Raises ApiError('unknown_spec') when spec_id is None or unrecognized.
    """
    if spec_id is None:
        raise ApiError('unknown_spec',
                       'Please provide a valid Workflow Specification ID.')
    spec: WorkflowSpecModel = session.query(WorkflowSpecModel).filter_by(
        id=spec_id).first()
    if spec is None:
        raise ApiError(
            'unknown_spec',
            'The Workflow Specification "' + spec_id + '" is not recognized.')
    # BUG FIX: category_id was read before the None check above, so an
    # unknown spec raised AttributeError instead of the intended ApiError.
    category_id = spec.category_id

    # Delete all workflow models related to this specification
    WorkflowService.delete_workflow_spec_workflow_models(spec_id)
    # Delete all files related to this specification
    WorkflowService.delete_workflow_spec_files(spec_id)
    # Delete all events related to this specification
    WorkflowService.delete_workflow_spec_task_events(spec_id)

    # .delete() doesn't work when we need a cascade. Must grab the record,
    # and explicitly delete
    workflow_spec = session.query(WorkflowSpecModel).filter_by(
        id=spec_id).first()
    session.delete(workflow_spec)
    session.commit()

    # Reorder the remaining specs
    WorkflowService.cleanup_workflow_spec_display_order(category_id)
def validate_workflow_specification(spec_id):
    """Validate a spec twice — once populating all fields, once populating
    only required fields — and return any ApiErrors, serialized."""
    errors = []

    # (prefix appended to the error message, test_spec kwargs)
    runs = [
        ("When populating all fields ... ", {}),
        # Run the validation twice; the second time, just populate the
        # required fields.
        ("When populating only required fields ... ", {"required_only": True}),
    ]
    for prefix, kwargs in runs:
        try:
            WorkflowService.test_spec(spec_id, **kwargs)
        except ApiError as ae:
            ae.message = prefix + ae.message
            errors.append(ae)

    return ApiErrorSchema(many=True).dump(errors)
def get_workflow(workflow_id, do_engine_steps=True):
    """Retrieve workflow based on workflow_id, and return it in the last
    saved state.

    If do_engine_steps is False, return the workflow without running any
    engine tasks or logging any events.
    """
    model: WorkflowModel = session.query(WorkflowModel).filter_by(
        id=workflow_id).first()
    processor = WorkflowProcessor(model)
    if do_engine_steps:
        processor.do_engine_steps()
        processor.save()
        WorkflowService.update_task_assignments(processor)
    api_model = WorkflowService.processor_to_workflow_api(processor)
    return WorkflowApiSchema().dump(api_model)
def add_workflow_specification(body):
    """Create a new workflow specification from the request body, appending
    it to the end of its category's display order, and return it serialized.

    ROBUSTNESS: 'category_id', 'library' and 'standalone' are read with
    .get() — the original's direct key access raised KeyError when a caller
    omitted these optional keys.
    """
    category_id = body.get('category_id')
    WorkflowService.cleanup_workflow_spec_display_order(category_id)
    # New specs go last within their category.
    count = session.query(WorkflowSpecModel).filter_by(
        category_id=category_id).count()
    body['display_order'] = count
    # Libraries and standalone workflows don't get a category_id
    if body.get('library') is True or body.get('standalone') is True:
        body['category_id'] = None
    new_spec: WorkflowSpecModel = WorkflowSpecModelSchema().load(
        body, session=session)
    session.add(new_spec)
    session.commit()
    return WorkflowSpecModelSchema().dump(new_spec)
def __update_task(processor, task, data, user):
    """All the things that need to happen when we complete a form, abstracted
    here because we need to do it multiple times when completing all tasks in
    a multi-instance task"""
    task.update_data(data)
    WorkflowService.post_process_form(task)  # some properties may update the data store.
    processor.complete_task(task)
    # Log the action before doing the engine steps, as doing so could affect the state of the task —
    # the workflow could wrap around in the engine steps, and the task could jump from being completed to
    # another state. What we are logging here is the completion.
    WorkflowService.log_task_action(user.uid, processor, task,
                                    WorkflowService.TASK_ACTION_COMPLETE)
    processor.do_engine_steps()
    processor.save()
def test_documentation_processing_handles_conditionals(self):
    """Jinja {% if %} blocks in task documentation are evaluated against
    task data."""
    self.load_example_data()
    workflow = self.create_workflow('random_fact')
    processor = WorkflowProcessor(workflow)
    processor.do_engine_steps()
    task = processor.next_task()
    task.task_spec.documentation = "This test {% if works == 'yes' %}works{% endif %}"

    # Without data the conditional renders empty.
    self.assertEqual("This test ",
                     WorkflowService._process_documentation(task))

    # With matching data the conditional body is included.
    task.data = {"works": 'yes'}
    self.assertEqual("This test works",
                     WorkflowService._process_documentation(task))
def test_timer_event(self):
    """After the only user task completes, the workflow parks on a timer;
    once the timer elapses, do_waiting() should move it out of 'waiting'."""
    workflow = self.create_workflow('timer_event')
    processor = WorkflowProcessor(workflow)
    processor.do_engine_steps()
    processor.complete_task(processor.next_task())
    # No user tasks remain; the workflow is waiting on the timer.
    self.assertEqual([], processor.get_ready_user_tasks())
    processor.save()
    time.sleep(.3)  # our timer is at .25 sec so we have to wait for it
    # get done waiting
    WorkflowService.do_waiting()
    wf = db.session.query(WorkflowModel).filter(
        WorkflowModel.id == workflow.id).first()
    self.assertTrue(wf.status != WorkflowStatus.waiting)
def test_get_task_events(self):
    """The task_events endpoint should succeed for a logged-in user after a
    standalone workflow is created."""
    self.load_example_data()
    spec = ExampleDataLoader().create_spec('hello_world', 'Hello World',
                                           category_id=0, standalone=True,
                                           from_tests=True)
    user = session.query(UserModel).first()
    self.assertIsNotNone(user)
    WorkflowService.get_workflow_from_spec(spec.id, user)

    response = self.app.get(f'/v1.0/task_events',
                            follow_redirects=True,
                            content_type="application/json",
                            headers=self.logged_in_headers())
    self.assert_success(response)
def test_study_sponsors_script_fail(self, mock_get):
    """Running the associate-fail workflow's engine steps should raise an
    ApiError even though the spec itself validates."""
    mock_get.return_value.ok = True
    mock_get.return_value.text = self.protocol_builder_response('sponsors.json')
    flask.g.user = UserModel(uid='dhf8r')
    app.config['PB_ENABLED'] = True
    self.load_example_data()

    study = session.query(StudyModel).first()
    spec = self.load_test_spec("study_sponsors_associate_fail")
    model = StudyService._create_workflow_model(study, spec)
    WorkflowService.test_spec("study_sponsors_associate_fail")

    processor = WorkflowProcessor(model)
    with self.assertRaises(ApiError):
        processor.do_engine_steps()
def reorder_workflow_specification(spec_id, direction):
    """Move a workflow spec one position up or down within its category and
    return the category's specs in their new order."""
    if direction not in ('up', 'down'):
        raise ApiError(code='bad_direction',
                       message='The direction must be `up` or `down`.')

    spec = session.query(WorkflowSpecModel).filter(
        WorkflowSpecModel.id == spec_id).first()
    if not spec:
        raise ApiError(
            code='bad_spec_id',
            message=
            f'The spec_id {spec_id} did not return a specification. Please check that it is valid.'
        )

    # Normalize the display order before moving, so the swap is well-defined.
    WorkflowService.cleanup_workflow_spec_display_order(spec.category_id)
    ordered_specs = WorkflowService.reorder_workflow_spec(spec, direction)
    return WorkflowSpecModelSchema(many=True).dump(ordered_specs)
def test_enum_defaults_correctly_populated(self):
    """A required-only validation run should still fill an enum field with
    its declared default value."""
    self.load_example_data()
    spec_model = self.load_test_spec('required_fields')
    data = WorkflowService.test_spec(spec_model.id, required_only=True)
    self.assertIsNotNone(data)
    self.assertIn('enum_with_default', data)
    self.assertEqual('maybe', data['enum_with_default'])
def test_waiting_event_error(self):
    """do_waiting() should log one ERROR mentioning the workflow and study
    ids, and move a failing waiting workflow into the 'erroring' status."""
    workflow = self.create_workflow('timer_event_error')
    processor = WorkflowProcessor(workflow)
    processor.do_engine_steps()
    processor.save()
    time.sleep(.3)  # our timer is at .25 sec so we have to wait for it
    # get done waiting
    wf = db.session.query(WorkflowModel).filter(
        WorkflowModel.id == workflow.id).first()
    self.assertTrue(wf.status == WorkflowStatus.waiting)

    with self.assertLogs('crc', level='ERROR') as cm:
        WorkflowService.do_waiting()
    self.assertEqual(1, len(cm.output))
    # (the redundant f-prefix on these %-formatted patterns was dropped)
    self.assertRegex(cm.output[0], "workflow #%i" % workflow.id)
    self.assertRegex(cm.output[0], "study #%i" % workflow.study_id)

    # BUG FIX: re-query after do_waiting() — the original asserted on the
    # ORM object fetched *before* the status change, which only passed if
    # the session happened to refresh it. Mirrors test_waiting_task_error.
    wf = db.session.query(WorkflowModel).filter(
        WorkflowModel.id == workflow.id).first()
    self.assertTrue(wf.status == WorkflowStatus.erroring)