def get_spec(files: List[FileModel], workflow_spec_id):
    """Returns a SpiffWorkflow specification for the given workflow spec,
    using the files provided. The workflow_spec_id is only used to generate
    better error messages."""
    parser = WorkflowProcessor.get_parser()
    process_id = None
    for file in files:
        data = SpecFileService().get_spec_file_data(file.id).data
        if file.type == FileType.bpmn:
            bpmn: etree.Element = etree.fromstring(data)
            if file.primary and file.workflow_spec_id == workflow_spec_id:
                process_id = SpecFileService.get_process_id(bpmn)
            parser.add_bpmn_xml(bpmn, filename=file.name)
        elif file.type == FileType.dmn:
            dmn: etree.Element = etree.fromstring(data)
            parser.add_dmn_xml(dmn, filename=file.name)
    if process_id is None:
        raise ApiError(code="no_primary_bpmn_error",
                       message="There is no primary BPMN model defined for workflow %s" % workflow_spec_id)
    try:
        spec = parser.get_spec(process_id)
    except ValidationException as ve:
        raise ApiError(code="workflow_validation_error",
                       message="Failed to parse the Workflow Specification. "
                               "Error is '%s.'" % str(ve),
                       file_name=ve.filename,
                       task_id=ve.id,
                       tag=ve.tag)
    return spec
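# A minimal usage sketch for get_spec, assuming a WorkflowSpecModel named
# spec_model is already loaded. The function and variable names in this
# sketch are illustrative, not part of the API above.
def example_get_spec_usage(spec_model):
    # Gather every file for the spec (including library files) and parse
    # them into a single SpiffWorkflow specification.
    files = SpecFileService().get_spec_files(spec_model.id,
                                             include_libraries=True)
    return WorkflowProcessor.get_spec(files, spec_model.id)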
def test_lookup_fails_for_xls(self):
    BaseTest.load_test_spec('enum_options_with_search')

    # Using an old xls file should raise an error
    file_model_xls = session.query(FileModel).filter(
        FileModel.name == 'sponsors.xls').first()
    file_data_xls = SpecFileService().get_spec_file_data(file_model_xls.id)
    with self.assertRaises(ApiError) as ae:
        LookupService.build_lookup_table(file_model_xls.id, 'sponsors.xls',
                                         file_data_xls.data,
                                         'CUSTOMER_NUMBER', 'CUSTOMER_NAME')
    self.assertIn('Error opening excel file', ae.exception.args[0])

    # Using an xlsx file should work
    file_model_xlsx = session.query(FileModel).filter(
        FileModel.name == 'sponsors.xlsx').first()
    file_data_xlsx = SpecFileService().get_spec_file_data(file_model_xlsx.id)
    lookup_model = LookupService.build_lookup_table(
        file_model_xlsx.id, 'sponsors.xlsx', file_data_xlsx.data,
        'CUSTOMER_NUMBER', 'CUSTOMER_NAME')
    self.assertEqual(28, len(lookup_model.dependencies))
    self.assertIn('CUSTOMER_NAME', lookup_model.dependencies[0].data.keys())
    self.assertIn('CUSTOMER_NUMBER', lookup_model.dependencies[0].data.keys())
def process_template(self, task, study_id, workflow=None, *args, **kwargs):
    """Entry point, mostly worried about wiring it all up."""
    if len(args) < 2 or len(args) > 3:
        raise ApiError(
            code="missing_argument",
            message="The CompleteTemplate script requires 2 arguments. The first argument is "
                    "the name of the docx template to use. The second "
                    "argument is a code for the document, as "
                    "set in the reference document %s. " % FileService.DOCUMENT_LIST)
    task_study_id = task.workflow.data[WorkflowProcessor.STUDY_ID_KEY]
    file_name = args[0]

    if task_study_id != study_id:
        raise ApiError(code="invalid_argument",
                       message="The given task does not match the given study.")

    file_data = None
    if workflow is not None:
        # Get the workflow specification file with the given name.
        file_models = SpecFileService().get_spec_files(
            workflow_spec_id=workflow.workflow_spec_id, file_name=file_name)
        if len(file_models) > 0:
            file_model = file_models[0]
        else:
            raise ApiError(code="invalid_argument",
                           message="Unable to locate a file with the given name.")
        file_data = SpecFileService().get_spec_file_data(file_model.id).data

    # Get images from file/files fields
    if len(args) == 3:
        image_file_data = self.get_image_file_data(args[2], task)
    else:
        image_file_data = None

    try:
        return JinjaService().make_template(BytesIO(file_data), task.data,
                                            image_file_data)
    except ApiError as ae:
        # In some cases we want to provide a very specific error, that does not get
        # obscured when going through the python expression engine. We can do that by
        # throwing a WorkflowTaskExecException, which the expression engine should just
        # pass through.
        raise WorkflowTaskExecException(task, ae.message, exception=ae,
                                        line_number=ae.line_number,
                                        error_line=ae.error_line)
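# A hedged sketch of the call shape process_template enforces above: args[0]
# is the docx template file name, args[1] is a document code from the
# reference document list, and an optional args[2] names a file field that
# holds images. The literals below are placeholders, not real documents.
#
#   CompleteTemplate().process_template(task, study_id, workflow,
#                                       "my_template.docx", "Study_DocCode")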
def test_delete_workflow_spec_file(self):
    self.load_example_data()
    file_model = session.query(FileModel).filter(
        column('workflow_spec_id').isnot(None)).first()
    file_data_before = SpecFileService().get_spec_file_data(file_model.id).data
    self.assertGreater(len(file_data_before), 0)

    SpecFileService().delete_spec_file(file_model.id)

    with self.assertRaises(ApiError) as ae:
        SpecFileService().get_spec_file_data(file_model.id)
    self.assertIn('No model found for file with file_id',
                  ae.exception.message)
def run_master_spec(spec_model, study):
    """Executes a BPMN specification for the given study, without recording
    any information to the database. Useful for running the master
    specification, which should not persist."""
    lasttime = firsttime()
    spec_files = SpecFileService().get_spec_files(spec_model.id,
                                                  include_libraries=True)
    lasttime = sincetime('load Files', lasttime)
    spec = WorkflowProcessor.get_spec(spec_files, spec_model.id)
    lasttime = sincetime('get spec', lasttime)
    try:
        bpmn_workflow = BpmnWorkflow(
            spec, script_engine=WorkflowProcessor._script_engine)
        bpmn_workflow.data[WorkflowProcessor.STUDY_ID_KEY] = study.id
        bpmn_workflow.data[WorkflowProcessor.VALIDATION_PROCESS_KEY] = False
        lasttime = sincetime('get_workflow', lasttime)
        bpmn_workflow.do_engine_steps()
        lasttime = sincetime('run steps', lasttime)
    except WorkflowException as we:
        raise ApiError.from_task_spec("error_running_master_spec", str(we),
                                      we.sender)
    if not bpmn_workflow.is_completed():
        raise ApiError(
            "master_spec_not_automatic",
            "The master spec should only contain fully automated tasks, "
            "it failed to complete.")
    return bpmn_workflow.last_task.data
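# A minimal sketch, assuming a master spec model and a study are already in
# hand; nothing is persisted, and the return value is the plain data dict
# from the last completed task. The names here are illustrative.
def example_run_master_spec(master_spec_model, study):
    task_data = WorkflowProcessor.run_master_spec(master_spec_model, study)
    return task_data  # e.g. values computed by the master spec's script tasks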
def __get_lookup_model(workflow, task_spec_id, field_id):
    lookup_model = db.session.query(LookupFileModel) \
        .filter(LookupFileModel.workflow_spec_id == workflow.workflow_spec_id) \
        .filter(LookupFileModel.field_id == field_id) \
        .filter(LookupFileModel.task_spec_id == task_spec_id) \
        .order_by(desc(LookupFileModel.id)).first()

    # The above may return a model, and if it does, it might still be out of date.
    # We need to check the file date to assure we have the most recent file.
    is_current = False
    if lookup_model:
        if lookup_model.is_ldap:  # LDAP is always current
            is_current = True
        else:
            current_date = SpecFileService().last_modified(
                lookup_model.file_model.id)
            is_current = current_date == lookup_model.last_updated

    if not is_current:
        # Very very very expensive, but we don't need this till we do.
        logging.warning(
            "!!!! Making a very expensive call to update the lookup models.")
        lookup_model = LookupService.create_lookup_model(
            workflow, task_spec_id, field_id)

    return lookup_model
def downgrade():
    # TODO: This is a work in progress, and depends on what we do in upgrade()
    op.add_column('lookup_file',
                  sa.Column('file_data_model_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'lookup_file', 'file',
                          ['file_data_model_id'], ['id'])
    op.drop_constraint('lookup_file_file_model_id_key', 'lookup_file',
                       type_='foreignkey')
    op.drop_column('lookup_file', 'file_model_id')

    op.create_table(
        'workflow_spec_dependency_file',
        sa.Column('file_data_id', sa.Integer(), nullable=False),
        sa.Column('workflow_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['file_data_id'], ['file_data.id']),
        sa.ForeignKeyConstraint(['workflow_id'], ['workflow.id']),
        sa.PrimaryKeyConstraint('file_data_id', 'workflow_id'))

    location = SpecFileService.get_sync_file_root()
    FromFilesystemService().update_file_metadata_from_filesystem(location)
    print('downgrade: ')
def test_file_upload_with_previous_name(self):
    self.load_example_data()
    workflow_spec_model = session.query(WorkflowSpecModel).first()

    # Add file
    data = {'file': (io.BytesIO(b'asdf'), 'test_file.xlsx')}
    rv = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % workflow_spec_model.id,
                       data=data,
                       follow_redirects=True,
                       content_type='multipart/form-data',
                       headers=self.logged_in_headers())
    self.assert_success(rv)
    file_json = json.loads(rv.get_data(as_text=True))
    file_id = file_json['id']

    # Set file to archived
    file_model = session.query(FileModel).filter_by(id=file_id).first()
    file_model.archived = True
    session.commit()

    # Assert we have the correct file data and the file is archived
    file_data_model = SpecFileService().get_spec_file_data(file_model.id)
    self.assertEqual(b'asdf', file_data_model.data)
    file_model = session.query(FileModel).filter_by(id=file_model.id).first()
    self.assertEqual(True, file_model.archived)

    # Upload file with same name
    data = {'file': (io.BytesIO(b'xyzpdq'), 'test_file.xlsx')}
    rv = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % workflow_spec_model.id,
                       data=data,
                       follow_redirects=True,
                       content_type='multipart/form-data',
                       headers=self.logged_in_headers())
    self.assert_success(rv)
    file_json = json.loads(rv.get_data(as_text=True))
    file_id = file_json['id']

    # Assert we have the correct file data and the file is *not* archived
    file_data_model = SpecFileService().get_spec_file_data(file_id)
    self.assertEqual(b'xyzpdq', file_data_model.data)
    file_model = session.query(FileModel).filter_by(id=file_id).first()
    self.assertEqual(False, file_model.archived)
def test_get_spec_files(self):
    self.load_example_data()
    spec = session.query(WorkflowSpecModel.id).first()
    spec_files = SpecFileService().get_spec_files(spec.id)
    workflow = session.query(WorkflowModel).first()
    processor = WorkflowProcessor(workflow)
    self.assertIsInstance(processor, WorkflowProcessor)
def get_spec_files(workflow_spec_id, include_libraries=False):
    if workflow_spec_id is None:
        raise ApiError(code='missing_spec_id',
                       message='Please specify the workflow_spec_id.')
    file_models = SpecFileService.get_spec_files(
        workflow_spec_id=workflow_spec_id,
        include_libraries=include_libraries)
    files = [to_file_api(model) for model in file_models]
    return FileSchema(many=True).dump(files)
def create_spec(self, id, display_name="", description="", filepath=None,
                master_spec=False, category_id=None, display_order=None,
                from_tests=False, standalone=False, library=False):
    """Assumes that a directory exists in static/bpmn with the same name as
    the given id. Further assumes that [id].bpmn is the primary file for the
    workflow. Returns the new workflow spec model."""
    spec = WorkflowSpecModel(id=id,
                             display_name=display_name,
                             description=description,
                             is_master_spec=master_spec,
                             category_id=category_id,
                             display_order=display_order,
                             standalone=standalone,
                             library=library)
    db.session.add(spec)
    db.session.commit()
    if not filepath and not from_tests:
        filepath = os.path.join(app.root_path, 'static', 'bpmn', id, "*.*")
    if not filepath and from_tests:
        filepath = os.path.join(app.root_path, '..', 'tests', 'data', id, "*.*")
    files = glob.glob(filepath)
    for file_path in files:
        if os.path.isdir(file_path):
            continue  # Don't try to process sub directories
        noise, file_extension = os.path.splitext(file_path)
        filename = os.path.basename(file_path)
        is_status = filename.lower() == 'status.bpmn'
        is_primary = filename.lower() == id + '.bpmn'
        try:
            with open(file_path, 'rb') as file:
                data = file.read()
            content_type = CONTENT_TYPES[file_extension[1:]]
            SpecFileService.add_workflow_spec_file(workflow_spec=spec,
                                                   name=filename,
                                                   content_type=content_type,
                                                   binary_data=data,
                                                   primary=is_primary,
                                                   is_status=is_status)
        except IsADirectoryError:
            # Ignore sub directories
            pass
    return spec
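# Illustrative call, assuming create_spec lives on the ExampleDataLoader used
# elsewhere in these tests and that a tests/data/hello_world directory holds
# a hello_world.bpmn primary file. All names here are placeholders.
#
#   spec = ExampleDataLoader().create_spec(id='hello_world',
#                                          display_name='Hello World',
#                                          description='A trivial spec',
#                                          from_tests=True)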
def test_add_workflow_spec_file(self):
    self.load_example_data()
    spec = db.session.query(WorkflowSpecModel).first()
    file_data = b"abcdef"
    file_name = 'random_fact.svg'
    content_type = CONTENT_TYPES[file_name[-3:]]

    # This creates a file on the filesystem
    file_model = SpecFileService().add_workflow_spec_file(
        spec, file_name, content_type, file_data)

    # This reads from a file on the filesystem
    spec_file_data = SpecFileService().get_spec_file_data(file_model.id).data

    self.assertEqual(file_data, spec_file_data)
def to_file_api(file_model):
    """Converts a FileModel object to something we can return via the api"""
    if file_model.workflow_spec_id is not None:
        file_data_model = SpecFileService().get_spec_file_data(file_model.id)
    elif file_model.is_reference:
        file_data_model = ReferenceFileService().get_reference_file_data(
            file_model.name)
    else:
        file_data_model = FileService.get_file_data(file_model.id)
    return File.from_models(file_model, file_data_model,
                            DocumentService.get_dictionary())
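# Sketch of the dispatch above: the data source depends on where the file
# lives (spec file on the filesystem, reference file by name, or plain file
# in the database). The pattern below mirrors how the API endpoints in this
# module serialize the result; the helper name is illustrative.
def example_file_to_api_json(file_model):
    return FileSchema().dump(to_file_api(file_model))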
def update_spec_file_info(file_id, body):
    if file_id is None:
        raise ApiError('no_such_file', 'Please provide a valid File ID.')
    file_model = session.query(FileModel).filter(
        FileModel.id == file_id).first()
    if file_model is None:
        raise ApiError('unknown_file_model',
                       f'The file_model "{file_id}" is not recognized.')
    new_file_model = SpecFileService().update_spec_file_info(file_model, body)
    return FileSchema().dump(to_file_api(new_file_model))
def test_updates_to_file_cause_lookup_rebuild(self):
    spec = BaseTest.load_test_spec('enum_options_with_search')
    workflow = self.create_workflow('enum_options_with_search')
    file_model = session.query(FileModel).filter(
        FileModel.name == "sponsors.xlsx").first()
    LookupService.lookup(workflow, "Task_Enum_Lookup", "sponsor", "sam",
                         limit=10)
    lookup_records = session.query(LookupFileModel).all()
    self.assertIsNotNone(lookup_records)
    self.assertEqual(1, len(lookup_records))
    lookup_record = lookup_records[0]
    lookup_data = session.query(LookupDataModel).filter(
        LookupDataModel.lookup_file_model == lookup_record).all()
    self.assertEqual(28, len(lookup_data))

    # Update the workflow specification file.
    file_path = os.path.join(app.root_path, '..', 'tests', 'data',
                             'enum_options_with_search',
                             'sponsors_modified.xlsx')
    with open(file_path, 'rb') as file:
        if file_model.workflow_spec_id is not None:
            workflow_spec_model = session.query(WorkflowSpecModel).filter(
                WorkflowSpecModel.id == file_model.workflow_spec_id).first()
            SpecFileService().update_spec_file_data(workflow_spec_model,
                                                    file_model.name,
                                                    file.read())
        elif file_model.is_reference:
            ReferenceFileService().update_reference_file(file_model,
                                                         file.read())
        else:
            FileService.update_file(file_model, file.read(),
                                    CONTENT_TYPES['xlsx'])

    # Restart the workflow, so it can pick up the changes.
    processor = WorkflowProcessor.reset(workflow)
    workflow = processor.workflow_model

    LookupService.lookup(workflow, "Task_Enum_Lookup", "sponsor", "sam",
                         limit=10)
    lookup_records = session.query(LookupFileModel).all()
    lookup_record = lookup_records[0]
    lookup_data = session.query(LookupDataModel).filter(
        LookupDataModel.lookup_file_model == lookup_record).all()
    self.assertEqual(4, len(lookup_data))
def replace_file(self, name, file_path):
    """Replaces a stored file with the given name with the contents of the
    file at the given path."""
    with open(file_path, "rb") as file:
        data = file.read()
    file_model = session.query(FileModel).filter(
        FileModel.name == name).first()
    workflow_spec_model = session.query(WorkflowSpecModel).filter(
        WorkflowSpecModel.id == file_model.workflow_spec_id).first()
    SpecFileService().update_spec_file_data(workflow_spec_model,
                                            file_model.name, data)
def test_update_file_data(self):
    self.load_example_data()
    spec = session.query(WorkflowSpecModel).first()
    data = {'file': (io.BytesIO(self.minimal_bpmn("abcdef")),
                     'my_new_file.bpmn')}
    rv_1 = self.app.post('/v1.0/spec_file?workflow_spec_id=%s' % spec.id,
                         data=data,
                         follow_redirects=True,
                         content_type='multipart/form-data',
                         headers=self.logged_in_headers())
    file_json_1 = json.loads(rv_1.get_data(as_text=True))
    self.assertEqual(80, file_json_1['size'])
    file_id = file_json_1['id']

    rv_2 = self.app.get('/v1.0/spec_file/%i/data' % file_id,
                        headers=self.logged_in_headers())
    self.assert_success(rv_2)
    rv_data_2 = rv_2.get_data()
    self.assertIsNotNone(rv_data_2)
    self.assertEqual(self.minimal_bpmn("abcdef"), rv_data_2)

    data['file'] = io.BytesIO(self.minimal_bpmn("efghijk")), 'my_new_file.bpmn'
    rv_3 = self.app.put('/v1.0/spec_file/%i/data' % file_id,
                        data=data,
                        follow_redirects=True,
                        content_type='multipart/form-data',
                        headers=self.logged_in_headers())
    self.assert_success(rv_3)
    self.assertIsNotNone(rv_3.get_data())
    file_json_3 = json.loads(rv_3.get_data(as_text=True))
    self.assertEqual(FileType.bpmn.value, file_json_3['type'])
    self.assertEqual("application/octet-stream", file_json_3['content_type'])
    self.assertEqual(spec.id, file_json_3['workflow_spec_id'])

    # Assure it is updated in the database and properly persisted.
    file_model = session.query(FileModel).filter(
        FileModel.id == file_id).first()
    file_data = SpecFileService().get_spec_file_data(file_model.id)
    self.assertEqual(81, len(file_data.data))

    rv_4 = self.app.get('/v1.0/spec_file/%i/data' % file_id,
                        headers=self.logged_in_headers())
    self.assert_success(rv_4)
    data = rv_4.get_data()
    self.assertIsNotNone(data)
    self.assertEqual(self.minimal_bpmn("efghijk"), data)
def load_example_data(self, use_crc_data=False, use_rrt_data=False):
    """use_crc_data will cause this to load the mammoth collection of
    documents we built up developing crc, use_rrt_data will do the same for
    the rrt project, otherwise it depends on a small setup for running
    tests."""
    from example_data import ExampleDataLoader
    ExampleDataLoader.clean_db()

    # If in production mode, only add the first user.
    if app.config['PRODUCTION']:
        ldap_info = LdapService.user_info(self.users[0]['uid'])
        session.add(UserModel(uid=self.users[0]['uid'], ldap_info=ldap_info))
    else:
        for user_json in self.users:
            ldap_info = LdapService.user_info(user_json['uid'])
            session.add(UserModel(uid=user_json['uid'], ldap_info=ldap_info))

    if use_crc_data:
        ExampleDataLoader().load_all()
    elif use_rrt_data:
        ExampleDataLoader().load_rrt()
    else:
        ExampleDataLoader().load_test_data()

    session.commit()
    for study_json in self.studies:
        study_model = StudyModel(**study_json)
        session.add(study_model)
        StudyService._add_all_workflow_specs_to_study(study_model)
        session.commit()
        update_seq = "ALTER SEQUENCE %s RESTART WITH %s" % (
            StudyModel.__tablename__ + '_id_seq', study_model.id + 1)
        print("Update Sequence." + update_seq)
        session.execute(update_seq)
    session.flush()

    specs = session.query(WorkflowSpecModel).all()
    self.assertIsNotNone(specs)

    for spec in specs:
        files = session.query(FileModel).filter_by(
            workflow_spec_id=spec.id).all()
        self.assertIsNotNone(files)
        self.assertGreater(len(files), 0)
        for file in files:
            file_data = SpecFileService().get_spec_file_data(file.id).data
            self.assertIsNotNone(file_data)
            self.assertGreater(len(file_data), 0)
def __init__(self, workflow_model: WorkflowModel, validate_only=False):
    """Create a Workflow Processor based on the serialized information
    available in the workflow model."""
    self.workflow_model = workflow_model
    spec = None
    if workflow_model.bpmn_workflow_json is None:
        self.spec_files = SpecFileService().get_spec_files(
            workflow_spec_id=workflow_model.workflow_spec_id,
            include_libraries=True)
        spec = self.get_spec(self.spec_files,
                             workflow_model.workflow_spec_id)
    self.workflow_spec_id = workflow_model.workflow_spec_id

    try:
        self.bpmn_workflow = self.__get_bpmn_workflow(workflow_model, spec,
                                                      validate_only)
        self.bpmn_workflow.script_engine = self._script_engine

        if UserService.has_user():
            current_user = UserService.current_user(
                allow_admin_impersonate=True)
            current_user_data = UserModelSchema().dump(current_user)
            tasks = self.bpmn_workflow.get_tasks(SpiffTask.READY)
            for task in tasks:
                task.data['current_user'] = current_user_data

        if self.WORKFLOW_ID_KEY not in self.bpmn_workflow.data:
            if not workflow_model.id:
                session.add(workflow_model)
                session.commit()
            # If the model is new, and has no id, save it, write it into the
            # workflow model and save it again. In this way, the workflow
            # process is always aware of the database model to which it is
            # associated, and scripts running within the model can then load
            # data as needed.
            self.bpmn_workflow.data[WorkflowProcessor.WORKFLOW_ID_KEY] = \
                workflow_model.id
            workflow_model.bpmn_workflow_json = \
                WorkflowProcessor._serializer.serialize_workflow(
                    self.bpmn_workflow, include_spec=True)
            self.save()
    except MissingSpecError as ke:
        raise ApiError(code="unexpected_workflow_structure",
                       message="Failed to deserialize workflow"
                               " '%s' due to a mis-placed or missing task '%s'"
                               % (self.workflow_spec_id, str(ke)))
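# Typical construction, as exercised by the tests in this change set: build
# a processor from a persisted WorkflowModel, then drive the engine. The
# variable names below are illustrative.
#
#   processor = WorkflowProcessor(workflow_model)
#   processor.bpmn_workflow.do_engine_steps()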
def upgrade():
    bind = op.get_bind()
    session = sa.orm.Session(bind=bind)

    op.drop_table('workflow_spec_dependency_file')
    op.add_column('lookup_file',
                  sa.Column('file_model_id', sa.Integer(), nullable=True))
    op.add_column('lookup_file',
                  sa.Column('last_updated', sa.DateTime(), nullable=True))
    op.create_foreign_key(None, 'lookup_file', 'file', ['file_model_id'],
                          ['id'])

    processed_files = []
    location = SpecFileService.get_sync_file_root()
    if os.path.exists(location):
        rmtree(location)

    # Process workflow spec files
    files = session.query(FileModel).filter(
        FileModel.workflow_spec_id.isnot(None)).all()
    for file in files:
        if file.archived is not True:
            ToFilesystemService().write_file_to_system(session, file,
                                                       location)
            processed_files.append(file.id)

    # Process reference files
    # get_reference_files only returns files where archived is False
    reference_files = ReferenceFileService.get_reference_files()
    for reference_file in reference_files:
        ToFilesystemService().write_file_to_system(session, reference_file,
                                                   location)
        processed_files.append(reference_file.id)

    session.flush()
    lookups = session.query(LookupFileModel).all()
    for lookup in lookups:
        session.delete(lookup)
    session.commit()
    for file_id in processed_files:
        processed_data_models = session.query(FileDataModel).filter(
            FileDataModel.file_model_id == file_id).all()
        for processed_data_model in processed_data_models:
            session.delete(processed_data_model)
        session.commit()
        print(f'upgrade: in processed files: file_id: {file_id}')
    print('upgrade: done: ')
def get_spec_file_data(file_id):
    file_model = session.query(FileModel).filter(
        FileModel.id == file_id).first()
    if file_model is not None:
        file_data_model = SpecFileService().get_spec_file_data(file_id)
        if file_data_model is not None:
            return send_file(
                io.BytesIO(file_data_model.data),
                attachment_filename=file_model.name,
                mimetype=file_model.content_type,
                cache_timeout=-1)  # Don't cache these files on the browser.
        else:
            raise ApiError(
                code='missing_data_model',
                message=f'The data model for file {file_id} does not exist.')
    else:
        raise ApiError(
            code='missing_file_model',
            message=f'The file model for file_id {file_id} does not exist.')
def add_spec_file(workflow_spec_id):
    if workflow_spec_id:
        file = connexion.request.files['file']
        # Check whether we already have a primary file; if not, make this one primary.
        have_primary = FileModel.query.filter(
            FileModel.workflow_spec_id == workflow_spec_id,
            FileModel.type == FileType.bpmn,
            FileModel.primary == True).all()
        primary = not have_primary
        workflow_spec = session.query(WorkflowSpecModel).filter_by(
            id=workflow_spec_id).first()
        file_model = SpecFileService.add_workflow_spec_file(
            workflow_spec, file.filename, file.content_type,
            file.stream.read(), primary=primary)
        return FileSchema().dump(to_file_api(file_model))
    else:
        raise ApiError(code='missing_workflow_spec_id',
                       message="You must include a workflow_spec_id")
def get_file_data_link(file_id, auth_token, version=None):
    if not verify_token(auth_token):
        raise ApiError(
            'not_authenticated',
            'You need to include an authorization token in the URL with this')
    file_model = session.query(FileModel).filter(
        FileModel.id == file_id).first()
    if file_model.workflow_spec_id is not None:
        file_data = SpecFileService().get_spec_file_data(file_id)
    elif file_model.is_reference:
        # Reference file data is keyed by name, as in to_file_api above.
        file_data = ReferenceFileService().get_reference_file_data(
            file_model.name)
    else:
        file_data = FileService.get_file_data(file_id, version)
    if file_data is None:
        raise ApiError('no_such_file',
                       f'The file id you provided ({file_id}) does not exist')
    return send_file(
        io.BytesIO(file_data.data),
        attachment_filename=file_model.name,
        mimetype=file_model.content_type,
        cache_timeout=-1,  # Don't cache these files on the browser.
        last_modified=file_data.date_created,
        as_attachment=True)
def update_spec_file_data(file_id):
    file_model = session.query(FileModel).filter_by(
        id=file_id).with_for_update().first()
    if file_model is None:
        raise ApiError('no_such_file',
                       f'The file id you provided ({file_id}) does not exist')
    if file_model.workflow_spec_id is None:
        raise ApiError(
            code='no_spec_id',
            message=f'There is no workflow_spec_id for file {file_id}.')
    workflow_spec_model = session.query(WorkflowSpecModel).filter(
        WorkflowSpecModel.id == file_model.workflow_spec_id).first()
    if workflow_spec_model is None:
        raise ApiError(
            code='missing_spec',
            message=f'The workflow spec for id {file_model.workflow_spec_id} '
                    f'does not exist.')
    file = connexion.request.files['file']
    SpecFileService().update_spec_file_data(workflow_spec_model,
                                            file_model.name,
                                            file.stream.read())
    return FileSchema().dump(to_file_api(file_model))
def write_reference_file_info_to_system(file_path, file_model):
    SpecFileService.write_file_info_to_system(file_path, file_model)
def delete_spec_file(file_id):
    SpecFileService.delete_spec_file(file_id)
def clear_test_sync_files():
    sync_file_root = SpecFileService().get_sync_file_root()
    if os.path.exists(sync_file_root):
        shutil.rmtree(sync_file_root)
def get_reference_file_path(file_name):
    sync_file_root = SpecFileService().get_sync_file_root()
    return os.path.join(sync_file_root, 'Reference', file_name)
def create_lookup_model(workflow_model, task_spec_id, field_id):
    """This is all really expensive, but should happen just once (per file
    change). Checks to see if the options are provided in a separate lookup
    table associated with the workflow, and if so, assures that data exists
    in the database and returns a LookupFileModel that can be used to locate
    that data."""
    processor = WorkflowProcessor(workflow_model)  # VERY expensive, ludicrous for lookup / type ahead
    spec, field = processor.find_spec_and_field(task_spec_id, field_id)

    # Clear out all existing lookup models for this workflow and field.
    existing_models = db.session.query(LookupFileModel) \
        .filter(LookupFileModel.workflow_spec_id == workflow_model.workflow_spec_id) \
        .filter(LookupFileModel.task_spec_id == task_spec_id) \
        .filter(LookupFileModel.field_id == field_id).all()
    for model in existing_models:
        # Do it one at a time to cause the required cascade of deletes.
        db.session.delete(model)

    # Use the contents of a file to populate enum field options
    if field.has_property(Task.FIELD_PROP_SPREADSHEET_NAME):
        if not (field.has_property(Task.FIELD_PROP_VALUE_COLUMN) or
                field.has_property(Task.FIELD_PROP_LABEL_COLUMN)):
            raise ApiError.from_task_spec(
                "invalid_enum",
                "For enumerations based on an xls file, you must include 3 properties: "
                "%s, %s, and %s" % (Task.FIELD_PROP_SPREADSHEET_NAME,
                                    Task.FIELD_PROP_VALUE_COLUMN,
                                    Task.FIELD_PROP_LABEL_COLUMN),
                task_spec=spec)

        # Get the file data from the File Service
        file_name = field.get_property(Task.FIELD_PROP_SPREADSHEET_NAME)
        value_column = field.get_property(Task.FIELD_PROP_VALUE_COLUMN)
        label_column = field.get_property(Task.FIELD_PROP_LABEL_COLUMN)
        latest_files = SpecFileService().get_spec_files(
            workflow_spec_id=workflow_model.workflow_spec_id,
            file_name=file_name)
        if len(latest_files) < 1:
            raise ApiError(
                "invalid_enum",
                "Unable to locate the lookup data file '%s'" % file_name)
        else:
            file = latest_files[0]

        file_data = SpecFileService().get_spec_file_data(file.id).data

        lookup_model = LookupService.build_lookup_table(
            file.id, file_name, file_data, value_column, label_column,
            workflow_model.workflow_spec_id, task_spec_id, field_id)

    # Use the results of an LDAP request to populate enum field options
    elif field.has_property(Task.FIELD_PROP_LDAP_LOOKUP):
        lookup_model = LookupFileModel(
            workflow_spec_id=workflow_model.workflow_spec_id,
            task_spec_id=task_spec_id,
            field_id=field_id,
            is_ldap=True)
    else:
        raise ApiError.from_task_spec(
            "unknown_lookup_option",
            "Lookup supports using spreadsheet or LDAP options, "
            "and neither of those was provided.", spec)

    db.session.add(lookup_model)
    db.session.commit()
    return lookup_model
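# Callers normally reach create_lookup_model indirectly: LookupService.lookup
# consults __get_lookup_model first and only rebuilds when the backing
# spreadsheet has changed. A hedged sketch, mirroring the test usage earlier
# in this change set (field and task names are placeholders):
#
#   results = LookupService.lookup(workflow, "Task_Enum_Lookup", "sponsor",
#                                  "sam", limit=10)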