Example #1
 def clean_db():
     session.flush()  # Clear out any transactions before deleting it all to avoid spurious errors.
     for table in reversed(db.metadata.sorted_tables):
         session.execute(table.delete())
     session.commit()
     session.flush()
Example #2
 def clean_db():
     session.flush()  # Clear out any transactions before deleting it all to avoid spurious errors.
     engine = session.bind.engine
     connection = engine.connect()
     for table in reversed(db.metadata.sorted_tables):
         if engine.dialect.has_table(connection, table.name):  # has_table expects the table name, not the Table object
             session.execute(table.delete())
     session.commit()
     session.flush()
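The two variants above differ in one guard: the second checks engine.dialect.has_table() before issuing each DELETE, so it also works against a database where some mapped tables were never created. A hypothetical pytest wiring for either variant (the fixture name and the autouse choice are assumptions, not from the source):

 import pytest

 @pytest.fixture(autouse=True)
 def _clean_database():
     yield
     clean_db()  # wipe every table after each test so cases stay independent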
Example #3
    def update_file(file_model, binary_data, content_type):
        session.flush()  # Ensure the database is up-to-date before running this.

        latest_data_model = session.query(FileDataModel). \
            filter(FileDataModel.file_model_id == file_model.id).\
            order_by(desc(FileDataModel.date_created)).first()

        md5_checksum = UUID(hashlib.md5(binary_data).hexdigest())
        size = len(binary_data)

        if (latest_data_model is not None) and (md5_checksum == latest_data_model.md5_hash):
            # This file does not need to be updated; it's the same file. If it is archived,
            # then de-archive it.
            file_model.archived = False
            session.add(file_model)
            session.commit()
            return file_model

        # Verify the extension
        file_extension = FileService.get_extension(file_model.name)
        if file_extension not in FileType._member_names_:
            raise ApiError(
                'unknown_extension',
                'The file you provided does not have an accepted extension: ' +
                file_extension,
                status_code=404)
        else:
            file_model.type = FileType[file_extension]
            file_model.content_type = content_type
            file_model.archived = False  # Unarchive the file if it is archived.

        if latest_data_model is None:
            version = 1
        else:
            version = latest_data_model.version + 1

        try:
            user_uid = UserService.current_user().uid
        except ApiError:  # no authenticated user; record the version without a uid
            user_uid = None
        new_file_data_model = FileDataModel(data=binary_data,
                                            file_model_id=file_model.id,
                                            file_model=file_model,
                                            version=version,
                                            md5_hash=md5_checksum,
                                            size=size,
                                            user_uid=user_uid)
        session.add_all([file_model, new_file_data_model])
        session.commit()
        session.flush()  # Ensure the id is set on the model before returning it.

        return file_model
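A side note on the checksum above: an MD5 digest is 128 bits, exactly the width of a UUID, so the hex digest can be reinterpreted as a UUID and stored in a native UUID column. A standalone illustration with nothing app-specific:

    import hashlib
    from uuid import UUID

    data = b"example bytes"
    checksum = UUID(hashlib.md5(data).hexdigest())  # 32 hex chars map onto one 128-bit UUID
    print(checksum)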
Example #4
    def load_example_data(self, use_crc_data=False, use_rrt_data=False):
        """use_crc_data will cause this to load the mammoth collection of documents
        we built up developing crc, use_rrt_data will do the same for hte rrt project,
         otherwise it depends on a small setup for running tests."""
        from example_data import ExampleDataLoader
        ExampleDataLoader.clean_db()
        # If in production mode, only add the first user.
        if app.config['PRODUCTION']:
            ldap_info = LdapService.user_info(self.users[0]['uid'])
            session.add(
                UserModel(uid=self.users[0]['uid'], ldap_info=ldap_info))
        else:
            for user_json in self.users:
                ldap_info = LdapService.user_info(user_json['uid'])
                session.add(
                    UserModel(uid=user_json['uid'], ldap_info=ldap_info))

        if use_crc_data:
            ExampleDataLoader().load_all()
        elif use_rrt_data:
            ExampleDataLoader().load_rrt()
        else:
            ExampleDataLoader().load_test_data()

        session.commit()
        for study_json in self.studies:
            study_model = StudyModel(**study_json)
            session.add(study_model)
            StudyService._add_all_workflow_specs_to_study(study_model)
            session.commit()
            update_seq = f"ALTER SEQUENCE %s RESTART WITH %s" % (
                StudyModel.__tablename__ + '_id_seq', study_model.id + 1)
            print("Update Sequence." + update_seq)
            session.execute(update_seq)
        session.flush()

        specs = session.query(WorkflowSpecModel).all()
        self.assertIsNotNone(specs)

        for spec in specs:
            files = session.query(FileModel).filter_by(
                workflow_spec_id=spec.id).all()
            self.assertIsNotNone(files)
            self.assertGreater(len(files), 0)
            for file in files:
                # file_data = session.query(FileDataModel).filter_by(file_model_id=file.id).all()
                file_data = SpecFileService().get_spec_file_data(file.id).data
                self.assertIsNotNone(file_data)
                self.assertGreater(len(file_data), 0)
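For reference, the ALTER SEQUENCE statement built in the loop above renders to plain SQL; assuming StudyModel.__tablename__ is 'study' (a hypothetical value) and the study just inserted got id 42, it comes out as:

    update_seq = "ALTER SEQUENCE %s RESTART WITH %s" % ('study' + '_id_seq', 42 + 1)
    # -> ALTER SEQUENCE study_id_seq RESTART WITH 43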
Example #5
 def test_list_multiple_files_for_workflow_spec(self):
     self.load_example_data()
     spec = self.load_test_spec("random_fact")
     svg_file = FileModel(name="test.svg",
                          type=FileType.svg,
                          primary=False,
                          workflow_spec_id=spec.id)
     session.add(svg_file)
     session.flush()
     rv = self.app.get('/v1.0/file?workflow_spec_id=%s' % spec.id,
                       follow_redirects=True,
                       content_type="application/json",
                       headers=self.logged_in_headers())
     self.assert_success(rv)
     json_data = json.loads(rv.get_data(as_text=True))
     self.assertEqual(2, len(json_data))
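Why flush() rather than commit() is enough here: the flush emits the pending INSERT inside the test's open transaction and populates the new row's primary key, and the request handler, presumably sharing the same scoped session, can already see the row. A minimal sketch of the flush-populates-id behavior (standard SQLAlchemy, reusing the model from above):

     svg_file = FileModel(name="test.svg", type=FileType.svg)
     session.add(svg_file)
     assert svg_file.id is None       # nothing sent to the database yet
     session.flush()
     assert svg_file.id is not None   # INSERT issued, primary key assigned, still uncommitted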
Example #6
def upgrade():
    """Move file data from the database to the filesystem and delete the superseded lookup and file-data rows."""
    bind = op.get_bind()
    session = sa.orm.Session(bind=bind)

    op.drop_table('workflow_spec_dependency_file')
    op.add_column('lookup_file',
                  sa.Column('file_model_id', sa.Integer(), nullable=True))
    op.add_column('lookup_file',
                  sa.Column('last_updated', sa.DateTime(), nullable=True))
    op.create_foreign_key(None, 'lookup_file', 'file', ['file_model_id'],
                          ['id'])

    processed_files = []
    location = SpecFileService.get_sync_file_root()
    if os.path.exists(location):
        rmtree(location)
    # Process workflow spec files
    files = session.query(FileModel).filter(
        FileModel.workflow_spec_id.isnot(None)).all()  # `is not None` would be evaluated in Python and match every row
    for file in files:
        if file.archived is not True:
            ToFilesystemService().write_file_to_system(session, file, location)
            processed_files.append(file.id)

    # Process reference files
    # get_reference_files only returns files where archived is False
    reference_files = ReferenceFileService.get_reference_files()
    for reference_file in reference_files:
        ToFilesystemService().write_file_to_system(session, reference_file,
                                                   location)
        processed_files.append(reference_file.id)

    session.flush()
    lookups = session.query(LookupFileModel).all()
    for lookup in lookups:
        session.delete(lookup)
    session.commit()
    for file_id in processed_files:
        processed_data_models = session.query(FileDataModel).filter(
            FileDataModel.file_model_id == file_id).all()
        for processed_data_model in processed_data_models:
            session.delete(processed_data_model)
            session.commit()
        print(f'upgrade: in processed files: file_id: {file_id}')
    print('upgrade: done')
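A pitfall worth calling out in the query above: writing `FileModel.workflow_spec_id is not None` compares the Column object with None in plain Python, which is always True, so the filter would silently match every row; the SQL-level test needs isnot(). A quick illustration (assuming FileModel maps the 'file' table, as the foreign key earlier in the migration suggests):

    print(FileModel.workflow_spec_id is not None)  # True, a Python bool, not SQL
    print(FileModel.workflow_spec_id.isnot(None))  # renders as: file.workflow_spec_id IS NOT NULL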
Example #7
    def update_file(file_model, binary_data, content_type):
        session.flush()  # Ensure the database is up-to-date before running this.

        latest_data_model = session.query(FileDataModel). \
            filter(FileDataModel.file_model_id == file_model.id).\
            order_by(desc(FileDataModel.date_created)).first()

        md5_checksum = UUID(hashlib.md5(binary_data).hexdigest())
        if (latest_data_model is not None) and (md5_checksum == latest_data_model.md5_hash):
            # This file does not need to be updated; it's the same file. If it is archived,
            # then de-archive it.
            file_model.archived = False
            session.add(file_model)
            session.commit()
            return file_model

        # Verify the extension
        file_extension = FileService.get_extension(file_model.name)
        if file_extension not in FileType._member_names_:
            raise ApiError('unknown_extension',
                           'The file you provided does not have an accepted extension: ' +
                           file_extension, status_code=404)
        else:
            file_model.type = FileType[file_extension]
            file_model.content_type = content_type
            file_model.archived = False  # Unarchive the file if it is archived.

        if latest_data_model is None:
            version = 1
        else:
            version = latest_data_model.version + 1

        # If this is a BPMN, extract the process id.
        if file_model.type == FileType.bpmn:
            bpmn: etree.Element = etree.fromstring(binary_data)
            file_model.primary_process_id = FileService.get_process_id(bpmn)

        new_file_data_model = FileDataModel(
            data=binary_data, file_model_id=file_model.id, file_model=file_model,
            version=version, md5_hash=md5_checksum, date_created=datetime.now()
        )
        session.add_all([file_model, new_file_data_model])
        session.commit()
        session.flush()  # Ensure the id is set on the model before returning it.

        return file_model
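The BPMN branch is the only substantive difference from Example #3. Here is a minimal, self-contained illustration of parsing a BPMN document with lxml and reading the process id; the namespace URI is the standard BPMN 2.0 MODEL namespace, and the find() call mirrors what FileService.get_process_id likely does:

    from lxml import etree

    BPMN_NS = '{http://www.omg.org/spec/BPMN/20100524/MODEL}'
    bpmn_xml = (b'<definitions xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL">'
                b'<process id="my_process"/></definitions>')
    root = etree.fromstring(bpmn_xml)
    print(root.find(BPMN_NS + 'process').get('id'))  # prints: my_process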
Example #8
 def create_workflow(self,
                     dir_name,
                     display_name=None,
                     study=None,
                     category_id=None,
                     as_user="******"):
     session.flush()
     spec = session.query(WorkflowSpecModel).filter(
         WorkflowSpecModel.id == dir_name).first()
     if spec is None:
         if display_name is None:
             display_name = dir_name
         spec = self.load_test_spec(dir_name,
                                    display_name,
                                    category_id=category_id)
     if study is None:
         study = self.create_study(uid=as_user)
     workflow_model = StudyService._create_workflow_model(study, spec)
     return workflow_model
Example #9
    def _add_lots_of_random_approvals(self,
                                      n=100,
                                      workflow_spec_name="random_fact"):
        num_studies_before = db.session.query(StudyModel).count()
        statuses = [name for name, value in ApprovalStatus.__members__.items()]

        # Add a whole bunch of approvals with random statuses
        for i in range(n):
            approver_uids = random.choices(["lb3dp", "dhf8r"])  # k defaults to 1: a single-element list
            self._create_study_workflow_approvals(
                user_uid=random.choice(["lb3dp", "dhf8r"]),
                title="".join(random.choices(string.ascii_lowercase, k=64)),
                primary_investigator_id=random.choice(["lb3dp", "dhf8r"]),
                approver_uids=approver_uids,
                statuses=random.choices(statuses, k=len(approver_uids)),
                workflow_spec_name=workflow_spec_name)

        session.flush()
        num_studies_after = db.session.query(StudyModel).count()
        self.assertEqual(num_studies_after, num_studies_before + n)
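Worth noting in the example above: random.choices defaults to k=1, so approver_uids is always a one-element list; drawing several approvers needs an explicit k, and sampling is with replacement. A standalone illustration:

    import random

    print(random.choices(["lb3dp", "dhf8r"]))       # e.g. ['dhf8r'], k defaults to 1
    print(random.choices(["lb3dp", "dhf8r"], k=3))  # e.g. ['lb3dp', 'lb3dp', 'dhf8r'], with replacement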