Example #1
    def test_update_workflow_specification(self):
        self.load_example_data()

        category_id = 99
        category = WorkflowSpecCategoryModel(id=category_id,
                                             name='trap',
                                             display_name="It's a trap!",
                                             display_order=0)
        session.add(category)
        session.commit()

        db_spec_before: WorkflowSpecModel = session.query(
            WorkflowSpecModel).first()
        spec_id = db_spec_before.id
        self.assertNotEqual(db_spec_before.category_id, category_id)

        db_spec_before.category_id = category_id
        rv = self.app.put('/v1.0/workflow-specification/%s' % spec_id,
                          content_type="application/json",
                          headers=self.logged_in_headers(),
                          data=json.dumps(
                              WorkflowSpecModelSchema().dump(db_spec_before)))
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        api_spec = WorkflowSpecModelSchema().load(json_data, session=session)
        self.assertEqual(db_spec_before, api_spec)

        db_spec_after: WorkflowSpecModel = session.query(
            WorkflowSpecModel).filter_by(id=spec_id).first()
        self.assertIsNotNone(db_spec_after.category_id)
        self.assertIsNotNone(db_spec_after.category)
        self.assertEqual(db_spec_after.category.display_name,
                         category.display_name)
        self.assertEqual(db_spec_after.category.display_order,
                         category.display_order)
Example #2
    def process_workflow_spec(json_file, directory):
        file_path = os.path.join(directory, json_file)

        with open(file_path, 'r') as f_open:
            data = f_open.read()
            data_obj = json.loads(data)
            workflow_spec_model = session.query(WorkflowSpecModel).\
                filter(WorkflowSpecModel.id == data_obj['id']).\
                first()
            if not workflow_spec_model:
                category_id = None
                if data_obj['category'] is not None:
                    category_id = session.query(
                        WorkflowSpecCategoryModel.id).filter(
                            WorkflowSpecCategoryModel.display_name ==
                            data_obj['category']['display_name']).scalar()
                workflow_spec_model = WorkflowSpecModel(
                    id=data_obj['id'],
                    display_name=data_obj['display_name'],
                    description=data_obj['description'],
                    is_master_spec=data_obj['is_master_spec'],
                    category_id=category_id,
                    display_order=data_obj['display_order'],
                    standalone=data_obj['standalone'],
                    library=data_obj['library'])
                session.add(workflow_spec_model)
                session.commit()

            return workflow_spec_model
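A minimal driver sketch for the loader above, assuming each workflow spec was exported as its own JSON file; the directory layout and the helper name are illustrative, not part of the original code.

import os

def import_all_workflow_specs(directory):
    # Hypothetical caller: feed every exported *.json file to process_workflow_spec.
    for json_file in sorted(os.listdir(directory)):
        if json_file.endswith('.json'):
            process_workflow_spec(json_file, directory)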
Example #3
    def synch_with_protocol_builder_if_enabled(user):
        """Assures that the studies we have locally for the given user are
        in sync with the studies available in protocol builder. """

        if ProtocolBuilderService.is_enabled():

            app.logger.info("The Protocol Builder is enabled. app.config['PB_ENABLED'] = " +
                            str(app.config['PB_ENABLED']))

            # Get studies matching this user from Protocol Builder
            pb_studies: List[ProtocolBuilderStudy] = ProtocolBuilderService.get_studies(user.uid)

            # Get studies from the database
            db_studies = session.query(StudyModel).filter_by(user_uid=user.uid).all()

            # Update all studies from the protocol builder, create new studies as needed.
            # Further assures that every active study (that does exist in the protocol builder)
            # has a reference to every available workflow (though some may not have started yet)
            for pb_study in pb_studies:
                db_study = next((s for s in db_studies if s.id == pb_study.STUDYID), None)
                if not db_study:
                    db_study = StudyModel(id=pb_study.STUDYID)
                    session.add(db_study)
                    db_studies.append(db_study)
                db_study.update_from_protocol_builder(pb_study)
                StudyService._add_all_workflow_specs_to_study(db_study)

            # Mark studies as inactive that are no longer in Protocol Builder
            for study in db_studies:
                pb_study = next((pbs for pbs in pb_studies if pbs.STUDYID == study.id), None)
                if not pb_study:
                    study.protocol_builder_status = ProtocolBuilderStatus.ABANDONED

            db.session.commit()
Example #4
def update_or_create_current_file(remote, workflow_spec_id, updatefile):
    currentfile = file_get(workflow_spec_id, updatefile['filename'])
    if not currentfile:
        currentfile = FileModel()
        currentfile.name = updatefile['filename']
        if workflow_spec_id == 'REFERENCE_FILES':
            currentfile.workflow_spec_id = None
            currentfile.is_reference = True
        else:
            currentfile.workflow_spec_id = workflow_spec_id

    currentfile.date_created = updatefile['date_created']
    currentfile.type = updatefile['type']
    currentfile.primary = updatefile['primary']
    currentfile.content_type = updatefile['content_type']
    currentfile.primary_process_id = updatefile['primary_process_id']
    session.add(currentfile)
    try:
        content = WorkflowSyncService.get_remote_file_by_hash(
            remote, updatefile['md5_hash'])
        FileService.update_file(currentfile, content, updatefile['type'])
    except ApiError:
        # Remote file doesn't exist, so don't update it.
        print("Remote file " + currentfile.name +
              " does not exist, so not syncing.")
Example #5
def add_study(body):
    """Or any study like object. Body should include a title, and primary_investigator_id """
    if 'primary_investigator_id' not in body:
        raise ApiError(
            "missing_pi",
            "Can't create a new study without a Primary Investigator.")
    if 'title' not in body:
        raise ApiError("missing_title",
                       "Can't create a new study without a title.")

    study_model = StudyModel(
        user_uid=UserService.current_user().uid,
        title=body['title'],
        primary_investigator_id=body['primary_investigator_id'],
        last_updated=datetime.utcnow(),
        status=StudyStatus.in_progress)
    session.add(study_model)
    StudyService.add_study_update_event(study_model,
                                        status=StudyStatus.in_progress,
                                        event_type=StudyEventType.user,
                                        user_uid=g.user.uid)

    errors = StudyService._add_all_workflow_specs_to_study(study_model)
    session.commit()
    study = StudyService().get_study(study_model.id, do_status=True)
    study_data = StudySchema().dump(study)
    study_data["errors"] = ApiErrorSchema(many=True).dump(errors)
    return study_data
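A hedged test sketch in the style of the other tests here; it assumes add_study is wired to POST /v1.0/study, which is not shown in the example itself.

    def test_add_study(self):
        self.load_example_data()
        study_details = {'title': 'Beer consumption in the bipedal software engineer',
                         'primary_investigator_id': 'lb3dp'}
        # Assumed route; adjust to however add_study is registered in the API spec.
        rv = self.app.post('/v1.0/study',
                           content_type="application/json",
                           headers=self.logged_in_headers(),
                           data=json.dumps(study_details))
        self.assert_success(rv)
        json_data = json.loads(rv.get_data(as_text=True))
        self.assertEqual('Beer consumption in the bipedal software engineer',
                         json_data['title'])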
Example #6
    def reset(workflow_model, clear_data=False, delete_files=False):
        print('WorkflowProcessor: reset: ')

        # Try to execute a cancel notify
        try:
            wp = WorkflowProcessor(workflow_model)
            # This executes a notification to all endpoints that the workflow was cancelled.
            wp.cancel_notify()
        except Exception as e:
            app.logger.error(
                f"Unable to send a cancel notify for workflow %s during a reset."
                f" Continuing with the reset anyway so we don't get in an unresolvable"
                f" state. An %s error occured with the following information: %s"
                % (workflow_model.id, e.__class__.__name__, str(e)))
        workflow_model.bpmn_workflow_json = None
        if clear_data:
            # Clear form_data from task_events
            task_events = session.query(TaskEventModel). \
                filter(TaskEventModel.workflow_id == workflow_model.id).all()
            for task_event in task_events:
                task_event.form_data = {}
                session.add(task_event)
        if delete_files:
            files = FileModel.query.filter(
                FileModel.workflow_id == workflow_model.id).all()
            for file in files:
                FileService.delete_file(file.id)
        session.commit()
        return WorkflowProcessor(workflow_model)
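A short usage sketch for reset; it assumes the method is exposed as a static method on WorkflowProcessor, which the missing self parameter suggests.

# Hypothetical call: wipe the serialized BPMN state and task form data, keep the files.
workflow_model = session.query(WorkflowModel).first()
processor = WorkflowProcessor.reset(workflow_model, clear_data=True, delete_files=False)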
Example #7
    def update_workflow_spec_file_model(workflow_spec: WorkflowSpecModel,
                                        file_model: FileModel, binary_data,
                                        content_type):
        # Verify the extension
        file_extension = FileService.get_extension(file_model.name)
        if file_extension not in FileType._member_names_:
            raise ApiError(
                'unknown_extension',
                'The file you provided does not have an accepted extension:' +
                file_extension,
                status_code=404)
        else:
            file_model.type = FileType[file_extension]
            file_model.content_type = content_type
            file_model.archived = False  # Unarchive the file if it is archived.

        # If this is a BPMN, extract the process id.
        if file_model.type == FileType.bpmn:
            try:
                bpmn: etree.Element = etree.fromstring(binary_data)
                file_model.primary_process_id = SpecFileService.get_process_id(
                    bpmn)
                file_model.is_review = FileService.has_swimlane(bpmn)
            except etree.XMLSyntaxError as xse:
                raise ApiError("invalid_xml",
                               "Failed to parse xml: " + str(xse),
                               file_name=file_model.name)

        session.add(file_model)
        session.commit()

        return file_model
Example #8
 def _create_workflow_model(study: StudyModel, spec):
     workflow_model = WorkflowModel(status=WorkflowStatus.not_started,
                                    study=study,
                                    workflow_spec_id=spec.id,
                                    last_updated=datetime.now())
     session.add(workflow_model)
     session.commit()
     return workflow_model
Example #9
def add_workflow_spec_category(body):
    WorkflowService.cleanup_workflow_spec_category_display_order()
    count = session.query(WorkflowSpecCategoryModel).count()
    body['display_order'] = count
    schema = WorkflowSpecCategoryModelSchema()
    new_cat: WorkflowSpecCategoryModel = schema.load(body, session=session)
    session.add(new_cat)
    session.commit()
    return schema.dump(new_cat)
Example #10
    def update_file(file_model, binary_data, content_type):
        session.flush()  # Assure the database is up-to-date before running this.

        latest_data_model = session.query(FileDataModel). \
            filter(FileDataModel.file_model_id == file_model.id).\
            order_by(desc(FileDataModel.date_created)).first()

        md5_checksum = UUID(hashlib.md5(binary_data).hexdigest())
        size = len(binary_data)

        if (latest_data_model is not None) and (md5_checksum
                                                == latest_data_model.md5_hash):
            # This file does not need to be updated; it's the same file.  If it is archived,
            # then de-archive it.
            file_model.archived = False
            session.add(file_model)
            session.commit()
            return file_model

        # Verify the extension
        file_extension = FileService.get_extension(file_model.name)
        if file_extension not in FileType._member_names_:
            raise ApiError(
                'unknown_extension',
                'The file you provided does not have an accepted extension:' +
                file_extension,
                status_code=404)
        else:
            file_model.type = FileType[file_extension]
            file_model.content_type = content_type
            file_model.archived = False  # Unarchive the file if it is archived.

        if latest_data_model is None:
            version = 1
        else:
            version = latest_data_model.version + 1

        try:
            user_uid = UserService.current_user().uid
        except ApiError:
            user_uid = None
        new_file_data_model = FileDataModel(data=binary_data,
                                            file_model_id=file_model.id,
                                            file_model=file_model,
                                            version=version,
                                            md5_hash=md5_checksum,
                                            size=size,
                                            user_uid=user_uid)
        session.add_all([file_model, new_file_data_model])
        session.commit()
        session.flush()  # Assure the id is set on the model before returning it.

        return file_model
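The de-duplication above hinges on storing the MD5 digest as a UUID; a self-contained sketch of just that comparison, independent of the models here.

import hashlib
from uuid import UUID

def same_content(stored_md5: UUID, new_binary_data: bytes) -> bool:
    # An MD5 hex digest is 32 hex characters, so it parses directly as a UUID.
    return stored_md5 == UUID(hashlib.md5(new_binary_data).hexdigest())

assert same_content(UUID(hashlib.md5(b"abc").hexdigest()), b"abc")
assert not same_content(UUID(hashlib.md5(b"abc").hexdigest()), b"abd")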
Example #11
 def create_user(self,
                 uid="dhf8r",
                 email="*****@*****.**",
                 display_name="Hoopy Frood"):
     user = session.query(UserModel).filter(UserModel.uid == uid).first()
     if user is None:
         ldap_user = LdapService.user_info(uid)
         user = UserModel(uid=uid, ldap_info=ldap_user)
         session.add(user)
         session.commit()
     return user
Example #12
 def save(self):
     """Saves the current state of this processor to the database """
     self.workflow_model.bpmn_workflow_json = self.serialize()
     complete_states = [SpiffTask.CANCELLED, SpiffTask.COMPLETED]
     tasks = list(self.get_all_user_tasks())
     self.workflow_model.status = self.get_status()
     self.workflow_model.total_tasks = len(tasks)
     self.workflow_model.completed_tasks = sum(
         1 for t in tasks if t.state in complete_states)
     self.workflow_model.last_updated = datetime.utcnow()
     session.add(self.workflow_model)
     session.commit()
Example #13
def update_file_info(file_id, body):
    if file_id is None:
        raise ApiError('no_such_file', 'Please provide a valid File ID.')

    file_model = session.query(FileModel).filter_by(id=file_id).first()

    if file_model is None:
        raise ApiError('unknown_file_model', 'The file_model "' + file_id + '" is not recognized.')

    file_model = FileModelSchema().load(body, session=session)
    session.add(file_model)
    session.commit()
    return FileSchema().dump(to_file_api(file_model))
Example #14
 def test_delete_study_with_workflow_and_status(self):
     self.load_example_data()
     workflow = session.query(WorkflowModel).first()
     stats2 = TaskEventModel(study_id=workflow.study_id,
                             workflow_id=workflow.id,
                             user_uid=self.users[0]['uid'])
     session.add(stats2)
     session.commit()
     rv = self.app.delete('/v1.0/study/%i' % workflow.study_id,
                          headers=self.logged_in_headers())
     self.assert_success(rv)
     del_study = session.query(StudyModel).filter(
         StudyModel.id == workflow.study_id).first()
     self.assertIsNone(del_study)
Example #15
def update_study(study_id, body):
    if study_id is None:
        raise ApiError('unknown_study', 'Please provide a valid Study ID.')

    study_model = session.query(StudyModel).filter_by(id=study_id).first()
    if study_model is None:
        raise ApiError('unknown_study',
                       'The study "' + study_id + '" is not recognized.')

    study: Study = StudySchema().load(body)
    study.update_model(study_model)
    session.add(study_model)
    session.commit()
    return StudySchema().dump(study)
Example #16
def add_workflow_specification(body):
    category_id = body['category_id']
    WorkflowService.cleanup_workflow_spec_display_order(category_id)
    count = session.query(WorkflowSpecModel).filter_by(
        category_id=category_id).count()
    body['display_order'] = count
    # Libraries and standalone workflows don't get a category_id
    if body['library'] is True or body['standalone'] is True:
        body['category_id'] = None
    new_spec: WorkflowSpecModel = WorkflowSpecModelSchema().load(
        body, session=session)
    session.add(new_spec)
    session.commit()
    return WorkflowSpecModelSchema().dump(new_spec)
Example #17
def update_workflow_specification(spec_id, body):
    if spec_id is None:
        raise ApiError('unknown_spec',
                       'Please provide a valid Workflow Spec ID.')
    spec = session.query(WorkflowSpecModel).filter_by(id=spec_id).first()

    if spec is None:
        raise ApiError('unknown_study',
                       'The spec "' + spec_id + '" is not recognized.')

    schema = WorkflowSpecModelSchema()
    spec = schema.load(body, session=session, instance=spec, partial=True)
    session.add(spec)
    session.commit()
    return schema.dump(spec)
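The partial-update pattern used here (and in Examples #13, #20 and #26) leans on marshmallow-sqlalchemy loading straight into an existing instance. A minimal, self-contained sketch of that pattern; NoteModel and NoteSchema are invented for illustration and assume SQLAlchemy 1.4+ with marshmallow-sqlalchemy's load_instance option.

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema

Base = declarative_base()

class NoteModel(Base):
    __tablename__ = 'note'
    id = Column(Integer, primary_key=True)
    title = Column(String)
    body = Column(String)

class NoteSchema(SQLAlchemyAutoSchema):
    class Meta:
        model = NoteModel
        load_instance = True  # load() returns a model instance instead of a dict

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

note = NoteModel(title='old', body='unchanged')
session.add(note)
session.commit()

# partial=True skips fields missing from the payload; instance=note mutates the existing row.
updated = NoteSchema().load({'title': 'new'}, session=session, instance=note, partial=True)
session.commit()
assert updated is note and note.title == 'new' and note.body == 'unchanged'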
Example #18
def _upsert_user(ldap_info):
    user = session.query(UserModel).filter(UserModel.uid == ldap_info.uid).first()

    if user is None:
        # Add new user
        user = UserModel()
    else:
        user = session.query(UserModel).filter(UserModel.uid == ldap_info.uid).with_for_update().first()

    user.uid = ldap_info.uid
    user.ldap_info = ldap_info

    session.add(user)
    session.commit()
    return user
Example #19
 def create_study(self,
                  uid="dhf8r",
                  title="Beer consumption in the bipedal software engineer",
                  primary_investigator_id="lb3dp"):
     study = session.query(StudyModel).filter_by(user_uid=uid).filter_by(
         title=title).first()
     if study is None:
         user = self.create_user(uid=uid)
         study = StudyModel(title=title,
                            status=StudyStatus.in_progress,
                            user_uid=user.uid,
                            primary_investigator_id=primary_investigator_id)
         session.add(study)
         session.commit()
     return study
Example #20
def update_datastore(id, body):
    """allow a modification to a datastore item """
    if id is None:
        raise ApiError('unknown_id', 'Please provide a valid ID.')

    item = session.query(DataStoreModel).filter_by(id=id).first()
    if item is None:
        raise ApiError('unknown_item',
                       'The item "' + id + '" is not recognized.')

    DataStoreSchema().load(body, instance=item, session=session)
    item.last_updated = datetime.utcnow()
    session.add(item)
    session.commit()
    return DataStoreSchema().dump(item)
Example #21
 def test_list_multiple_files_for_workflow_spec(self):
     self.load_example_data()
     spec = self.load_test_spec("random_fact")
     svgFile = FileModel(name="test.svg",
                         type=FileType.svg,
                         primary=False,
                         workflow_spec_id=spec.id)
     session.add(svgFile)
     session.flush()
     rv = self.app.get('/v1.0/file?workflow_spec_id=%s' % spec.id,
                       follow_redirects=True,
                       content_type="application/json",
                       headers=self.logged_in_headers())
     self.assert_success(rv)
     json_data = json.loads(rv.get_data(as_text=True))
     self.assertEqual(2, len(json_data))
Example #22
    def __init__(self, workflow_model: WorkflowModel, validate_only=False):
        """Create a Workflow Processor based on the serialized information available in the workflow model."""

        self.workflow_model = workflow_model

        spec = None
        if workflow_model.bpmn_workflow_json is None:
            self.spec_files = SpecFileService().get_spec_files(
                workflow_spec_id=workflow_model.workflow_spec_id,
                include_libraries=True)
            spec = self.get_spec(self.spec_files,
                                 workflow_model.workflow_spec_id)

        self.workflow_spec_id = workflow_model.workflow_spec_id

        try:
            self.bpmn_workflow = self.__get_bpmn_workflow(
                workflow_model, spec, validate_only)
            self.bpmn_workflow.script_engine = self._script_engine

            if UserService.has_user():
                current_user = UserService.current_user(
                    allow_admin_impersonate=True)
                current_user_data = UserModelSchema().dump(current_user)
                tasks = self.bpmn_workflow.get_tasks(SpiffTask.READY)
                for task in tasks:
                    task.data['current_user'] = current_user_data

            if self.WORKFLOW_ID_KEY not in self.bpmn_workflow.data:
                if not workflow_model.id:
                    session.add(workflow_model)
                    session.commit()
                    # If the model is new, and has no id, save it, write it into the workflow model
                    # and save it again.  In this way, the workflow process is always aware of the
                    # database model to which it is associated, and scripts running within the model
                    # can then load data as needed.
                self.bpmn_workflow.data[
                    WorkflowProcessor.WORKFLOW_ID_KEY] = workflow_model.id
                workflow_model.bpmn_workflow_json = WorkflowProcessor._serializer.serialize_workflow(
                    self.bpmn_workflow, include_spec=True)
                self.save()

        except MissingSpecError as ke:
            raise ApiError(code="unexpected_workflow_structure",
                           message="Failed to deserialize workflow"
                           " '%s'  due to a mis-placed or missing task '%s'" %
                           (self.workflow_spec_id, str(ke)))
Example #23
    def update_file(file_model, binary_data, content_type):
        session.flush()  # Assure the database is up-to-date before running this.

        latest_data_model = session.query(FileDataModel). \
            filter(FileDataModel.file_model_id == file_model.id).\
            order_by(desc(FileDataModel.date_created)).first()

        md5_checksum = UUID(hashlib.md5(binary_data).hexdigest())
        if (latest_data_model is not None) and (md5_checksum == latest_data_model.md5_hash):
            # This file does not need to be updated; it's the same file.  If it is archived,
            # then de-archive it.
            file_model.archived = False
            session.add(file_model)
            session.commit()
            return file_model

        # Verify the extension
        file_extension = FileService.get_extension(file_model.name)
        if file_extension not in FileType._member_names_:
            raise ApiError('unknown_extension',
                           'The file you provided does not have an accepted extension:' +
                           file_extension, status_code=404)
        else:
            file_model.type = FileType[file_extension]
            file_model.content_type = content_type
            file_model.archived = False  # Unarchive the file if it is archived.

        if latest_data_model is None:
            version = 1
        else:
            version = latest_data_model.version + 1

        # If this is a BPMN, extract the process id.
        if file_model.type == FileType.bpmn:
            bpmn: etree.Element = etree.fromstring(binary_data)
            file_model.primary_process_id = FileService.get_process_id(bpmn)

        new_file_data_model = FileDataModel(
            data=binary_data, file_model_id=file_model.id, file_model=file_model,
            version=version, md5_hash=md5_checksum, date_created=datetime.now()
        )
        session.add_all([file_model, new_file_data_model])
        session.commit()
        session.flush()  # Assure the id is set on the model before returning it.

        return file_model
Example #24
def add_workflow_spec_library(spec_id, library_id):
    validate_spec_and_library(spec_id, library_id)
    libraries: WorkflowLibraryModel = session.query(
        WorkflowLibraryModel).filter_by(workflow_spec_id=spec_id).all()
    libraryids = [x.library_spec_id for x in libraries]
    if library_id in libraryids:
        raise ApiError(
            'unknown_spec',
            'The Library Specification "' + spec_id + '" is already attached.')
    newlib = WorkflowLibraryModel()
    newlib.workflow_spec_id = spec_id
    newlib.library_spec_id = library_id
    session.add(newlib)
    session.commit()
    libraries: WorkflowLibraryModel = session.query(
        WorkflowLibraryModel).filter_by(workflow_spec_id=spec_id).all()
    return WorkflowLibraryModelSchema(many=True).dump(libraries)
Example #25
    def update_study_associates(study_id, associates):
        """
        updates the list of associates in the database for a study_id and a list
        of dicts that contains associates
        """
        if study_id is None:
            raise ApiError('study_id not specified',
                           "This function requires the study_id parameter")

        for person in associates:
            if not LdapService.user_exists(person.get('uid',
                                                      'impossible_uid')):
                if person.get('uid', 'impossible_uid') == 'impossible_uid':
                    raise ApiError(
                        'associate with no uid',
                        "One of the associates passed as a parameter doesn't have "
                        "a uid specified.")
                raise ApiError(
                    'trying_to_grant_access_to_user_not_found_in_ldap',
                    "You are trying to grant access to "
                    "%s, but that user was not found in "
                    "ldap "
                    "- please check to ensure it is a "
                    "valid uva uid" % person.get('uid'))

        study = db.session.query(StudyModel).filter(
            StudyModel.id == study_id).first()
        if study is None:
            raise ApiError('study_id not found',
                           "A study with id# %d was not found" % study_id)

        db.session.query(StudyAssociated).filter(
            StudyAssociated.study_id == study_id).delete()
        for person in associates:
            newAssociate = StudyAssociated()
            newAssociate.study_id = study_id
            newAssociate.uid = person['uid']
            newAssociate.role = person.get('role', None)
            newAssociate.send_email = person.get('send_email', False)
            newAssociate.access = person.get('access', False)
            session.add(newAssociate)
        session.commit()
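The expected shape of the associates argument can be read off the keys accessed above; a hypothetical call, assuming the method lives on StudyService like the other study helpers in these examples.

associates = [
    {'uid': 'dhf8r', 'role': 'Department Contact', 'send_email': True, 'access': True},
    {'uid': 'lb3dp', 'role': 'Primary Investigator', 'send_email': False, 'access': True},
]
StudyService.update_study_associates(study_id=1, associates=associates)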
Example #26
def update_workflow_spec_category(cat_id, body):
    if cat_id is None:
        raise ApiError('unknown_category',
                       'Please provide a valid Workflow Spec Category ID.')

    category = session.query(WorkflowSpecCategoryModel).filter_by(
        id=cat_id).first()

    if category is None:
        raise ApiError('unknown_category',
                       'The category "' + cat_id + '" is not recognized.')

    schema = WorkflowSpecCategoryModelSchema()
    category = schema.load(body,
                           session=session,
                           instance=category,
                           partial=True)
    session.add(category)
    session.commit()
    return schema.dump(category)
Example #27
    def test_reset_workflow_from_broken_spec(self):
        # Start the basic two_forms workflow and complete a task.
        workflow = self.create_workflow('two_forms')
        workflow_api = self.get_workflow_api(workflow)
        self.complete_form(workflow, workflow_api.next_task, {"color": "blue"})
        # self.assertTrue(workflow_api.is_latest_spec)

        # Break the bpmn json
        workflow.bpmn_workflow_json = '{"something":"broken"}'
        session.add(workflow)
        session.commit()

        # Try to load the workflow, we should get an error
        with self.assertRaises(Exception):
            workflow_api = self.complete_form(workflow, workflow_api.next_task,
                                              {"name": "Dan"})

        # Now, Reset the workflow, and we should not get an error
        workflow_api = self.restart_workflow_api(workflow_api, clear_data=True)
        self.assertIsNotNone(workflow_api)
Example #28
 def add_log(task, level, code, message, study_id, workflow_id):
     if level not in TaskLogLevels:
         raise ApiError(
             "invalid_logging_level",
             f"Please specify a valid log level. {TaskLogLevels}")
     try:
         user_uid = UserService.current_user().uid
     except ApiError:
         user_uid = "unknown"
     log_message = f"Workflow {workflow_id}, study {study_id}, task {task.get_name()}, user {user_uid}: {message}"
     app.logger.log(TaskLogLevels[level].value, log_message)
     log_model = TaskLogModel(level=level,
                              code=code,
                              user_uid=user_uid,
                              message=message,
                              study_id=study_id,
                              workflow_id=workflow_id,
                              task=task.get_name())
     session.add(log_model)
     session.commit()
     return log_model
Example #29
    def add_reference_file(name, content_type, binary_data):
        """Create a file with the given name, but not associated with a spec or workflow.
           Only one file with the given reference name can exist."""
        file_model = session.query(FileModel). \
            filter(FileModel.is_reference == True). \
            filter(FileModel.name == name).first()
        if not file_model:
            file_extension = FileService.get_extension(name)
            file_type = FileType[file_extension].value

            file_model = FileModel(name=name,
                                   is_reference=True,
                                   type=file_type,
                                   content_type=content_type)
            session.add(file_model)
            session.commit()
        else:
            raise ApiError(
                code='file_already_exists',
                message=f"The reference file {name} already exists.")
        return ReferenceFileService().update_reference_file(
            file_model, binary_data)
Example #30
    def update_from_github(file_ids, source_target=GithubObject.NotSet):
        gh_token = app.config['GITHUB_TOKEN']
        github_repo = app.config['GITHUB_REPO']
        _github = Github(gh_token)
        repo = _github.get_user().get_repo(github_repo)

        for file_id in file_ids:
            file_data_model = FileDataModel.query.filter_by(
                file_model_id=file_id).order_by(desc(
                    FileDataModel.version)).first()
            try:
                repo_file = repo.get_contents(file_data_model.file_model.name,
                                              ref=source_target)
            except UnknownObjectException:
                return {
                    'error':
                    'Attempted to update from repository but file was not present'
                }
            else:
                file_data_model.data = repo_file.decoded_content
                session.add(file_data_model)
                session.commit()